feat(misc): add memory to chat completion (#18463)
Parent: 7de30460de
Commit: 8be2a268b7
@@ -8,17 +8,31 @@ import {
   OpenAIApi,
   CreateModerationResponse,
   CreateEmbeddingResponse,
-  ChatCompletionRequestMessageRoleEnum,
   CreateCompletionResponseUsage,
 } from 'openai';
 import {
+  ApplicationError,
+  ChatItem,
   PageSection,
+  UserError,
+  checkEnvVariables,
   getListOfSources,
   getMessageFromResponse,
+  initializeChat,
   sanitizeLinksInResponse,
   toMarkdownList,
 } from './utils';
 
+const DEFAULT_MATCH_THRESHOLD = 0.78;
+const DEFAULT_MATCH_COUNT = 15;
+const MIN_CONTENT_LENGTH = 50;
+
+// This limits history to 30 messages back and forth.
+// It's arbitrary, but also generous.
+// History length should be based on token count.
+// This is a temporary solution.
+const MAX_HISTORY_LENGTH = 30;
+
 const openAiKey = process.env['NX_OPENAI_KEY'];
 const supabaseUrl = process.env['NX_NEXT_PUBLIC_SUPABASE_URL'];
 const supabaseServiceKey = process.env['NX_SUPABASE_SERVICE_ROLE_KEY'];
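The temporary `MAX_HISTORY_LENGTH` cap above counts messages, and the comment itself concedes the limit should really be based on tokens. A minimal sketch of what a token-based trim could look like, using the same `gpt3-tokenizer` package this library already depends on (the `trimHistoryToTokenBudget` helper is hypothetical, not part of this commit):

```typescript
// Hypothetical helper, not part of this commit: trim history by token budget
// rather than message count, keeping the most recent turns.
import GPT3Tokenizer from 'gpt3-tokenizer';
import { ChatItem } from './utils';

const tokenizer = new GPT3Tokenizer({ type: 'gpt3' });

function trimHistoryToTokenBudget(
  history: ChatItem[],
  maxTokens: number
): ChatItem[] {
  let total = 0;
  const kept: ChatItem[] = [];
  // Walk from the newest message backwards so recent context survives.
  for (let i = history.length - 1; i >= 0; i--) {
    total += tokenizer.encode(history[i].content).text.length;
    if (total > maxTokens) break;
    kept.unshift(history[i]);
  }
  return kept;
}
```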
@@ -27,34 +41,34 @@ const config = new Configuration({
 });
 const openai = new OpenAIApi(config);
 
-export async function nxDevDataAccessAi(query: string): Promise<{
+let chatFullHistory: ChatItem[] = [];
+
+let totalTokensSoFar = 0;
+
+export async function nxDevDataAccessAi(
+  query: string,
+  aiResponse?: string
+): Promise<{
   textResponse: string;
   usage?: CreateCompletionResponseUsage;
   sources: { heading: string; url: string }[];
   sourcesMarkdown: string;
 }> {
+  if (chatFullHistory.length > MAX_HISTORY_LENGTH) {
+    chatFullHistory.slice(0, MAX_HISTORY_LENGTH - 4);
+  }
+
   try {
-    if (!openAiKey) {
-      throw new ApplicationError('Missing environment variable NX_OPENAI_KEY');
-    }
-
-    if (!supabaseUrl) {
-      throw new ApplicationError(
-        'Missing environment variable NX_NEXT_PUBLIC_SUPABASE_URL'
-      );
-    }
-
-    if (!supabaseServiceKey) {
-      throw new ApplicationError(
-        'Missing environment variable NX_SUPABASE_SERVICE_ROLE_KEY'
-      );
-    }
-
+    checkEnvVariables(openAiKey, supabaseUrl, supabaseServiceKey);
+
     if (!query) {
       throw new UserError('Missing query in request data');
     }
 
-    const supabaseClient = createClient(supabaseUrl, supabaseServiceKey);
+    const supabaseClient = createClient(
+      supabaseUrl as string,
+      supabaseServiceKey as string
+    );
 
     // Moderate the content to comply with OpenAI T&C
     const sanitizedQuery = query.trim();
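An annotation on the history guard above, not part of the commit: `Array.prototype.slice` returns a new array without mutating the original, and the return value is discarded here, so `chatFullHistory` is never actually shortened. It would also keep the oldest entries rather than the newest. A minimal corrected sketch, assuming the intent is to drop the oldest turns:

```typescript
// Sketch: actually truncate the history, keeping the newest messages.
if (chatFullHistory.length > MAX_HISTORY_LENGTH) {
  chatFullHistory = chatFullHistory.slice(-(MAX_HISTORY_LENGTH - 4));
}
```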
@@ -72,9 +86,27 @@ export async function nxDevDataAccessAi(query: string): Promise<{
     }
 
     // Create embedding from query
+    // NOTE: Here, we may or may not want to include the previous AI response.
+    /**
+     * Retrieving relevant Nx documentation sections via embeddings is a design decision.
+     * Including the prior response might give more contextually relevant sections,
+     * but just sending the query might suffice for many cases.
+     *
+     * We can experiment with this.
+     *
+     * What the solution looks like with the previous response:
+     *
+     *  const embeddingResponse = await openai.createEmbedding({
+     *    model: 'text-embedding-ada-002',
+     *    input: sanitizedQuery + aiResponse,
+     *  });
+     *
+     * This costs more tokens, so if we see costs skyrocket we remove it.
+     * As it says in the docs, it's a design decision, and it may or may not really improve results.
+     */
     const embeddingResponse = await openai.createEmbedding({
       model: 'text-embedding-ada-002',
-      input: sanitizedQuery,
+      input: sanitizedQuery + aiResponse,
     });
 
     if (embeddingResponse.status !== 200) {
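Another annotation, not part of the commit: `aiResponse` is optional, so on the first turn `sanitizedQuery + aiResponse` concatenates the literal string "undefined" into the embedding input. A minimal guard, as a sketch:

```typescript
// Sketch: skip the previous response when there isn't one yet,
// instead of embedding the literal string "undefined".
const embeddingResponse = await openai.createEmbedding({
  model: 'text-embedding-ada-002',
  input: sanitizedQuery + (aiResponse ?? ''),
});
```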
@@ -92,9 +124,9 @@ export async function nxDevDataAccessAi(query: string): Promise<{
       'match_page_sections_2',
       {
         embedding,
-        match_threshold: 0.78,
-        match_count: 15,
-        min_content_length: 50,
+        match_threshold: DEFAULT_MATCH_THRESHOLD,
+        match_count: DEFAULT_MATCH_COUNT,
+        min_content_length: MIN_CONTENT_LENGTH,
       }
     );
 
@@ -102,6 +134,14 @@ export async function nxDevDataAccessAi(query: string): Promise<{
       throw new ApplicationError('Failed to match page sections', matchError);
     }
 
+    // Note: this is experimental. I think it should work,
+    // mainly because we're testing previous response + query.
+    if (!pageSections || pageSections.length === 0) {
+      throw new UserError(
+        'Nothing relevant found in the Nx documentation! Please try another query.'
+      );
+    }
+
     const tokenizer = new GPT3Tokenizer({ type: 'gpt3' });
     let tokenCount = 0;
     let contextText = '';
@@ -122,39 +162,33 @@ export async function nxDevDataAccessAi(query: string): Promise<{
     const prompt = `
     ${`
     You are a knowledgeable Nx representative.
-    Your knowledge is based entirely on the official Nx documentation.
-    You should answer queries using ONLY that information.
+    Your knowledge is based entirely on the official Nx Documentation.
+    You can answer queries using ONLY that information.
+    You cannot answer queries using your own knowledge or experience.
     Answer in markdown format. Always give an example, answer as thoroughly as you can, and
     always provide a link to relevant documentation
     on the https://nx.dev website. All the links you find or post
     that look like local or relative links, always prepend with "https://nx.dev".
-    Your answer should be in the form of a Markdown article, much like the
-    existing Nx documentation. Include a title, and subsections, if it makes sense.
-    Mark the titles and the subsections with the appropriate markdown syntax.
-    If you are unsure and the answer is not explicitly written in the Nx documentation, say
-    "Sorry, I don't know how to help with that.
-    You can visit the [Nx documentation](https://nx.dev/getting-started/intro) for more info."
-    Remember, answer the question using ONLY the information provided in the Nx documentation.
-    Answer as markdown (including related code snippets if available).
+    Your answer should be in the form of a Markdown article
+    (including related code snippets if available), much like the
+    existing Nx documentation. Mark the titles and the subsections with the appropriate markdown syntax.
+    If you are unsure and cannot find an answer in the Nx Documentation, say
+    "Sorry, I don't know how to help with that. You can visit the [Nx documentation](https://nx.dev/getting-started/intro) for more info."
+    Remember, answer the question using ONLY the information provided in the Nx Documentation.
     `
       .replace(/\s+/g, ' ')
       .trim()}
     `;
 
-    const chatGptMessages = [
-      {
-        role: ChatCompletionRequestMessageRoleEnum.System,
-        content: prompt,
-      },
-      {
-        role: ChatCompletionRequestMessageRoleEnum.Assistant,
-        content: contextText,
-      },
-      {
-        role: ChatCompletionRequestMessageRoleEnum.User,
-        content: sanitizedQuery,
-      },
-    ];
+    const { chatMessages: chatGptMessages, chatHistory } = initializeChat(
+      chatFullHistory,
+      query,
+      contextText,
+      prompt,
+      aiResponse
+    );
+
+    chatFullHistory = chatHistory;
 
     const response = await openai.createChatCompletion({
       model: 'gpt-3.5-turbo-16k',
@@ -174,6 +208,8 @@ export async function nxDevDataAccessAi(query: string): Promise<{
 
     const sources = getListOfSources(pageSections);
 
+    totalTokensSoFar += response.data.usage?.total_tokens ?? 0;
+
     return {
       textResponse: responseWithoutBadLinks,
       usage: response.data.usage,
@@ -196,10 +232,12 @@ export async function nxDevDataAccessAi(query: string): Promise<{
     throw err;
   }
 }
-export class ApplicationError extends Error {
-  constructor(message: string, public data: Record<string, any> = {}) {
-    super(message);
-  }
-}
+
+export function resetHistory() {
+  chatFullHistory = [];
+  totalTokensSoFar = 0;
+}
 
-export class UserError extends ApplicationError {}
+export function getHistory(): ChatItem[] {
+  return chatFullHistory;
+}
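Together with the moved error classes (now in the utils file below), these exports turn the module into a small stateful chat session. A hedged usage sketch, based on how the UI component later in this diff consumes the API:

```typescript
import {
  nxDevDataAccessAi,
  resetHistory,
  getHistory,
} from '@nx/nx-dev/data-access-ai';

// First turn: no previous AI response yet.
const first = await nxDevDataAccessAi('How do I create a React app with Nx?');

// Follow-up turn: pass the previous answer so retrieval and the chat
// history both stay in context.
const followUp = await nxDevDataAccessAi(
  'How do I add a component to it?',
  first.textResponse
);

console.log(getHistory().length); // accumulated ChatItem entries
resetHistory(); // start a fresh conversation
```

Note that the history lives in module-level variables, so every caller in the same runtime shares one conversation.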
@@ -1,4 +1,8 @@
-import { CreateChatCompletionResponse } from 'openai';
+import {
+  ChatCompletionRequestMessageRoleEnum,
+  CreateChatCompletionResponse,
+} from 'openai';
+import { getHistory } from './data-access-ai';
 export interface PageSection {
   id: number;
   page_id: number;
@@ -12,11 +16,6 @@ export interface PageSection {
 export function getMessageFromResponse(
   response: CreateChatCompletionResponse
 ): string {
-  /**
-   *
-   * This function here will or may be enhanced
-   * once we add more functionality
-   */
   return response.choices[0].message?.content ?? '';
 }
 
@@ -33,7 +32,7 @@ export function getListOfSources(
       return false;
     })
     .map((section) => ({
-      heading: section.heading,
+      heading: section.heading ?? section.url_partial,
       url: `https://nx.dev${section.url_partial}`,
     }));
 
@@ -85,3 +84,105 @@ async function is404(url: string): Promise<boolean> {
     }
   }
 }
+
+export function checkEnvVariables(
+  openAiKey?: string,
+  supabaseUrl?: string,
+  supabaseServiceKey?: string
+) {
+  if (!openAiKey) {
+    throw new ApplicationError('Missing environment variable NX_OPENAI_KEY');
+  }
+  if (!supabaseUrl) {
+    throw new ApplicationError(
+      'Missing environment variable NX_NEXT_PUBLIC_SUPABASE_URL'
+    );
+  }
+  if (!supabaseServiceKey) {
+    throw new ApplicationError(
+      'Missing environment variable NX_SUPABASE_SERVICE_ROLE_KEY'
+    );
+  }
+}
+
+export class ApplicationError extends Error {
+  constructor(message: string, public data: Record<string, any> = {}) {
+    super(message);
+  }
+}
+
+export class UserError extends ApplicationError {}
+
+export function initializeChat(
+  chatFullHistory: ChatItem[],
+  query: string,
+  contextText: string,
+  prompt: string,
+  aiResponse?: string
+): { chatMessages: ChatItem[]; chatHistory: ChatItem[] } {
+  const finalQuery = `
+  You will be provided the Nx Documentation.
+  Answer my message by following the approach below:
+
+  - Step 1: Identify CLUES (keywords, phrases, contextual information, references) in the input that you could use to generate an answer.
+  - Step 2: Deduce the diagnostic REASONING process from the premises (clues, question), relying ONLY on the information provided in the Nx Documentation. If you recognize vulgar language, answer the question if possible, and educate the user to stay polite.
+  - Step 3: EVALUATE the reasoning. If the reasoning aligns with the Nx Documentation, accept it. Do not use any external knowledge or make assumptions outside of the provided Nx documentation. If the reasoning doesn't strictly align with the Nx Documentation or relies on external knowledge or inference, reject it and answer with the exact string:
+  "Sorry, I don't know how to help with that. You can visit the [Nx documentation](https://nx.dev/getting-started/intro) for more info."
+  - Final Step: You can also rely on the messages we have exchanged so far.
+  Nx Documentation:
+  ${contextText}
+
+  ---- My message: ${query}
+  `;
+  let chatGptMessages: ChatItem[] = [];
+  let messages: ChatItem[] = [];
+
+  if (chatFullHistory.length > 0) {
+    messages = [
+      {
+        role: ChatCompletionRequestMessageRoleEnum.Assistant,
+        content: aiResponse ?? '',
+      },
+      { role: ChatCompletionRequestMessageRoleEnum.User, content: finalQuery },
+    ];
+    chatGptMessages = [...chatFullHistory, ...messages];
+  } else {
+    messages = [
+      { role: ChatCompletionRequestMessageRoleEnum.System, content: prompt },
+      { role: ChatCompletionRequestMessageRoleEnum.User, content: finalQuery },
+    ];
+    chatGptMessages = [...messages];
+  }
+
+  chatFullHistory.push(...messages);
+
+  return { chatMessages: chatGptMessages, chatHistory: chatFullHistory };
+}
+
+export function extractQuery(text: string) {
+  const regex = /---- My message: (.+)/;
+  const match = text.match(regex);
+  return match ? match[1].trim() : text;
+}
+
+export function getProcessedHistory(): ChatItem[] {
+  let history = getHistory();
+  history = history
+    .map((item) => {
+      if (item.role === ChatCompletionRequestMessageRoleEnum.User) {
+        item.content = extractQuery(item.content);
+      }
+      if (item.role !== ChatCompletionRequestMessageRoleEnum.System) {
+        return item;
+      } else {
+        return undefined;
+      }
+    })
+    .filter((item) => !!item) as ChatItem[];
+  return history;
+}
+
+export interface ChatItem {
+  role: ChatCompletionRequestMessageRoleEnum;
+  content: string;
+}
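Two annotations on the additions above, not part of the commit. First, in `extractQuery` the `.` in `/---- My message: (.+)/` does not match newlines, so only the first line of a multi-line question is recovered for display. Second, `getProcessedHistory` assigns to `item.content` in place, which also rewrites the entries stored in the shared history. A sketch addressing both (the `Multiline`/`Safe` helpers are hypothetical names):

```typescript
// Sketch: capture multi-line queries and avoid mutating shared history items.
export function extractQueryMultiline(text: string): string {
  // [\s\S] matches across newlines, unlike `.` without the `s` flag.
  const match = text.match(/---- My message: ([\s\S]+)/);
  return match ? match[1].trim() : text;
}

export function getProcessedHistorySafe(): ChatItem[] {
  return getHistory()
    .filter((item) => item.role !== ChatCompletionRequestMessageRoleEnum.System)
    .map((item) =>
      item.role === ChatCompletionRequestMessageRoleEnum.User
        ? { ...item, content: extractQueryMultiline(item.content) }
        : item
    );
}
```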
@@ -1,33 +1,60 @@
 import { ReactNode, useState } from 'react';
 import { Button } from '@nx/nx-dev/ui-common';
 import { sendCustomEvent } from '@nx/nx-dev/feature-analytics';
 
 import { renderMarkdown } from '@nx/nx-dev/ui-markdoc';
-import { nxDevDataAccessAi } from '@nx/nx-dev/data-access-ai';
+import {
+  nxDevDataAccessAi,
+  resetHistory,
+  getProcessedHistory,
+  ChatItem,
+} from '@nx/nx-dev/data-access-ai';
+
 export function FeatureAi(): JSX.Element {
+  const [chatHistory, setChatHistory] = useState<ChatItem[] | null>([]);
   const [finalResult, setFinalResult] = useState<null | ReactNode>(null);
+  const [textResponse, setTextResponse] = useState<undefined | string>('');
   const [error, setError] = useState(null);
   const [query, setSearchTerm] = useState('');
   const [loading, setLoading] = useState(false);
   const [feedbackSent, setFeedbackSent] = useState<boolean>(false);
   const [sources, setSources] = useState('');
 
-  const warning = `
+  const warning = renderMarkdown(
+    `
 {% callout type="warning" title="Always double check!" %}
 This feature is still in Alpha.
 The results may not be accurate, so please always double check with our documentation.
 {% /callout %}
-`;
+`,
+    { filePath: '' }
+  ).node;
+
+  const infoBox = renderMarkdown(
+    `
+{% callout type="info" title="New question or continue chat?" %}
+This chat has memory. It will answer all questions in the context of the previous questions.
+If you want to ask a new question, you can reset the chat history with the button below.
+{% /callout %}
+`,
+    { filePath: '' }
+  ).node;
 
   const handleSubmit = async () => {
+    if (textResponse) {
+      setChatHistory([
+        ...(chatHistory ?? []),
+        { role: 'assistant', content: textResponse },
+      ]);
+    }
     setLoading(true);
+    setError(null);
     let completeText = '';
     let usage;
     let sourcesMarkdown = '';
     try {
-      const aiResponse = await nxDevDataAccessAi(query);
+      const aiResponse = await nxDevDataAccessAi(query, textResponse);
       completeText = aiResponse.textResponse;
+      setTextResponse(completeText);
       usage = aiResponse.usage;
       setSources(
         JSON.stringify(aiResponse.sources?.map((source) => source.url))
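This `handleSubmit` is where the "memory" in the commit title is wired up: the previous answer is held in `textResponse` state and passed as the second argument on the next submit. A condensed sketch of the round trip (a reading of the code above, not new behavior):

```typescript
// 1. First ask: textResponse is '', so the library starts a fresh chat.
// 2. The answer is stored locally: setTextResponse(aiResponse.textResponse).
// 3. Next ask: the stored answer rides along as the aiResponse argument,
//    feeding both the embedding input and the assistant history entry.
const aiResponse = await nxDevDataAccessAi(query, textResponse);
setTextResponse(aiResponse.textResponse);
```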
@@ -38,6 +65,7 @@ export function FeatureAi(): JSX.Element {
       setError(error as any);
       setLoading(false);
     }
+    setChatHistory(getProcessedHistory());
     sendCustomEvent('ai_query', 'ai', 'query', undefined, {
       query,
       ...usage,
@@ -50,10 +78,20 @@ export function FeatureAi(): JSX.Element {
 {% /callout %}`;
 
     setFinalResult(
-      renderMarkdown(warning + completeText + sourcesMd, { filePath: '' }).node
+      renderMarkdown(completeText + sourcesMd, { filePath: '' }).node
     );
   };
 
+  const handleReset = () => {
+    resetHistory();
+    setFinalResult(null);
+    setSearchTerm('');
+    setTextResponse('');
+    setSources('');
+    setFeedbackSent(false);
+    setChatHistory(null);
+  };
+
   const handleFeedback = (type: 'good' | 'bad') => {
     try {
       sendCustomEvent('ai_feedback', 'ai', type, undefined, {
@@ -97,11 +135,43 @@ export function FeatureAi(): JSX.Element {
           Ask
         </Button>
       </div>
+      <div>
+        {infoBox}
+        <Button variant="primary" size="small" onClick={() => handleReset()}>
+          Ask new question{' '}
+          <span role="img" aria-label="refresh">
+            🔄
+          </span>
+        </Button>
+        {warning}
+      </div>
       {loading ? (
         <div className="p-4 max-w-none">
           <h1>Thinking...</h1>
         </div>
       ) : null}
+
+      {chatHistory ? (
+        <div className="p-4 bg-gray-100">
+          <div className="mx-auto bg-white p-6 rounded shadow">
+            {chatHistory.length > 30 && (
+              <div>
+                You've reached the maximum message history limit. Some previous
+                messages will be removed. You can always start a new chat.
+              </div>
+            )}
+            <p>HISTORY</p>
+            {chatHistory.map((chatItem, index) => (
+              <div key={index} className="mb-4 border-b pb-2">
+                <strong className="text-gray-700 capitalize">
+                  {chatItem.role}:
+                </strong>
+                <p className="text-gray-600 mt-1">{chatItem.content}</p>
+              </div>
+            ))}
+          </div>
+        </div>
+      ) : null}
       {finalResult && !loading && !error ? (
         <>
           <div className="p-4 max-w-none prose prose-slate dark:prose-invert">
@@ -141,7 +211,9 @@ export function FeatureAi(): JSX.Element {
           )}
         </>
       ) : null}
-      {error ? <div>There was an error: {error['message']}</div> : null}
+      {error && !loading ? (
+        <div>There was an error: {error['message']}</div>
+      ) : null}
     </div>
   );
 }
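An annotation on the history banner above, not part of the commit: the JSX hard-codes `chatHistory.length > 30` while the library defines the same limit as `MAX_HISTORY_LENGTH = 30`, so the two can silently drift apart. A minimal sketch of sharing the constant (assumes the library exports it, which this commit does not do):

```typescript
// Sketch: share the limit instead of repeating the literal 30.
// In the data-access library:
export const MAX_HISTORY_LENGTH = 30;

// In the component (assumes the export above):
import { MAX_HISTORY_LENGTH } from '@nx/nx-dev/data-access-ai';
const historyFull = (chatHistory?.length ?? 0) > MAX_HISTORY_LENGTH;
```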
@@ -192,7 +192,10 @@ async function generateEmbeddings() {
     ...getAllFilesWithItemList(manifestsPackages),
     ...getAllFilesWithItemList(manifestsRecipes),
     ...getAllFilesWithItemList(manifestsTags),
-  ].filter((entry) => !entry.path.includes('sitemap'));
+  ].filter(
+    (entry) =>
+      !entry.path.includes('sitemap') || !entry.path.includes('deprecated')
+  );
 
   const embeddingSources: EmbeddingSource[] = [
     ...allFilesPaths.map((entry) => {
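A final annotation, not part of the commit: by De Morgan's laws, `!a || !b` is false only when the path contains both substrings, so this filter drops nothing except paths mentioning both 'sitemap' and 'deprecated'. If the intent is to exclude paths containing either one, the condition wants `&&`:

```typescript
// Sketch: exclude entries whose path mentions either substring.
// candidateFiles stands in for the spread arrays above (hypothetical name).
const allFilesPaths = candidateFiles.filter(
  (entry) =>
    !entry.path.includes('sitemap') && !entry.path.includes('deprecated')
);
```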