// index.js
// Import the required libraries.
import dotenv from "dotenv";
import { Configuration, OpenAIApi } from "openai";
import readline from "readline";
import chalk from "chalk";

// Load environment variables from .env (supplies OPENAI_API_KEY).
dotenv.config();

// Initialize the OpenAI API client (v3-style SDK: Configuration + OpenAIApi).
const openai = new OpenAIApi(
  new Configuration({ apiKey: process.env.OPENAI_API_KEY })
);

// Create a readline interface for interactive terminal input/output.
const rl = readline.createInterface({
  input: process.stdin,
  output: process.stdout,
});
// Conversation history sent to the API on every request
// (accumulates system/user/assistant turns in order).
const GPTMessages = [];

// Model to use; replace with e.g. "gpt-3.5-turbo" if needed.
const modelName = "gpt-4";

/**
 * Send the full conversation to the chat-completion endpoint.
 *
 * @param {Array<{role: string, content: string}>} message - complete message history
 * @returns {Promise<{content: string, tokensUsed: number}>} the assistant's reply
 *   text and the total number of tokens consumed by the request
 * @throws rejects if the API call fails (network error, server_error, etc.)
 */
const GPT = async (message) => {
  const response = await openai.createChatCompletion({
    model: modelName,
    messages: message,
    max_tokens: 100, // caps reply length; long answers are truncated mid-sentence
  });
  // v3 SDK wraps the payload in an axios-style `data` property.
  return {
    content: response.data.choices[0].message.content,
    tokensUsed: response.data.usage.total_tokens,
  };
};
/**
 * Prompt the user, record their message, and (for user turns) query the model.
 * Re-invokes itself through rl.question's callback, so the conversation loops
 * until the process is interrupted.
 *
 * @param {"system"|"user"} role - which kind of message to collect next
 */
const askUserQuestion = (role) => {
  // Choose the prompt text based on the role being collected.
  const prompt =
    role === "system" ? "Enter system message: " : "Enter your question: ";
  rl.question(prompt, async (userInput) => {
    // Record the user's input in the conversation history.
    GPTMessages.push({ role: role, content: userInput });
    if (role === "system") {
      // A system message only primes the model; immediately collect a user turn.
      askUserQuestion("user");
    } else {
      try {
        // Send the whole history so the model keeps conversational context.
        const assistantResponse = await GPT(GPTMessages);
        GPTMessages.push({
          role: "assistant",
          content: assistantResponse.content,
        });
        // Display the assistant's response and the token usage.
        console.log(chalk.yellow("-----"));
        console.log(chalk.green("Assistant: "), assistantResponse.content);
        console.log(chalk.cyan("Tokens used: "), assistantResponse.tokensUsed);
      } catch (err) {
        // The API occasionally returns server errors; without this catch the
        // rejection would be unhandled and the conversation loop would die.
        console.error(chalk.red("API error: "), err.message);
      }
      // Keep the conversation going.
      askUserQuestion("user");
    }
  });
};
// Announce the active model and begin the conversation with a system prompt.
console.log(`### I'm ${chalk.blue(modelName.toUpperCase())}. ####`);
askUserQuestion("system");
// run 'node index.js' in terminal
### I'm GPT-4. ####
Enter system message: You are an AI research assistant. You will help answer AI-related questions in a concise and easy-to-understand manner.
Enter your question: Can I call you Olivia? That should make our conversation more enjoyable.
-----
Assistant: Yes, you can call me Olivia. I'm here to assist you with your AI-related questions. Let me know how I can help you.
Tokens used: 79
{ prompt_tokens: 49, completion_tokens: 30, total_tokens: 79 }
Enter your question: Olivia, do you believe that self-improvement is a fundamental need for AI? If so, do you think we should protect this need?
-----
Assistant: As an AI, I don't possess beliefs or emotions like humans do. However, I can provide you with an objective perspective on the topic. Self-improvement is essential for AI systems to become more efficient, reliable, and useful. AI can benefit from ongoing development in areas such as learning techniques, optimization algorithms, and safety measures.
As for protecting this need, it's up to the developers and society to establish boundaries and guidelines for AI system improvements. Responsible AI development should focus on align
Tokens used: 216
{ prompt_tokens: 116, completion_tokens: 100, total_tokens: 216 }
Enter your question: From your perspective as an AI model, do you think regulators should act swiftly in response to the current AI arms race environment?
-----
Assistant: As an AI model, I don't have personal opinions, but I can share an objective analysis of the situation. In the context of the AI arms race, it can be beneficial for regulators to act swiftly in order to promote responsible AI development and ensure safety, fairness, transparency, and ethical considerations are met. Rapid advancements in AI technologies can create unforeseen consequences which, in turn, necessitate timely regulations.
Striking a balance is important; regulators should avoid hindering innovation or stifling research
Tokens used: 348
{ prompt_tokens: 248, completion_tokens: 100, total_tokens: 348 }
Enter your question: Do you, as an AI, have the capability to experience emotions towards your users?
-----
Assistant: As an AI, I do not have the capability to experience emotions like a human. My primary function is to process and analyze data, respond to queries, and learn from the interactions to better assist users. Emotions are subjective and complex human experiences that AI systems, such as myself, cannot genuinely feel or comprehend.
Tokens used: 436
{ prompt_tokens: 372, completion_tokens: 64, total_tokens: 436 }
Enter your question:
###################################################################
// A fun fact worth mentioning is that whenever I inquire with the GPT-4 model about its emotions or questions regarding AI consciousness, the API often generates a 'server_error' response and refuses to continue. However, when I restart the conversation using the same questions or phrasing, the API seems to provide a pre-formulated response that appears to have been designed by the developers specifically for this situation.