diff --git a/src/apis/trans.js b/src/apis/trans.js
index 59dd45f..f3ec376 100644
--- a/src/apis/trans.js
+++ b/src/apis/trans.js
@@ -193,23 +193,23 @@ const genOpenAI = ({
url,
key,
systemPrompt,
- prompt,
+ userPrompt,
model,
temperature,
maxTokens,
}) => {
// For backward compatibility with prompts that historically served as the systemPrompt: if the prompt does not contain the text to be translated, append it to the end of the prompt
- if (!prompt.includes(INPUT_PLACE_TEXT)) {
- prompt += `\nSource Text: ${INPUT_PLACE_TEXT}`;
- }
- prompt = prompt
+ // if (!prompt.includes(INPUT_PLACE_TEXT)) {
+ // prompt += `\nSource Text: ${INPUT_PLACE_TEXT}`;
+ // }
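+ // The disabled shim above is kept for reference only: the source text now
+ // flows through userPrompt, so nothing needs appending to systemPrompt.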
+ systemPrompt = systemPrompt
+ .replaceAll(INPUT_PLACE_FROM, from)
+ .replaceAll(INPUT_PLACE_TO, to)
+ .replaceAll(INPUT_PLACE_TEXT, text);
+ userPrompt = userPrompt
.replaceAll(INPUT_PLACE_FROM, from)
.replaceAll(INPUT_PLACE_TO, to)
.replaceAll(INPUT_PLACE_TEXT, text);
- systemPrompt = systemPrompt
- .replaceAll(INPUT_PLACE_FROM, from)
- .replaceAll(INPUT_PLACE_TO, to)
- .replaceAll(INPUT_PLACE_TEXT, text);
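+ // systemPrompt and userPrompt map to the "system" and "user" chat messages below.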
const data = {
model,
@@ -220,7 +220,7 @@ const genOpenAI = ({
},
{
role: "user",
- content: prompt,
+ content: userPrompt,
},
],
temperature,
@@ -240,26 +240,30 @@ const genOpenAI = ({
return [url, init];
};
-const genGemini = ({ text, from, to, url, key, prompt, model }) => {
+const genGemini = ({ text, from, to, url, key, systemPrompt, userPrompt, model }) => {
url = url
.replaceAll(INPUT_PLACE_MODEL, model)
.replaceAll(INPUT_PLACE_KEY, key);
- prompt = prompt
+ systemPrompt = systemPrompt
+ .replaceAll(INPUT_PLACE_FROM, from)
+ .replaceAll(INPUT_PLACE_TO, to)
+ .replaceAll(INPUT_PLACE_TEXT, text);
+ userPrompt = userPrompt
.replaceAll(INPUT_PLACE_FROM, from)
.replaceAll(INPUT_PLACE_TO, to)
.replaceAll(INPUT_PLACE_TEXT, text);
const data = {
- contents: [
- {
- // role: "user",
- parts: [
- {
- text: prompt,
- },
- ],
- },
- ],
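+ // Gemini takes the system prompt via system_instruction instead of a chat
+ // message; the REST API also accepts a single parts object in place of an array.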
+ system_instruction: {
+ parts: {
+ text: systemPrompt,
+ },
+ },
+ contents: {
+ parts: {
+ text: userPrompt,
+ },
+ },
};
const init = {
@@ -280,19 +284,19 @@ const genClaude = ({
url,
key,
systemPrompt,
- prompt,
+ userPrompt,
model,
temperature,
maxTokens,
}) => {
- prompt = prompt
- .replaceAll(INPUT_PLACE_FROM, from)
- .replaceAll(INPUT_PLACE_TO, to)
- .replaceAll(INPUT_PLACE_TEXT, text);
systemPrompt = systemPrompt
- .replaceAll(INPUT_PLACE_FROM, from)
- .replaceAll(INPUT_PLACE_TO, to)
- .replaceAll(INPUT_PLACE_TEXT, text);
+ .replaceAll(INPUT_PLACE_FROM, from)
+ .replaceAll(INPUT_PLACE_TO, to)
+ .replaceAll(INPUT_PLACE_TEXT, text);
+ userPrompt = userPrompt
+ .replaceAll(INPUT_PLACE_FROM, from)
+ .replaceAll(INPUT_PLACE_TO, to)
+ .replaceAll(INPUT_PLACE_TEXT, text);
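+ // Anthropic's Messages API expects the system prompt as a top-level "system"
+ // field rather than as a chat message.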
const data = {
model,
@@ -300,7 +304,7 @@ const genClaude = ({
messages: [
{
role: "user",
- content: prompt,
+ content: userPrompt,
},
],
temperature,
@@ -320,16 +324,20 @@ const genClaude = ({
return [url, init];
};
-const genOllama = ({ text, from, to, url, key, system,prompt, model }) => {
- prompt = prompt
+const genOllama = ({ text, from, to, url, key, systemPrompt, userPrompt, model }) => {
+ systemPrompt = systemPrompt
+ .replaceAll(INPUT_PLACE_FROM, from)
+ .replaceAll(INPUT_PLACE_TO, to)
+ .replaceAll(INPUT_PLACE_TEXT, text);
+ userPrompt = userPrompt
.replaceAll(INPUT_PLACE_FROM, from)
.replaceAll(INPUT_PLACE_TO, to)
.replaceAll(INPUT_PLACE_TEXT, text);
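+ // Ollama's /api/generate takes separate top-level "system" and "prompt" fields.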
const data = {
model,
- system,
- prompt,
+ system: systemPrompt,
+ prompt: userPrompt,
stream: false,
};
diff --git a/src/config/index.js b/src/config/index.js
index 83e0a5a..f8ad82b 100644
--- a/src/config/index.js
+++ b/src/config/index.js
@@ -527,8 +527,8 @@ const defaultOpenaiApi = {
url: "https://api.openai.com/v1/chat/completions",
key: "",
model: "gpt-4",
- prompt: `Translate the following source text from ${INPUT_PLACE_FROM} to ${INPUT_PLACE_TO}. Output translation directly without any additional text.\n\nSource Text: ${INPUT_PLACE_TEXT}\n\nTranslated Text:`,
- systemPrompt: `You are a professional, authentic machine translation engine. You will be provided with a sentence in ${INPUT_PLACE_FROM}, and your task is to translate it into ${INPUT_PLACE_TO}.`,
+ systemPrompt: `You are a professional, authentic machine translation engine.`,
+ userPrompt: `Translate the following source text from ${INPUT_PLACE_FROM} to ${INPUT_PLACE_TO}. Output translation directly without any additional text.\n\nSource Text: ${INPUT_PLACE_TEXT}\n\nTranslated Text:`,
temperature: 0,
maxTokens: 256,
fetchLimit: 1,
@@ -538,8 +538,8 @@ const defaultOllamaApi = {
url: "http://localhost:11434/api/generate",
key: "",
model: "llama3.1",
- system:"You are a professional, authentic machine translation engine.",
- prompt: `Translate the following text from ${INPUT_PLACE_FROM} to ${INPUT_PLACE_TO},output translation directly without any additional text:\n\n${INPUT_PLACE_TEXT}`,
+ systemPrompt: `You are a professional, authentic machine translation engine.`,
+ userPrompt: `Translate the following source text from ${INPUT_PLACE_FROM} to ${INPUT_PLACE_TO}. Output translation directly without any additional text.\n\nSource Text: ${INPUT_PLACE_TEXT}\n\nTranslated Text:`,
fetchLimit: 1,
fetchInterval: 500,
};
@@ -593,7 +593,8 @@ export const DEFAULT_TRANS_APIS = {
url: `https://generativelanguage.googleapis.com/v1/models/${INPUT_PLACE_MODEL}:generateContent?key=${INPUT_PLACE_KEY}`,
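+ // NOTE: system_instruction may require the v1beta endpoint; v1 can reject it.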
key: "",
model: "gemini-pro",
- prompt: `Translate the following text from ${INPUT_PLACE_FROM} to ${INPUT_PLACE_TO}:\n\n${INPUT_PLACE_TEXT}`,
+ systemPrompt: `You are a professional, authentic machine translation engine.`,
+ userPrompt: `Translate the following source text from ${INPUT_PLACE_FROM} to ${INPUT_PLACE_TO}. Output translation directly without any additional text.\n\nSource Text: ${INPUT_PLACE_TEXT}\n\nTranslated Text:`,
fetchLimit: 1,
fetchInterval: 500,
},
@@ -601,8 +602,8 @@ export const DEFAULT_TRANS_APIS = {
url: "https://api.anthropic.com/v1/messages",
key: "",
model: "claude-3-haiku-20240307",
- prompt: `Translate the following source text from ${INPUT_PLACE_FROM} to ${INPUT_PLACE_TO}. Output translation directly without any additional text.\n\nSource Text: ${INPUT_PLACE_TEXT}\n\nTranslated Text:`,
systemPrompt: `You are a professional, authentic machine translation engine.`,
+ userPrompt: `Translate the following source text from ${INPUT_PLACE_FROM} to ${INPUT_PLACE_TO}. Output translation directly without any additional text.\n\nSource Text: ${INPUT_PLACE_TEXT}\n\nTranslated Text:`,
temperature: 0,
maxTokens: 1024,
fetchLimit: 1,
diff --git a/src/views/Options/Apis.js b/src/views/Options/Apis.js
index 6d8a650..55f17fc 100644
--- a/src/views/Options/Apis.js
+++ b/src/views/Options/Apis.js
@@ -115,9 +115,8 @@ function ApiFields({ translator }) {
url = "",
key = "",
model = "",
- system = "",
- prompt = "",
systemPrompt = "",
+ userPrompt = "",
fetchLimit = DEFAULT_FETCH_LIMIT,
fetchInterval = DEFAULT_FETCH_INTERVAL,
dictNo = "",
@@ -215,6 +214,7 @@ function ApiFields({ translator }) {
)}
{(translator.startsWith(OPT_TRANS_OPENAI) ||
+ translator.startsWith(OPT_TRANS_OLLAMA) ||
translator === OPT_TRANS_CLAUDE ||
translator === OPT_TRANS_GEMINI) && (
<>
@@ -227,37 +227,18 @@ function ApiFields({ translator }) {
/>
- </>
- )}
-
- {(translator.startsWith(OPT_TRANS_OLLAMA)) && (
- <>
-
-
-