我发送:二战时期的护士。 您回复只回复: A WWII-era nurse in a German uniform, holding a wine bottle and stethoscope, sitting at a table in white attire, with a table in the background, masterpiece, best quality, 4k, illustration style, best lighting, depth of field, detailed character, detailed environment.` }, { role: "user", content: prompt } ] };
/**
 * Generate an image from a text prompt using the Prodia AI API.
 *
 * Queues a generation job, then polls the job status every 5 seconds
 * until it succeeds, fails, or times out.
 *
 * @param {string} model - The name of the AI model to use for image generation.
 * @param {string} prompt - The text prompt to generate an image from.
 * @returns {Promise<string>} The URL of the generated image, or a Chinese
 *   error-message string if generation fails (errors are caught, not thrown).
 * @see https://docs.prodia.ai/docs/api-reference
 */
async function generateImageByText(model, prompt) {
  try {
    // First request to generate the image.
    const generateOptions = {
      method: 'POST',
      headers: {
        accept: 'application/json',
        'content-type': 'application/json',
        'X-Prodia-Key': PRODIA_API_KEY
      },
      body: JSON.stringify({
        model: model,
        prompt: prompt,
        negative_prompt: 'low resolution, blurry, distorted features, wrong fingers, extra numbers, watermarks, ugly, distorted, deformed, deformed, repetitive, missing arms and legs, multiple hands and legs, incomplete limbs, long neck, cross-eyed, glazed eyes, lax eyes, squinting, deformed eyes',
        steps: 20,
        cfg_scale: 7,
        seed: -1,
        sampler: 'DPM++ 2M Karras',
        width: 1024,
        height: 1024
      })
    };
    // FIX: the original used `generateData` without ever issuing the
    // generation request, and had a `catch` with no matching `try`.
    const generateResponse = await fetch('https://api.prodia.com/v1/sd/generate', generateOptions);
    const generateData = await generateResponse.json();

    if (generateData.status !== 'queued') {
      throw new Error('Failed to queue the job');
    }
    const jobId = generateData.job;

    // Polling for the job status.
    const statusOptions = {
      method: 'GET',
      headers: {
        accept: 'application/json',
        'X-Prodia-Key': PRODIA_API_KEY
      }
    };
    // Robustness: cap polling at 60 attempts (~5 minutes) so a job that
    // never resolves cannot make this loop spin forever.
    for (let attempt = 0; attempt < 60; attempt++) {
      const statusResponse = await fetch(`https://api.prodia.com/v1/job/${jobId}`, statusOptions);
      const statusData = await statusResponse.json();

      if (statusData.status === 'succeeded') {
        return statusData.imageUrl;
      } else if (statusData.status === 'failed') {
        throw new Error('Image generation failed');
      }

      // Wait for a short period before checking again.
      await new Promise(resolve => setTimeout(resolve, 5000));
    }
    throw new Error('Image generation timed out');
  } catch (error) {
    // Preserve original behavior: surface failure as a message string
    // rather than rethrowing.
    return "图像生成或转换失败,请检查!" + error.message;
  }
}
/**
 * Build the streaming (SSE-style) chat-completion response for a generated image.
 *
 * Assembles a human-readable content string containing the original prompt,
 * the translated prompt, and the image specification, along with an OpenAI-style
 * completion id, timestamp, and system fingerprint.
 *
 * NOTE(review): the visible body ends right after assembling `content`; the
 * code that builds and returns the actual SSE `Response` (and that presumably
 * consumes `model` and `imageUrl`, both unused in the visible span) appears to
 * have been truncated from this chunk — verify against the full file.
 *
 * @param {string} originalPrompt - The original prompt given to the model.
 * @param {string} translatedPrompt - The translated prompt given to the model.
 * @param {string} size - The size of the generated image.
 * @param {string} model - The model used to generate the image.
 * @param {string} imageUrl - The URL of the generated image.
 * @returns {Response} The SSE response object (constructed past the visible span).
 */
function handleStreamResponse(originalPrompt, translatedPrompt, size, model, imageUrl) {
  // OpenAI-compatible completion metadata.
  const uniqueId = `chatcmpl-${Date.now()}`;
  const createdTimestamp = Math.floor(Date.now() / 1000);
  // Math.random() is not cryptographically secure — fine for a display-only fingerprint.
  const systemFingerprint = "fp_" + Math.random().toString(36).substr(2, 9);
  // User-facing summary text (Chinese UI strings are part of runtime behavior).
  const content = `🎨 原始提示词:${originalPrompt}\n` +
    `🌐 翻译后的提示词:${translatedPrompt}\n` +
    `📐 图像规格:${size}\n` +
    `🌟 图像生成成功!\n` +
    `以下是结果:\n\n` +
    ``;
/**
 * Build the non-streaming chat-completion response for a generated image.
 *
 * Assembles a human-readable content string containing the original prompt,
 * the translated prompt, and the image specification, along with an OpenAI-style
 * completion id, timestamp, and system fingerprint.
 *
 * NOTE(review): the visible body ends right after assembling `content`; the
 * code that builds and returns the JSON `Response` (and that presumably
 * consumes `model` and `imageUrl`, both unused in the visible span) appears to
 * have been truncated from this chunk — verify against the full file.
 *
 * @param {string} originalPrompt - The original prompt given to the model.
 * @param {string} translatedPrompt - The translated prompt given to the model.
 * @param {string} size - The size of the generated image (e.g. 1024x1024).
 * @param {string} model - The model used to generate the image
 *   (e.g. @cf/stabilityai/stable-diffusion-xl-base-1.0).
 * @param {string} imageUrl - The URL of the generated image.
 * @returns {Response} The response object (constructed past the visible span).
 */
function handleNonStreamResponse(originalPrompt, translatedPrompt, size, model, imageUrl) {
  // OpenAI-compatible completion metadata.
  const uniqueId = `chatcmpl-${Date.now()}`;
  const createdTimestamp = Math.floor(Date.now() / 1000);
  // Math.random() is not cryptographically secure — fine for a display-only fingerprint.
  const systemFingerprint = "fp_" + Math.random().toString(36).substr(2, 9);
  // User-facing summary text (Chinese UI strings are part of runtime behavior).
  const content = `🎨 原始提示词:${originalPrompt}\n` +
    `🌐 翻译后的提示词:${translatedPrompt}\n` +
    `📐 图像规格:${size}\n` +
    `🌟 图像生成成功!\n` +
    `以下是结果:\n\n` +
    ``;
/**
 * Send a POST request to the Cloudflare AI inference API.
 *
 * Picks a random account from CF_ACCOUNT_LIST (simple load spreading across
 * accounts) and runs the given model with the supplied JSON payload.
 *
 * @param {string} model - AI model name (e.g. a Workers AI model id).
 * @param {object} jsonBody - JSON object to send as the request body.
 * @returns {Promise<Response>} The raw fetch Response on success.
 * @throws {Error} If the response status is not OK.
 */
async function postRequest(model, jsonBody) {
  // Choose one account at random from the configured pool.
  const pick = Math.floor(Math.random() * CF_ACCOUNT_LIST.length);
  const { account_id, token } = CF_ACCOUNT_LIST[pick];

  const apiUrl = `https://api.cloudflare.com/client/v4/accounts/${account_id}/ai/run/${model}`;
  const requestInit = {
    method: 'POST',
    headers: {
      'Authorization': `Bearer ${token}`,
      'Content-Type': 'application/json'
    },
    body: JSON.stringify(jsonBody)
  };

  const response = await fetch(apiUrl, requestInit);
  if (!response.ok) {
    throw new Error(`Unexpected response ${response.status}`);
  }
  return response;
}
/**
 * Extract the translate flag from a prompt string.
 *
 * Looks for the first "---tl" (translate) or "---ntl" (no-translate) marker
 * in the prompt. If neither marker is present, falls back to the global
 * default CF_IS_TRANSLATE.
 *
 * @param {string} prompt The prompt string to parse the flag from.
 * @return {boolean} The translate flag parsed from the prompt string.
 */
function extractTranslate(prompt) {
  const flagMatch = /---n?tl/.exec(prompt);
  if (flagMatch !== null) {
    switch (flagMatch[0]) {
      case "---ntl":
        return false;
      case "---tl":
        return true;
    }
  }
  // No explicit flag in the prompt: use the configured default.
  return CF_IS_TRANSLATE;
}
/**
 * Remove the translate flag from a prompt string.
 *
 * Strips the first occurrence of "---ntl" or "---tl" from the given prompt
 * and trims surrounding whitespace from the result.
 *
 * @param {string} prompt The prompt string to clean.
 * @return {string} The cleaned prompt string.
 */
function cleanPromptString(prompt) {
  const flagIndex = prompt.search(/---n?tl/);
  if (flagIndex === -1) {
    // No flag present: just normalize surrounding whitespace.
    return prompt.trim();
  }
  // "---ntl" is 6 characters, "---tl" is 5 — drop exactly the matched flag.
  const flagLength = prompt.startsWith("---ntl", flagIndex) ? 6 : 5;
  const cleaned = prompt.slice(0, flagIndex) + prompt.slice(flagIndex + flagLength);
  return cleaned.trim();
}