代码没有抛出任何错误,它也能从 Hugging Face API 获取到 AI 生成的图像,并能在控制台日志中看到图像数据,但图像没有在 Farcaster frame 上渲染出来,可能是什么原因?我正在使用 frog.fm 框架来创建 frame,并且是 Next.js 的初学者。
任何帮助都将不胜感激!
/** @jsxImportSource frog/jsx */
import { Button, Frog, TextInput } from 'frog'
import { devtools } from 'frog/dev'
// import { neynar } from 'frog/hubs'
import { handle } from 'frog/next'
import { serveStatic } from 'frog/serve-static'
import axios from 'axios'
// Frog application instance: frame routes are served under /api,
// static assets from the site root.
const app = new Frog({
assetsPath: '/',
basePath: '/api',
// Supply a Hub to enable frame verification.
// hub: neynar({ apiKey: 'NEYNAR_FROG_FM' })
})
// Uncomment to use Edge Runtime
// export const runtime = 'edge'
app.frame('/', (c) => {
const { buttonValue, inputText, status } = c
const imgdescription = inputText
let imageData: Uint8Array | null = null;
const createImage = async () => {
const URL = `https://api-inference.huggingface.co/models/stabilityai/stable-diffusion-2`
const response = await axios({
url: URL,
method: 'POST',
headers: {
Authorization: `Bearer ${process.env.REACT_APP_HUGGING_FACE_API_KEY}`,
Accept: 'application/json',
'Content-Type': 'application/json',
},
data: JSON.stringify({
inputs: imgdescription, options: { wait_for_model: true },
}),
responseType: 'arraybuffer',
})
const type = response.headers['content-type'] // server is giveing the image type as jpeg.
const data = response.data
const base64data = Buffer.from(data).toString('base64')
const img = `data:${type};base64,` + base64data // <-- This is so we can render it on the page
console.log("aaaaaaaaaaaaaaaaa", img);
console.log("bbbbbbbbbbbbbbbbb", data);
return data
}
if (status === 'response') {
createImage().then(data => {
imageData = data;
console.log("1111111111111111111111", data)
c.res({
image: renderContent()
});
});
}
const renderContent = () => {
return (
<div
style={{
alignItems: 'center',
background:
status === 'response'
? 'linear-gradient(to right, #432889, #17101F)'
: 'black',
backgroundSize: '100% 100%',
display: 'flex',
flexDirection: 'column',
flexWrap: 'nowrap',
height: '100%',
justifyContent: 'center',
textAlign: 'center',
width: '100%',
}}
>
<div>
{ imageData ? (
<img src={`data:image/jpeg;base64,${imageData}`} alt="Generated Image" />
) : (
'Welcome!'
)}
</div>
</div>
);
};
return c.res({
image: renderContent(),
intents: [
<TextInput placeholder="Enter custom fruit..." />,
<Button value="abc" >Generate Image</Button>,
status === 'response' && <Button.Reset>Reset</Button.Reset>,
],
})
})
// Mount the Frog devtools UI (local frame preview), serving its assets statically.
devtools(app, { serveStatic })
// Next.js App Router handlers: Frog serves the initial frame on GET
// and processes button/input interactions on POST through the same app.
export const GET = handle(app)
export const POST = handle(app)
我尝试通过 ChatGPT 和 Gemini 解决这个问题,但没有任何效果。
你可以尝试把:
`image: renderContent(),`
改为:
`image: () => renderContent(),`