OpenAI 이미지 스튜디오
채팅 기반 이미지 생성과 수정을 빠르게 붙이는 설치형 확장팩
OpenAI 이미지 스튜디오
채팅형 이미지 생성과 참고 이미지 기반 수정을 바로 붙이는 확장팩입니다.
설치
pnpm dlx shadcn@latest add https://n-exit.io/r/openai-image-studio.json
npx shadcn@latest add https://n-exit.io/r/openai-image-studio.json
추가 파일
openai-image-studio.tsx
use-openai-image-studio.ts
route.ts
"use client";
import { useOpenAIImageStudio } from "@/hooks/use-openai-image-studio";
import { Badge } from "@/components/ui/badge";
import { Button } from "@/components/ui/button";
import {
Card,
CardContent,
CardDescription,
CardHeader,
CardTitle,
} from "@/components/ui/card";
import { Input } from "@/components/ui/input";
import { Textarea } from "@/components/ui/textarea";
/**
 * Chat-style UI for OpenAI image generation and reference-image editing.
 * All state and submission logic lives in useOpenAIImageStudio; this
 * component is pure presentation.
 */
const SIZE_OPTIONS = ["1024x1024", "1536x1024", "1024x1536"] as const;
const QUALITY_OPTIONS = ["low", "medium", "high"] as const;

export default function OpenAIImageStudio() {
  const {
    messages,
    prompt,
    setPrompt,
    quality,
    setQuality,
    size,
    setSize,
    referenceFile,
    referencePreview,
    setReferenceFile,
    clearReferenceFile,
    isSubmitting,
    error,
    submit,
  } = useOpenAIImageStudio();

  // Uploading a reference image switches the studio from generation to editing.
  const hasReference = referenceFile !== null;

  return (
    <Card className="border-border/70 bg-background/95">
      <CardHeader>
        <div className="flex items-center gap-2">
          <CardTitle>OpenAI Image Studio</CardTitle>
          <Badge variant="secondary">
            {hasReference ? "Edit mode" : "Generate mode"}
          </Badge>
        </div>
        <CardDescription>
          Generate new images from a prompt, or upload a reference image to
          make guided edits in a chat-like workflow.
        </CardDescription>
      </CardHeader>
      <CardContent className="space-y-6">
        {/* Prompt / size / quality controls */}
        <div className="grid gap-4 md:grid-cols-[1.4fr_0.8fr_0.8fr]">
          <div className="space-y-2">
            <label className="text-sm font-medium">Prompt</label>
            <Textarea
              value={prompt}
              onChange={(event) => setPrompt(event.target.value)}
              placeholder="Describe the image you want, or explain how the uploaded image should change."
              className="min-h-28"
            />
          </div>
          <div className="space-y-2">
            <label className="text-sm font-medium">Size</label>
            <select
              value={size}
              onChange={(event) =>
                setSize(event.target.value as (typeof SIZE_OPTIONS)[number])
              }
              className="border-input bg-background w-full rounded-md border px-3 py-2 text-sm"
            >
              {SIZE_OPTIONS.map((option) => (
                <option key={option} value={option}>
                  {option}
                </option>
              ))}
            </select>
          </div>
          <div className="space-y-2">
            <label className="text-sm font-medium">Quality</label>
            <select
              value={quality}
              onChange={(event) =>
                setQuality(event.target.value as (typeof QUALITY_OPTIONS)[number])
              }
              className="border-input bg-background w-full rounded-md border px-3 py-2 text-sm"
            >
              {QUALITY_OPTIONS.map((option) => (
                <option key={option} value={option}>
                  {option}
                </option>
              ))}
            </select>
          </div>
        </div>
        {/* Reference image upload + run button */}
        <div className="space-y-3">
          <div className="flex flex-col gap-3 sm:flex-row sm:items-center">
            <Input
              type="file"
              accept="image/png,image/jpeg,image/webp"
              onChange={(event) =>
                setReferenceFile(event.target.files?.[0] ?? null)
              }
              className="cursor-pointer"
            />
            {hasReference ? (
              <Button type="button" variant="outline" onClick={clearReferenceFile}>
                Remove reference image
              </Button>
            ) : null}
            <Button
              type="button"
              onClick={submit}
              disabled={isSubmitting || !prompt.trim()}
            >
              {isSubmitting ? "Generating..." : "Run prompt"}
            </Button>
          </div>
          {referencePreview ? (
            <div className="space-y-2">
              <p className="text-sm font-medium">Reference image</p>
              <img
                src={referencePreview}
                alt="Reference preview"
                className="border-border h-40 w-40 rounded-xl border object-cover"
              />
            </div>
          ) : null}
          {error ? <p className="text-sm text-red-500">{error}</p> : null}
        </div>
        {/* Chat transcript */}
        <div className="space-y-4">
          {messages.length === 0 ? (
            <div className="text-muted-foreground rounded-xl border border-dashed p-6 text-sm">
              Start with a prompt. If you upload an image first, the prompt
              will be treated as an edit request instead of a fresh generation.
            </div>
          ) : null}
          {messages.map((message) => {
            const isAssistant = message.role === "assistant";
            return (
              <div
                key={message.id}
                className={`space-y-3 rounded-2xl border p-4 ${
                  isAssistant ? "bg-muted/30" : "bg-background"
                }`}
              >
                <div className="flex items-center gap-2">
                  <Badge variant={isAssistant ? "default" : "outline"}>
                    {isAssistant ? "Assistant" : "You"}
                  </Badge>
                  {message.mode ? (
                    <Badge variant="secondary">{message.mode}</Badge>
                  ) : null}
                </div>
                <p className="text-sm leading-6 whitespace-pre-wrap">
                  {message.text}
                </p>
                {message.imageUrl ? (
                  <img
                    src={message.imageUrl}
                    alt="Generated result"
                    className="border-border max-h-[420px] w-full rounded-xl border object-contain"
                  />
                ) : null}
              </div>
            );
          })}
        </div>
      </CardContent>
    </Card>
  );
}
"use client";
import { useState } from "react";
// One entry in the chat transcript rendered by the studio UI.
type StudioMessage = {
  id: string;
  role: "user" | "assistant";
  text: string;
  // Optional image attached to the message (data: URL or remote URL).
  imageUrl?: string;
  // Which workflow this message belongs to; unset for plain text turns.
  mode?: "generate" | "edit";
};
// JSON payload POSTed to the /api/ai/images route.
type StudioRequest = {
  prompt: string;
  // When present, the route treats the request as an edit of this image.
  imageDataUrl?: string;
  quality: "low" | "medium" | "high";
  size: "1024x1024" | "1536x1024" | "1024x1536";
};
/** Read a File into a base64 `data:` URL using FileReader. */
function fileToDataUrl(file: File): Promise<string> {
  return new Promise((resolve, reject) => {
    const reader = new FileReader();
    reader.addEventListener("load", () => resolve(String(reader.result)));
    reader.addEventListener("error", () =>
      reject(new Error("Failed to read file."))
    );
    reader.readAsDataURL(file);
  });
}
/** Unique-enough message id: epoch millis plus a short random base36 suffix. */
function createMessageId(): string {
  const suffix = Math.random().toString(36).slice(2, 8);
  return [Date.now(), suffix].join("-");
}
/**
 * State and submission logic for the OpenAI image studio.
 *
 * Owns the chat transcript, the prompt/size/quality controls, and the
 * optional reference image. `submit` POSTs to /api/ai/images and appends
 * both the user turn and the assistant response to the transcript.
 */
export function useOpenAIImageStudio() {
  const [messages, setMessages] = useState<StudioMessage[]>([]);
  const [prompt, setPrompt] = useState("");
  const [referenceFile, setReferenceFileState] = useState<File | null>(null);
  const [referencePreview, setReferencePreview] = useState<string | null>(null);
  const [quality, setQuality] = useState<"low" | "medium" | "high">("medium");
  const [size, setSize] = useState<"1024x1024" | "1536x1024" | "1024x1536">(
    "1024x1024"
  );
  const [isSubmitting, setIsSubmitting] = useState(false);
  const [error, setError] = useState<string | null>(null);

  // Append one message to the transcript, generating its id here.
  const appendMessage = (message: Omit<StudioMessage, "id">) => {
    setMessages((current) => [
      ...current,
      { id: createMessageId(), ...message },
    ]);
  };

  // Store the file and keep the data-URL preview in sync with it.
  const setReferenceFile = async (file: File | null) => {
    setReferenceFileState(file);
    setReferencePreview(file ? await fileToDataUrl(file) : null);
  };

  const clearReferenceFile = () => {
    setReferenceFileState(null);
    setReferencePreview(null);
  };

  // Send the current prompt (and reference image, if any) to the API route.
  const submit = async () => {
    const text = prompt.trim();
    if (!text || isSubmitting) return;
    setIsSubmitting(true);
    setError(null);
    try {
      // A reference image turns the request into an edit instead of a generation.
      const mode: StudioMessage["mode"] = referenceFile ? "edit" : "generate";
      const payload: StudioRequest = { prompt: text, quality, size };
      if (referenceFile) {
        payload.imageDataUrl = await fileToDataUrl(referenceFile);
      }
      appendMessage({
        role: "user",
        text,
        imageUrl: referencePreview ?? undefined,
        mode,
      });
      const response = await fetch("/api/ai/images", {
        method: "POST",
        headers: { "Content-Type": "application/json" },
        body: JSON.stringify(payload),
      });
      const data = await response.json();
      if (!response.ok) {
        throw new Error(data.error || "Failed to generate image.");
      }
      appendMessage({
        role: "assistant",
        text: data.message || "Image generated successfully.",
        imageUrl: data.imageUrl,
        mode,
      });
      setPrompt("");
    } catch (submitError) {
      setError(
        submitError instanceof Error
          ? submitError.message
          : "Failed to generate image."
      );
    } finally {
      setIsSubmitting(false);
    }
  };

  return {
    messages,
    prompt,
    setPrompt,
    referenceFile,
    referencePreview,
    quality,
    setQuality,
    size,
    setSize,
    isSubmitting,
    error,
    setReferenceFile,
    clearReferenceFile,
    submit,
  };
}
import { NextResponse } from "next/server";
// Base URL for the OpenAI Images API.
const OPENAI_API_URL = "https://api.openai.com/v1/images";
// Fallback model used when OPENAI_IMAGE_MODEL is not configured.
const DEFAULT_MODEL = "gpt-image-1.5";
// Shape of the JSON body accepted by the POST handler below.
type ImageRequestBody = {
  prompt?: string;
  // base64 data: URL of the reference image; presence selects edit mode
  imageDataUrl?: string;
  size?: "1024x1024" | "1536x1024" | "1024x1536";
  quality?: "low" | "medium" | "high";
};
/**
 * Convert a base64 `data:` URL into a Blob suitable for multipart upload.
 *
 * @param dataUrl - e.g. "data:image/png;base64,AAAA..."
 * @returns Blob holding the decoded bytes, typed with the declared MIME
 *          type (falls back to "image/png" when none is declared).
 * @throws Error when the input has no base64 payload section.
 */
function dataUrlToBlob(dataUrl: string): Blob {
  const commaIndex = dataUrl.indexOf(",");
  // Guard: a malformed data URL without a payload would otherwise crash
  // Buffer.from(undefined) with an unhelpful TypeError.
  if (commaIndex === -1) {
    throw new Error("Invalid data URL: missing base64 payload.");
  }
  const meta = dataUrl.slice(0, commaIndex);
  const content = dataUrl.slice(commaIndex + 1);
  const mimeType = meta.match(/data:(.*?);base64/)?.[1] || "image/png";
  return new Blob([Buffer.from(content, "base64")], { type: mimeType });
}
/** Pick a usable image URL out of one OpenAI image payload entry. */
function toImageUrl(payload: { b64_json?: string; url?: string }): string | null {
  return payload.b64_json
    ? `data:image/png;base64,${payload.b64_json}`
    : payload.url || null;
}
/** POST a JSON body to the OpenAI Images API. */
function requestJson(
  endpoint: string,
  body: Record<string, unknown>
): Promise<Response> {
  const headers = {
    Authorization: `Bearer ${process.env.OPENAI_API_KEY}`,
    "Content-Type": "application/json",
  };
  return fetch(`${OPENAI_API_URL}${endpoint}`, {
    method: "POST",
    headers,
    body: JSON.stringify(body),
  });
}
/** POST multipart form data to the OpenAI Images API (used for edits). */
function requestFormData(
  endpoint: string,
  formData: FormData
): Promise<Response> {
  return fetch(`${OPENAI_API_URL}${endpoint}`, {
    method: "POST",
    headers: { Authorization: `Bearer ${process.env.OPENAI_API_KEY}` },
    body: formData,
  });
}
/**
 * POST /api/ai/images — generate a new image or edit a reference image.
 *
 * Body: ImageRequestBody. When `imageDataUrl` is present the request is
 * routed to the /edits endpoint as multipart form data; otherwise to
 * /generations as JSON. Returns { imageUrl, message, storage } on success,
 * or { error } with an appropriate status on failure.
 */
export async function POST(request: Request) {
  if (!process.env.OPENAI_API_KEY) {
    return NextResponse.json(
      { error: "OPENAI_API_KEY is not configured." },
      { status: 500 }
    );
  }
  // Parse inside try/catch so malformed JSON yields a clear 400 instead of
  // an unhandled exception (previously this await sat outside any handler).
  let body: ImageRequestBody;
  try {
    body = (await request.json()) as ImageRequestBody;
  } catch {
    return NextResponse.json(
      { error: "Request body must be valid JSON." },
      { status: 400 }
    );
  }
  const prompt = body.prompt?.trim();
  if (!prompt) {
    return NextResponse.json(
      { error: "Prompt is required." },
      { status: 400 }
    );
  }
  const size = body.size || "1024x1024";
  const quality = body.quality || "medium";
  const model = process.env.OPENAI_IMAGE_MODEL || DEFAULT_MODEL;
  try {
    let response: Response;
    if (body.imageDataUrl) {
      // Edit mode: the /edits endpoint requires multipart form data.
      const formData = new FormData();
      formData.append("model", model);
      formData.append("prompt", prompt);
      formData.append("size", size);
      formData.append("quality", quality);
      formData.append(
        "image[]",
        dataUrlToBlob(body.imageDataUrl),
        "reference-image.png"
      );
      response = await requestFormData("/edits", formData);
    } else {
      // Generate mode: plain JSON request.
      response = await requestJson("/generations", {
        model,
        prompt,
        size,
        quality,
      });
    }
    const result = await response.json();
    if (!response.ok) {
      // Surface OpenAI's own error message and status to the client.
      return NextResponse.json(
        { error: result.error?.message || "OpenAI request failed." },
        { status: response.status }
      );
    }
    const firstImage = result.data?.[0];
    const imageUrl = firstImage ? toImageUrl(firstImage) : null;
    if (!imageUrl) {
      return NextResponse.json(
        { error: "No image was returned by OpenAI." },
        { status: 502 }
      );
    }
    return NextResponse.json({
      imageUrl,
      message: body.imageDataUrl
        ? "Edited image is ready."
        : "Generated image is ready.",
      storage: {
        persisted: false,
        note: "This extension does not store generated images. Add your own object storage or database if you want history or saved assets.",
      },
    });
  } catch (error) {
    return NextResponse.json(
      {
        error:
          error instanceof Error ? error.message : "Unexpected server error.",
      },
      { status: 500 }
    );
  }
}
환경변수
OPENAI_API_KEY=your_openai_api_key
OPENAI_IMAGE_MODEL=gpt-image-1.5
저장
기본 구성에는 저장이 없습니다.
- Supabase Storage
- S3 / R2 / GCS
SQL은 포함하지 않았습니다. 저장이 필요하면 프로젝트에 맞는 스토리지를 따로 연결하면 됩니다.