export type ProjectionMode = 'cartesian' | 'spherical'

export type CacheMode = "use" | "renew" | "ignore"

export interface RenderRequest {
  prompt: string

  // whether to use video segmentation
  // disabled (default)
  // firstframe: we only analyze the first frame
  // allframes: we analyze all the frames
  segmentation: 'disabled' | 'firstframe' | 'allframes'

  // segmentation will only be executed if we have a non-empty list of actionnables
  // actionnables are names of things like "chest", "key", "tree", "chair" etc
  actionnables: string[]

  // note: this is the number of frames for Zeroscope,
  // which is currently configured to output at most 3 seconds, so:
  // nbFrames=8 -> 1 sec
  // nbFrames=16 -> 2 sec
  // nbFrames=24 -> 3 sec
  nbFrames: number // min: 1, max: 24

  nbSteps: number // min: 1, max: 50

  seed: number

  width: number // fixed at 1024 for now
  height: number // fixed at 512 for now

  // upscaling factor
  // 0: no upscaling
  // 1: no upscaling
  // 2: 2x larger
  // 3: 3x larger
  // 4: 4x larger, up to 4096x4096 (warning: a PNG of this size can be 50 MB!)
  upscalingFactor: number

  projection: ProjectionMode

  /**
   * Use turbo mode
   *
   * At the time of writing this will use SSD-1B + LCM
   * https://huggingface.co/spaces/jbilcke-hf/fast-image-server
   */
  turbo: boolean

  cache: CacheMode

  wait: boolean // wait until the job is completed

  analyze: boolean // analyze the image to generate a caption (optional)
}
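
// Illustrative example (not part of the original file): a minimal still-image
// request. The prompt and actionnables are invented; the numeric values simply
// follow the ranges documented in the comments above.
export const exampleRenderRequest: RenderRequest = {
  prompt: "a pirate ship at sunset",
  segmentation: "firstframe",
  actionnables: ["ship", "sail", "flag"],
  nbFrames: 1, // a single frame, i.e. a still image (8/16/24 -> 1/2/3 sec of video)
  nbSteps: 25,
  seed: 42,
  width: 1024, // fixed for now
  height: 512, // fixed for now
  upscalingFactor: 0, // no upscaling
  projection: 'cartesian',
  turbo: false,
  cache: "use",
  wait: true,
  analyze: false,
}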

export interface ImageSegment {
  id: number
  box: number[]
  color: number[]
  label: string
  score: number
}

export type RenderedSceneStatus =
  | "pending"
  | "completed"
  | "error"

export interface RenderedScene {
  renderId: string
  status: RenderedSceneStatus
  assetUrl: string
  alt: string
  error: string
  maskUrl: string
  segments: ImageSegment[]
}
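
// Illustrative helper (an assumption, not part of the original file): the
// status union above suggests a polling client only needs to know when a
// render has stopped being "pending", whatever the outcome.
export function isRenderSettled(scene: RenderedScene): boolean {
  return scene.status === "completed" || scene.status === "error"
}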

export interface ImageAnalysisRequest {
  image: string // in base64
  prompt: string
}

export interface ImageAnalysisResponse {
  result: string
  error?: string
}

export type GeneratedPanel = {
  panel: number
  instructions: string
  caption: string
}

export type GeneratedPanels = GeneratedPanel[]
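
// Illustrative example (not part of the original file): a two-panel result
// with invented instructions and captions.
export const exampleGeneratedPanels: GeneratedPanels = [
  { panel: 1, instructions: "wide shot of a castle on a hill at dawn", caption: "The journey begins." },
  { panel: 2, instructions: "close-up of the hero looking at a map", caption: "She already knew the way." },
]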

export type LLMEngine =
  | "INFERENCE_API"
  | "INFERENCE_ENDPOINT"
  | "OPENAI"
  | "REPLICATE"

export type RenderingEngine =
  | "VIDEOCHAIN"
  | "OPENAI"
  | "REPLICATE"
  | "INFERENCE_API"
  | "INFERENCE_ENDPOINT"

export type RenderingModelVendor =
  | "SERVER"
  | "OPENAI"
  | "REPLICATE"
  | "HUGGINGFACE"

export type PostVisibility =
  | "featured" // featured by admins
  | "trending" // top trending / received more than 10 upvotes
  | "normal" // default visibility

export type Post = {
  postId: string
  appId: string
  prompt: string
  previewUrl: string
  assetUrl: string
  createdAt: string
  visibility: PostVisibility
  upvotes: number
  downvotes: number
}
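
// Illustrative helper (an assumption, not part of the original file): the
// "trending" comment above mentions a threshold of more than 10 upvotes, so a
// client-side pre-check could look like this; the actual promotion logic lives
// server-side and is not defined in this file.
export function isTrendingCandidate(post: Post): boolean {
  return post.upvotes > 10
}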

export type CreatePostResponse = {
  success?: boolean
  error?: string
  post: Post
}

export type GetAppPostsResponse = {
  success?: boolean
  error?: string
  posts: Post[]
}

export type GetAppPostResponse = {
  success?: boolean
  error?: string
  post: Post
}

export type LayoutProps = {
  page: number
  nbPanels: number
}

export type Settings = {
  renderingModelVendor: RenderingModelVendor
  renderingUseTurbo: boolean
  huggingfaceApiKey: string
  huggingfaceInferenceApiModel: string
  huggingfaceInferenceApiModelTrigger: string
  huggingfaceInferenceApiFileType: string
  replicateApiKey: string
  replicateApiModel: string
  replicateApiModelVersion: string
  replicateApiModelTrigger: string
  openaiApiKey: string
  openaiApiModel: string
}
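
// Illustrative example (not part of the original file): a Settings object for
// purely server-side rendering with no user-provided API keys. These values
// are assumptions made for the example, not the application's actual defaults.
export const exampleSettings: Settings = {
  renderingModelVendor: "SERVER",
  renderingUseTurbo: false,
  huggingfaceApiKey: "",
  huggingfaceInferenceApiModel: "",
  huggingfaceInferenceApiModelTrigger: "",
  huggingfaceInferenceApiFileType: "",
  replicateApiKey: "",
  replicateApiModel: "",
  replicateApiModelVersion: "",
  replicateApiModelTrigger: "",
  openaiApiKey: "",
  openaiApiModel: "",
}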