Julian Bilcke committed
Commit 0f35d4c · 1 Parent(s): 534ad64
update
Files changed:
- src/app/config.ts +4 -1
- src/app/server/actions/ai-tube-hf/getChannelVideos.ts +5 -1
- src/app/server/actions/ai-tube-hf/getVideoRequestsFromChannel.ts +17 -1
- src/app/server/actions/ai-tube-hf/parseChannel.ts +8 -4
- src/app/server/actions/ai-tube-hf/uploadVideoRequestToDataset.ts +19 -1
- src/app/server/actions/submitVideoRequest.ts +8 -2
- src/app/server/actions/utils/isValidNumber.ts +3 -0
- src/app/server/actions/utils/orientationToWidthHeight.ts +38 -0
- src/app/server/actions/utils/parseDatasetPrompt.ts +23 -2
- src/app/server/actions/utils/parseDatasetReadme.ts +9 -3
- src/app/server/actions/utils/parseVideoModelName.ts +2 -2
- src/app/server/actions/utils/parseVideoOrientation.ts +31 -0
- src/app/views/user-channel-view/index.tsx +84 -41
- src/types.ts +41 -0
src/app/config.ts
CHANGED
@@ -1,9 +1,12 @@
+import { VideoGenerationModel, VideoOrientation } from "@/types"
+
 export const showBetaFeatures = `${
   process.env.NEXT_PUBLIC_SHOW_BETA_FEATURES || ""
 }`.trim().toLowerCase() === "true"
 
 
-export const defaultVideoModel = "SVD"
+export const defaultVideoModel: VideoGenerationModel = "SVD"
+export const defaultVideoOrientation: VideoOrientation = "landscape"
 export const defaultVoice = "Julian"
 
 export const developerMode = `${
src/app/server/actions/ai-tube-hf/getChannelVideos.ts
CHANGED
@@ -6,6 +6,7 @@ import { getVideoRequestsFromChannel } from "./getVideoRequestsFromChannel"
 import { adminApiKey } from "../config"
 import { getVideoIndex } from "./getVideoIndex"
 import { extendVideosWithStats } from "./extendVideosWithStats"
+import { orientationToWidthHeight } from "../utils/orientationToWidthHeight"
 
 // return
 export async function getChannelVideos({
@@ -19,7 +20,7 @@ export async function getChannelVideos({
 }): Promise<VideoInfo[]> {
 
   if (!channel) { return [] }
-
+
   const videos = await getVideoRequestsFromChannel({
     channel,
     apiKey: adminApiKey,
@@ -50,6 +51,9 @@
      updatedAt: v.updatedAt,
      tags: v.tags,
      channel,
+     duration: v.duration || 0,
+     orientation: v.orientation,
+     ...orientationToWidthHeight(v.orientation),
    }
 
    if (queued[v.id]) {
src/app/server/actions/ai-tube-hf/getVideoRequestsFromChannel.ts
CHANGED
@@ -7,6 +7,7 @@ import { parsePromptFileName } from "../utils/parsePromptFileName"
 import { downloadFileAsText } from "./downloadFileAsText"
 import { parseDatasetPrompt } from "../utils/parseDatasetPrompt"
 import { parseVideoModelName } from "../utils/parseVideoModelName"
+import { orientationToWidthHeight } from "../utils/orientationToWidthHeight"
 
 /**
  * Return all the videos requests created by a user on their channel
@@ -72,7 +73,19 @@ export async function getVideoRequestsFromChannel({
      continue
    }
 
-   const { title, description, tags, prompt, thumbnail, model, lora, style, music, voice } = parseDatasetPrompt(rawMarkdown, channel)
+   const {
+     title,
+     description,
+     tags,
+     prompt,
+     thumbnail,
+     model,
+     lora,
+     style,
+     music,
+     voice,
+     orientation,
+   } = parseDatasetPrompt(rawMarkdown, channel)
 
    if (!title || !description || !prompt) {
      // console.log("dataset prompt is incomplete or unparseable")
@@ -101,6 +114,9 @@ export async function getVideoRequestsFromChannel({
      updatedAt: file.lastCommit?.date || new Date().toISOString(),
      tags: Array.isArray(tags) && tags.length ? tags : channel.tags,
      channel,
+     orientation,
+     ...orientationToWidthHeight(orientation),
+     duration: 0,
    }
 
    videos[id] = video
src/app/server/actions/ai-tube-hf/parseChannel.ts
CHANGED
@@ -2,9 +2,10 @@
 
 import { Credentials, downloadFile, whoAmI } from "@/huggingface/hub/src"
 import { parseDatasetReadme } from "@/app/server/actions/utils/parseDatasetReadme"
-import { ChannelInfo, VideoGenerationModel } from "@/types"
+import { ChannelInfo, VideoGenerationModel, VideoOrientation } from "@/types"
 
 import { adminCredentials } from "../config"
+import { defaultVideoModel, defaultVideoOrientation } from "@/app/config"
 
 export async function parseChannel(options: {
   id: string
@@ -62,7 +63,7 @@ export async function parseChannel(options: {
   // TODO parse the README to get the proper label
   let label = slug.replaceAll("-", " ")
 
-  let model: VideoGenerationModel =
+  let model: VideoGenerationModel = defaultVideoModel
   let lora = ""
   let style = ""
   let thumbnail = ""
@@ -71,6 +72,7 @@ export async function parseChannel(options: {
   let voice = ""
   let music = ""
   let tags: string[] = []
+  let orientation: VideoOrientation = defaultVideoOrientation
 
   // console.log(`going to read datasets/${name}`)
   try {
@@ -89,11 +91,12 @@ export async function parseChannel(options: {
     label = parsedDatasetReadme.pretty_name
     description = parsedDatasetReadme.description
     thumbnail = parsedDatasetReadme.thumbnail || "thumbnail.jpg"
-    model = parsedDatasetReadme.model
+    model = parsedDatasetReadme.model || defaultVideoModel
     lora = parsedDatasetReadme.lora || ""
     style = parsedDatasetReadme.style || ""
     voice = parsedDatasetReadme.voice || ""
    music = parsedDatasetReadme.music || ""
+    orientation = parsedDatasetReadme.orientation || defaultVideoOrientation
 
    thumbnail =
      thumbnail.startsWith("http")
@@ -126,7 +129,8 @@ export async function parseChannel(options: {
    prompt,
    likes: options.likes,
    tags,
-   updatedAt: options.updatedAt.toISOString()
+   updatedAt: options.updatedAt.toISOString(),
+   orientation,
  }
 
  return channel
src/app/server/actions/ai-tube-hf/uploadVideoRequestToDataset.ts
CHANGED
@@ -3,8 +3,9 @@
 import { Blob } from "buffer"
 
 import { Credentials, uploadFile, whoAmI } from "@/huggingface/hub/src"
-import { ChannelInfo, VideoGenerationModel, VideoInfo, VideoRequest } from "@/types"
+import { ChannelInfo, VideoGenerationModel, VideoInfo, VideoOrientation, VideoRequest } from "@/types"
 import { formatPromptFileName } from "../utils/formatPromptFileName"
+import { orientationToWidthHeight } from "../utils/orientationToWidthHeight"
 
 /**
  * Save the video request to the user's own dataset
@@ -22,6 +23,8 @@ export async function uploadVideoRequestToDataset({
   voice,
   music,
   tags,
+  duration,
+  orientation,
 }: {
   channel: ChannelInfo
   apiKey: string
@@ -34,6 +37,8 @@ export async function uploadVideoRequestToDataset({
   voice: string
   music: string
   tags: string[]
+  duration: number
+  orientation: VideoOrientation
 }): Promise<{
   videoRequest: VideoRequest
   videoInfo: VideoInfo
@@ -81,6 +86,14 @@ ${voice}
 
 ${music}
 
+# Duration
+
+${duration}
+
+# Orientation
+
+${orientation}
+
 # Tags
 
 ${tags.map(tag => `- ${tag}`).join("\n")}
@@ -116,6 +129,8 @@ ${prompt}
    updatedAt: new Date().toISOString(),
    tags,
    channel,
+   duration: 0,
+   orientation,
  }
 
  const newVideo: VideoInfo = {
@@ -136,6 +151,9 @@ ${prompt}
    updatedAt: new Date().toISOString(),
    tags,
    channel,
+   duration,
+   orientation,
+   ...orientationToWidthHeight(orientation),
  }
 
  return {
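Note (not part of the commit, just an illustration with hypothetical values): the prompt file written above now gains "# Duration" and "# Orientation" markdown sections. The relevant excerpt of a generated prompt file would look roughly like this:

# Duration

15

# Orientation

portrait

# Tags

- Education

Only the Orientation section is read back so far: parseDatasetPrompt (further down) passes it through parseVideoOrientation, yielding "portrait" here, and orientationToWidthHeight("portrait") then resolves it to { width: 576, height: 1024 } when the VideoInfo is built. The Duration section is written but not yet parsed back (the duration/width/height destructuring stays commented out in parseDatasetPrompt).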
src/app/server/actions/submitVideoRequest.ts
CHANGED
@@ -1,6 +1,6 @@
 "use server"
 
-import { ChannelInfo, VideoGenerationModel, VideoInfo } from "@/types"
+import { ChannelInfo, VideoGenerationModel, VideoInfo, VideoOrientation } from "@/types"
 
 import { uploadVideoRequestToDataset } from "./ai-tube-hf/uploadVideoRequestToDataset"
 
@@ -16,6 +16,8 @@ export async function submitVideoRequest({
   voice,
   music,
   tags,
+  duration,
+  orientation,
 }: {
   channel: ChannelInfo
   apiKey: string
@@ -28,6 +30,8 @@ export async function submitVideoRequest({
   voice: string
   music: string
   tags: string[]
+  duration: number
+  orientation: VideoOrientation
 }): Promise<VideoInfo> {
   if (!apiKey) {
     throw new Error(`the apiKey is required`)
@@ -44,7 +48,9 @@ export async function submitVideoRequest({
     style,
     voice,
     music,
-    tags
+    tags,
+    duration,
+    orientation
   })
 
 
src/app/server/actions/utils/isValidNumber.ts
ADDED
@@ -0,0 +1,3 @@
+export function isValidNumber(input?: any) {
+  return typeof input === "number" && !isNaN(input) && isFinite(input)
+}
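For reference (not part of the commit), the behavior of the new helper follows directly from the check above:

isValidNumber(42)        // true
isValidNumber(0)         // true
isValidNumber(NaN)       // false
isValidNumber(Infinity)  // false
isValidNumber("42")      // false (numeric strings are rejected)
isValidNumber(undefined) // false

The helper is not referenced by the other hunks in this commit; it looks like groundwork, possibly for clamping the duration input (see the TODO in user-channel-view below).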
src/app/server/actions/utils/orientationToWidthHeight.ts
ADDED
@@ -0,0 +1,38 @@
+import { VideoOrientation } from "@/types"
+
+export function orientationToWidthHeight(orientation?: VideoOrientation): { width: number; height: number } {
+
+  if (orientation === "square") {
+    return {
+      width: 512,
+      height: 512,
+    }
+  }
+
+  const longResolution = 1024
+  const shortResolution = 576
+
+  if (orientation === "portrait") {
+    return {
+      width: shortResolution,
+      height: longResolution,
+    }
+  }
+
+  /*
+
+  this is already the default, actually
+
+  if (orientation === "landscape") {
+    return {
+      width: longResolution,
+      height: shortResolution,
+    }
+  }
+  */
+
+  return {
+    width: longResolution,
+    height: shortResolution,
+  }
+}
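A minimal usage sketch (not part of the commit); the values follow from the constants above, with landscape (1024×576) as the fallback:

import { orientationToWidthHeight } from "@/app/server/actions/utils/orientationToWidthHeight"

orientationToWidthHeight("landscape") // { width: 1024, height: 576 }
orientationToWidthHeight("portrait")  // { width: 576, height: 1024 }
orientationToWidthHeight("square")    // { width: 512, height: 512 }
orientationToWidthHeight(undefined)   // { width: 1024, height: 576 } (defaults to landscape)

In this commit the result is spread into VideoInfo objects (see getChannelVideos and uploadVideoRequestToDataset above), so width and height always stay consistent with the orientation.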
src/app/server/actions/utils/parseDatasetPrompt.ts
CHANGED
@@ -1,10 +1,27 @@
 
 import { ChannelInfo, ParsedDatasetPrompt } from "@/types"
 import { parseVideoModelName } from "./parseVideoModelName"
+import { parseVideoOrientation } from "./parseVideoOrientation"
+import { defaultVideoModel, defaultVideoOrientation } from "@/app/config"
 
 export function parseDatasetPrompt(markdown: string, channel: ChannelInfo): ParsedDatasetPrompt {
   try {
-    const { title, description, tags, prompt, model, lora, style, thumbnail, voice, music } = parseMarkdown(markdown)
+    const {
+      title,
+      description,
+      tags,
+      prompt,
+      model,
+      lora,
+      style,
+      thumbnail,
+      voice,
+      music,
+      // duration,
+      // width,
+      // height,
+      orientation
+    } = parseMarkdown(markdown)
 
     return {
       title: typeof title === "string" && title ? title : "",
@@ -19,6 +36,7 @@ export function parseDatasetPrompt(markdown: string, channel: ChannelInfo): ParsedDatasetPrompt {
       thumbnail: typeof thumbnail === "string" && thumbnail ? thumbnail : "",
       voice: typeof voice === "string" && voice ? voice : (channel.voice || ""),
       music: typeof music === "string" && music ? music : (channel.music || ""),
+      orientation: parseVideoOrientation(orientation, channel.orientation),
     }
   } catch (err) {
     return {
@@ -26,12 +44,13 @@ export function parseDatasetPrompt(markdown: string, channel: ChannelInfo): ParsedDatasetPrompt {
       description: "",
       tags: channel.tags || [],
       prompt: "",
-      model: channel.model ||
+      model: channel.model || defaultVideoModel,
       lora: channel.lora || "",
       style: channel.style || "",
       thumbnail: "",
       voice: channel.voice || "",
       music: channel.music || "",
+      orientation: channel.orientation || defaultVideoOrientation,
     }
   }
 }
@@ -52,6 +71,7 @@ function parseMarkdown(markdown: string): {
   thumbnail: string
   voice: string
   music: string
+  orientation: string
 } {
   markdown = `${markdown || ""}`.trim()
 
@@ -77,5 +97,6 @@ function parseMarkdown(markdown: string): {
     thumbnail: sections["thumbnail"] || "",
     voice: sections["voice"] || "",
     music: sections["music"] || "",
+    orientation: sections["orientation"] || "",
   };
 }
src/app/server/actions/utils/parseDatasetReadme.ts
CHANGED
@@ -3,6 +3,8 @@ import metadataParser from "markdown-yaml-metadata-parser"
 
 import { ParsedDatasetReadme, ParsedMetadataAndContent } from "@/types"
 import { parseVideoModelName } from "./parseVideoModelName"
+import { parseVideoOrientation } from "./parseVideoOrientation"
+import { defaultVideoModel, defaultVideoOrientation } from "@/app/config"
 
 export function parseDatasetReadme(markdown: string = ""): ParsedDatasetReadme {
   try {
@@ -12,14 +14,14 @@ export function parseDatasetReadme(markdown: string = ""): ParsedDatasetReadme {
 
     // console.log("DEBUG README:", { metadata, content })
 
-    const { model, lora, style, thumbnail, voice, music, description, prompt, tags } = parseMarkdown(content)
+    const { model, lora, style, thumbnail, voice, music, description, prompt, tags, orientation } = parseMarkdown(content)
 
     return {
       license: typeof metadata?.license === "string" ? metadata.license : "",
       pretty_name: typeof metadata?.pretty_name === "string" ? metadata.pretty_name : "",
       hf_tags: Array.isArray(metadata?.tags) ? metadata.tags : [],
       tags: tags && typeof tags === "string" ? tags.split("-").map(x => x.trim()).filter(x => x) : [],
-      model: parseVideoModelName(model,
+      model: parseVideoModelName(model, defaultVideoModel),
       lora,
       style: style && typeof style === "string" ? style.split("- ").map(x => x.trim()).filter(x => x).join(", ") : [].join(", "),
       thumbnail,
@@ -27,6 +29,7 @@ export function parseDatasetReadme(markdown: string = ""): ParsedDatasetReadme {
       music,
       description,
       prompt,
+      orientation: parseVideoOrientation(orientation, defaultVideoOrientation),
     }
   } catch (err) {
     return {
@@ -34,7 +37,7 @@ export function parseDatasetReadme(markdown: string = ""): ParsedDatasetReadme {
       pretty_name: "",
       hf_tags: [], // Hugging Face tags
       tags: [],
-      model:
+      model: defaultVideoModel,
       lora: "",
       style: "",
       thumbnail: "",
@@ -42,6 +45,7 @@ export function parseDatasetReadme(markdown: string = ""): ParsedDatasetReadme {
       music: "",
       description: "",
       prompt: "",
+      orientation: defaultVideoOrientation,
     }
   }
 }
@@ -61,6 +65,7 @@ function parseMarkdown(markdown: string): {
   description: string
   prompt: string
   tags: string
+  orientation: string
 } {
   // console.log("markdown:", markdown)
   // Improved regular expression to find markdown sections and accommodate multi-line content.
@@ -84,5 +89,6 @@ function parseMarkdown(markdown: string): {
     music: sections["music"] || "",
     prompt: sections["prompt"] || "",
     tags: sections["tags"] || "",
+    orientation: sections["orientation"] || "",
   };
 }
src/app/server/actions/utils/parseVideoModelName.ts
CHANGED
@@ -3,7 +3,7 @@ import { VideoGenerationModel } from "@/types"
 export function parseVideoModelName(text: any, defaultToUse: VideoGenerationModel): VideoGenerationModel {
   const rawModelString = `${text || ""}`.trim().toLowerCase()
 
-  let model: VideoGenerationModel = "
+  let model: VideoGenerationModel = defaultToUse || "SVD"
 
   if (
     rawModelString === "stable video diffusion" ||
@@ -20,5 +20,5 @@ export function parseVideoModelName(text: any, defaultToUse: VideoGenerationModel): VideoGenerationModel {
     model = "LaVie"
   }
 
-  return
+  return model
 }
src/app/server/actions/utils/parseVideoOrientation.ts
ADDED
@@ -0,0 +1,31 @@
+import { defaultVideoOrientation } from "@/app/config"
+import { VideoOrientation } from "@/types"
+
+export function parseVideoOrientation(text: any, defaultToUse?: VideoOrientation): VideoOrientation {
+  const rawOrientationString = `${text || ""}`.trim().toLowerCase()
+
+  let orientation: VideoOrientation = defaultToUse || defaultVideoOrientation
+
+  if (
+    rawOrientationString === "landscape" ||
+    rawOrientationString === "horizontal"
+  ) {
+    orientation = "landscape"
+  }
+
+  if (
+    rawOrientationString === "portrait" ||
+    rawOrientationString === "vertical"
+  ) {
+    orientation = "portrait"
+  }
+
+  if (
+    rawOrientationString === "square"
+  ) {
+    orientation = "square"
+  }
+
+
+  return orientation
+}
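A usage sketch (not part of the commit): matching is case-insensitive, "horizontal" and "vertical" are accepted as aliases, and anything unrecognized falls back to the provided default, or to defaultVideoOrientation ("landscape"):

import { parseVideoOrientation } from "@/app/server/actions/utils/parseVideoOrientation"

parseVideoOrientation("Portrait")           // "portrait"
parseVideoOrientation("vertical")           // "portrait"
parseVideoOrientation("horizontal")         // "landscape"
parseVideoOrientation("")                   // "landscape" (defaultVideoOrientation)
parseVideoOrientation("anything", "square") // "square" (falls back to the provided default)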
src/app/views/user-channel-view/index.tsx
CHANGED
@@ -17,7 +17,8 @@ import { PendingVideoList } from "@/app/interface/pending-video-list"
 import { getChannelVideos } from "@/app/server/actions/ai-tube-hf/getChannelVideos"
 import { parseVideoModelName } from "@/app/server/actions/utils/parseVideoModelName"
 import { Select, SelectContent, SelectItem, SelectTrigger, SelectValue } from "@/components/ui/select"
-import { defaultVideoModel, defaultVoice } from "@/app/config"
+import { defaultVideoModel, defaultVideoOrientation, defaultVoice } from "@/app/config"
+import { parseVideoOrientation } from "@/app/server/actions/utils/parseVideoOrientation"
 
 export function UserChannelView() {
   const [_isPending, startTransition] = useTransition()
@@ -26,18 +27,20 @@ export function UserChannelView() {
     defaultSettings.huggingfaceApiKey
   )
 
-  const [
-  const [
-  const [
-  const [
-  const [
-  const [
-  const [
-  const [
-  const [
+  const [title, setTitle] = useState("")
+  const [description, setDescription] = useState("")
+  const [tags, setTags] = useState("")
+  const [prompt, setPrompt] = useState("")
+  const [model, setModel] = useState<VideoGenerationModel>(defaultVideoModel)
+  const [lora, setLora] = useState("")
+  const [style, setStyle] = useState("")
+  const [voice, setVoice] = useState(defaultVoice)
+  const [music, setMusic] = useState("")
+  const [duration, setDuration] = useState(0)
+  const [orientation, setOrientation] = useState(defaultVideoOrientation)
 
   // we do not include the tags in the list of required fields
-  const missingFields = !
+  const missingFields = !title || !description || !prompt
 
   const [isSubmitting, setIsSubmitting] = useState(false)
 
@@ -73,7 +76,7 @@ export function UserChannelView() {
     if (!userChannel) {
       return
     }
-    if (!
+    if (!title || !prompt) {
       console.log("missing title or prompt")
       return
     }
@@ -85,29 +88,31 @@ export function UserChannelView() {
      const newVideo = await submitVideoRequest({
        channel: userChannel,
        apiKey: huggingfaceApiKey,
-       title
-       description
-       prompt
-       model
-       lora
-       style
-       voice
-       music
-       tags:
+       title,
+       description,
+       prompt,
+       model,
+       lora,
+       style,
+       voice,
+       music,
+       tags: tags.trim().split(",").map(x => x.trim()).filter(x => x),
+       duration,
+       orientation
      })
 
      // in case of success we update the frontend immediately
-     // with our
+     // with our video
      setUserVideos([newVideo, ...userVideos])
-
-
-
-
-
-
-
-
-
+     setPrompt("")
+     setDescription("")
+     setTags("")
+     setTitle("")
+     setModel(defaultVideoModel)
+     setVoice(defaultVoice)
+     setMusic("")
+     setLora("")
+     setStyle("")
 
      // also renew the cache on Next's side
      /*
@@ -151,9 +156,9 @@ export function UserChannelView() {
          placeholder="Title"
          className="font-mono"
          onChange={(x) => {
-
+           setTitle(x.target.value)
          }}
-         value={
+         value={title}
        />
      </div>
    </div>
@@ -167,9 +172,9 @@ export function UserChannelView() {
          className="font-mono"
          rows={2}
          onChange={(x) => {
-
+           setDescription(x.target.value)
          }}
-         value={
+         value={description}
        />
        <p className="text-neutral-100/70">
          Short description (visible to humans, and used as context by the AI).
@@ -185,9 +190,9 @@ export function UserChannelView() {
          className="font-mono"
          rows={6}
          onChange={(x) => {
-
+           setPrompt(x.target.value)
          }}
-         value={
+         value={prompt}
        />
        <p className="text-neutral-100/70">
          Describe your video content, in a synthetic way.
@@ -200,20 +205,58 @@ export function UserChannelView() {
      <div className="flex flex-col space-y-2 flex-grow">
        <Select
          onValueChange={(value: string) => {
-
+           setModel(parseVideoModelName(value, defaultVideoModel))
          }}
          defaultValue={defaultVideoModel}>
          <SelectTrigger className="">
            <SelectValue placeholder="Video model" />
          </SelectTrigger>
          <SelectContent>
-           <SelectItem value="SVD">SVD</SelectItem>
+           <SelectItem value="SVD">SVD (default)</SelectItem>
            <SelectItem value="HotshotXL">HotshotXL</SelectItem>
            <SelectItem value="LaVie">LaVie</SelectItem>
          </SelectContent>
        </Select>
      </div>
    </div>
+
+   {/*
+
+   <div className="flex flex-row space-x-2 items-start">
+     <label className="flex w-24 pt-1">Video duration:</label>
+     <div className="flex flex-col space-y-2 flex-grow">
+       <Input
+         placeholder="Duration"
+         className="font-mono"
+         onChange={(x) => {
+           // TODO: clamp the value here + on server side
+           setDuration(parseInt(x.target.value))
+         }}
+         value={title}
+       />
+     </div>
+   </div>
+   */}
+
+   <div className="flex flex-row space-x-2 items-start">
+     <label className="flex w-24 pt-1">Video orientation:</label>
+     <div className="flex flex-col space-y-2 flex-grow">
+       <Select
+         onValueChange={(value: string) => {
+           setOrientation(parseVideoOrientation(value, defaultVideoOrientation))
+         }}
+         defaultValue={defaultVideoOrientation}>
+         <SelectTrigger className="">
+           <SelectValue placeholder="Video orientation" />
+         </SelectTrigger>
+         <SelectContent>
+           <SelectItem value="Landscape">Landscape (default)</SelectItem>
+           <SelectItem value="Portrait">Portrait</SelectItem>
+           {/* <SelectItem value="LaVie">Square</SelectItem> */}
+         </SelectContent>
+       </Select>
+     </div>
+   </div>
 
    <div className="flex flex-row space-x-2 items-start">
      <label className="flex w-24 pt-1">Tags (optional):</label>
@@ -222,9 +265,9 @@ export function UserChannelView() {
          placeholder="Tags"
          className="font-mono"
          onChange={(x) => {
-
+           setTags(x.target.value)
          }}
-         value={
+         value={tags}
        />
        <p className="text-neutral-100/70">
          Comma-separated tags (eg. "Education, Sports")
src/types.ts
CHANGED
@@ -231,6 +231,11 @@ export type ChannelInfo = {
   tags: string[]
 
   updatedAt: string
+
+  /**
+   * Default video orientation
+   */
+  orientation: VideoOrientation
 }
 
 export type VideoStatus =
@@ -308,8 +313,22 @@ export type VideoRequest = {
    * ID of the channel
    */
   channel: ChannelInfo
+
+  /**
+   * Video orientation
+   */
+  orientation: VideoOrientation
+
+  /**
+   * Video duration
+   */
+  duration: number
 }
 
+export type VideoOrientation =
+  | "portrait"
+  | "landscape"
+  | "square"
 
 export type VideoInfo = {
   /**
@@ -400,6 +419,26 @@ export type VideoInfo = {
    * The channel
    */
   channel: ChannelInfo
+
+  /**
+   * Video duration
+   */
+  duration: number
+
+  /**
+   * Video width (eg. 1024)
+   */
+  width: number
+
+  /**
+   * Video height (eg. 576)
+   */
+  height: number
+
+  /**
+   * General video aspect ratio
+   */
+  orientation: VideoOrientation
 }
 
 export type VideoGenerationModel =
@@ -451,6 +490,7 @@ export type ParsedDatasetReadme = {
   hf_tags: string[]
   description: string
   prompt: string
+  orientation: VideoOrientation
 }
 
 export type ParsedMetadataAndContent = {
@@ -473,6 +513,7 @@ export type ParsedDatasetPrompt = {
   thumbnail: string
   voice: string
   music: string
+  orientation: VideoOrientation
 }
 
 export type UpdateQueueRequest = {
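Illustrative only (not part of the commit): with the new fields, a VideoInfo is expected to keep orientation consistent with width and height, which is what spreading orientationToWidthHeight() achieves. A hypothetical fragment:

import { VideoInfo } from "@/types"

const example: Pick<VideoInfo, "duration" | "width" | "height" | "orientation"> = {
  duration: 0,             // not known yet at request time
  orientation: "portrait",
  width: 576,              // = orientationToWidthHeight("portrait").width
  height: 1024,            // = orientationToWidthHeight("portrait").height
}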