machineuser committed on
Commit
5852013
1 Parent(s): 4e75983

Sync widgets demo

Browse files
packages/tasks/src/tasks/depth-estimation/data.ts CHANGED
@@ -28,8 +28,8 @@ const taskData: TaskDataCustom = {
28
  id: "Intel/dpt-large",
29
  },
30
  {
31
- description: "Strong Depth Estimation model trained on the KITTI dataset.",
32
- id: "facebook/dpt-dinov2-large-kitti",
33
  },
34
  {
35
  description: "A strong monocular depth estimation model.",
@@ -42,8 +42,12 @@ const taskData: TaskDataCustom = {
42
  id: "radames/dpt-depth-estimation-3d-voxels",
43
  },
44
  {
45
- description: "An application that can estimate the depth in a given image.",
46
- id: "keras-io/Monocular-Depth-Estimation",
 
 
 
 
47
  },
48
  ],
49
  summary: "Depth estimation is the task of predicting depth of the objects present in an image.",
 
28
  id: "Intel/dpt-large",
29
  },
30
  {
31
+ description: "Strong Depth Estimation model trained on a big compilation of datasets.",
32
+ id: "LiheYoung/depth-anything-large-hf",
33
  },
34
  {
35
  description: "A strong monocular depth estimation model.",
 
42
  id: "radames/dpt-depth-estimation-3d-voxels",
43
  },
44
  {
45
+ description: "An application to compare the outputs of different depth estimation models.",
46
+ id: "LiheYoung/Depth-Anything",
47
+ },
48
+ {
49
+ description: "An application to try state-of-the-art depth estimation.",
50
+ id: "merve/compare_depth_models",
51
  },
52
  ],
53
  summary: "Depth estimation is the task of predicting depth of the objects present in an image.",
packages/tasks/src/tasks/mask-generation/data.ts CHANGED
@@ -3,14 +3,51 @@ import type { TaskDataCustom } from "..";
3
  const taskData: TaskDataCustom = {
4
  datasets: [],
5
  demo: {
6
- inputs: [],
7
- outputs: [],
 
 
 
 
 
 
 
 
 
 
8
  },
9
  metrics: [],
10
- models: [],
11
- spaces: [],
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
12
  summary:
13
- "Mask generation is creating a binary image that identifies a specific object or region of interest in an input image. Masks are often used in segmentation tasks, where they provide a precise way to isolate the object of interest for further processing or analysis.",
14
  widgetModels: [],
15
  youtubeId: "",
16
  };
 
3
  const taskData: TaskDataCustom = {
4
  datasets: [],
5
  demo: {
6
+ inputs: [
7
+ {
8
+ filename: "mask-generation-input.png",
9
+ type: "img",
10
+ },
11
+ ],
12
+ outputs: [
13
+ {
14
+ filename: "mask-generation-output.png",
15
+ type: "img",
16
+ },
17
+ ],
18
  },
19
  metrics: [],
20
+ models: [
21
+ {
22
+ description: "Small yet powerful mask generation model.",
23
+ id: "Zigeng/SlimSAM-uniform-50",
24
+ },
25
+ {
26
+ description: "Very strong mask generation model.",
27
+ id: "facebook/sam-vit-huge",
28
+ },
29
+ ],
30
+ spaces: [
31
+ {
32
+ description:
33
+ "An application that combines a mask generation model with an image embedding model for open-vocabulary image segmentation.",
34
+ id: "SkalskiP/SAM_and_MetaCLIP",
35
+ },
36
+ {
37
+ description: "An application that compares the performance of a large and a small mask generation model.",
38
+ id: "merve/slimsam",
39
+ },
40
+ {
41
+ description: "An application based on an improved mask generation model.",
42
+ id: "linfanluntan/Grounded-SAM",
43
+ },
44
+ {
45
+ description: "An application to remove objects from videos using mask generation models.",
46
+ id: "SkalskiP/SAM_and_ProPainter",
47
+ },
48
+ ],
49
  summary:
50
+ "Mask generation is the task of generating masks that identify a specific object or region of interest in a given image. Masks are often used in segmentation tasks, where they provide a precise way to isolate the object of interest for further processing or analysis.",
51
  widgetModels: [],
52
  youtubeId: "",
53
  };
packages/tasks/src/tasks/text-generation/data.ts CHANGED
@@ -12,12 +12,12 @@ const taskData: TaskDataCustom = {
12
  id: "the_pile",
13
  },
14
  {
15
- description: "A crowd-sourced instruction dataset to develop an AI assistant.",
16
- id: "OpenAssistant/oasst1",
17
  },
18
  {
19
- description: "A crowd-sourced instruction dataset created by Databricks employees.",
20
- id: "databricks/databricks-dolly-15k",
21
  },
22
  ],
23
  demo: {
@@ -59,66 +59,50 @@ const taskData: TaskDataCustom = {
59
  id: "bigcode/starcoder",
60
  },
61
  {
62
- description: "A model trained to follow instructions, uses Pythia-12b as base model.",
63
- id: "databricks/dolly-v2-12b",
64
  },
65
  {
66
- description: "A model trained to follow instructions curated by community, uses Pythia-12b as base model.",
67
- id: "OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5",
68
  },
69
  {
70
- description: "A large language model trained to generate text in English.",
71
- id: "stabilityai/stablelm-tuned-alpha-7b",
72
  },
73
  {
74
- description: "A model trained to follow instructions, based on mosaicml/mpt-7b.",
75
- id: "mosaicml/mpt-7b-instruct",
76
  },
77
  {
78
- description: "A large language model trained to generate text in English.",
79
- id: "EleutherAI/pythia-12b",
80
- },
81
- {
82
- description: "A large text-to-text model trained to follow instructions.",
83
- id: "google/flan-ul2",
84
- },
85
- {
86
- description: "A large and powerful text generation model.",
87
- id: "tiiuae/falcon-40b",
88
- },
89
- {
90
- description: "State-of-the-art open-source large language model.",
91
  id: "meta-llama/Llama-2-70b-hf",
92
  },
93
  ],
94
  spaces: [
95
  {
96
- description: "A robust text generation model that can perform various tasks through natural language prompting.",
97
- id: "huggingface/bloom_demo",
98
  },
99
  {
100
- description: "An text generation based application that can write code for 80+ languages.",
101
- id: "bigcode/bigcode-playground",
102
  },
103
  {
104
- description: "An text generation based application for conversations.",
105
- id: "h2oai/h2ogpt-chatbot",
106
  },
107
  {
108
  description: "An text generation application that combines OpenAI and Hugging Face models.",
109
  id: "microsoft/HuggingGPT",
110
  },
111
  {
112
- description: "An text generation application that uses StableLM-tuned-alpha-7b.",
113
- id: "stabilityai/stablelm-tuned-alpha-chat",
114
- },
115
- {
116
- description: "An UI that uses StableLM-tuned-alpha-7b.",
117
- id: "togethercomputer/OpenChatKit",
118
  },
119
  ],
120
  summary:
121
- "Generating text is the task of producing new text. These models can, for example, fill in incomplete text or paraphrase.",
122
  widgetModels: ["HuggingFaceH4/zephyr-7b-beta"],
123
  youtubeId: "Vpjb1lu0MDk",
124
  };
 
12
  id: "the_pile",
13
  },
14
  {
15
+ description: "Truly open-source, curated and cleaned dialogue dataset.",
16
+ id: "HuggingFaceH4/ultrachat_200k",
17
  },
18
  {
19
+ description: "An instruction dataset with preference ratings on responses.",
20
+ id: "openbmb/UltraFeedback",
21
  },
22
  ],
23
  demo: {
 
59
  id: "bigcode/starcoder",
60
  },
61
  {
62
+ description: "A very powerful text generation model.",
63
+ id: "mistralai/Mixtral-8x7B-Instruct-v0.1",
64
  },
65
  {
66
+ description: "Small yet powerful text generation model.",
67
+ id: "microsoft/phi-2",
68
  },
69
  {
70
+ description: "A very powerful model that can chat, do mathematical reasoning and write code.",
71
+ id: "openchat/openchat-3.5-0106",
72
  },
73
  {
74
+ description: "Very strong yet small assistant model.",
75
+ id: "HuggingFaceH4/zephyr-7b-beta",
76
  },
77
  {
78
+ description: "Very strong open-source large language model.",
 
 
 
 
 
 
 
 
 
 
 
 
79
  id: "meta-llama/Llama-2-70b-hf",
80
  },
81
  ],
82
  spaces: [
83
  {
84
+ description: "A leaderboard to compare different open-source text generation models based on various benchmarks.",
85
+ id: "HuggingFaceH4/open_llm_leaderboard",
86
  },
87
  {
88
+ description: "An text generation based application based on a very powerful LLaMA2 model.",
89
+ id: "ysharma/Explore_llamav2_with_TGI",
90
  },
91
  {
92
+ description: "An text generation based application to converse with Zephyr model.",
93
+ id: "HuggingFaceH4/zephyr-chat",
94
  },
95
  {
96
  description: "An text generation application that combines OpenAI and Hugging Face models.",
97
  id: "microsoft/HuggingGPT",
98
  },
99
  {
100
+ description: "An chatbot to converse with a very powerful text generation model.",
101
+ id: "mlabonne/phixtral-chat",
 
 
 
 
102
  },
103
  ],
104
  summary:
105
+ "Generating text is the task of generating new text given another text. These models can, for example, fill in incomplete text or paraphrase.",
106
  widgetModels: ["HuggingFaceH4/zephyr-7b-beta"],
107
  youtubeId: "Vpjb1lu0MDk",
108
  };
packages/tasks/src/tasks/text-to-image/data.ts CHANGED
@@ -79,13 +79,17 @@ const taskData: TaskDataCustom = {
79
  id: "latent-consistency/lcm-lora-for-sdxl",
80
  },
81
  {
82
- description: "A powerful text-to-image application that can generate 3D representations.",
83
- id: "hysts/Shap-E",
84
  },
85
  {
86
  description: "An application for `text-to-image`, `image-to-image` and image inpainting.",
87
  id: "ArtGAN/Stable-Diffusion-ControlNet-WebUI",
88
  },
 
 
 
 
89
  ],
90
  summary:
91
  "Generates images from input text. These models can be used to generate and modify images based on text prompts.",
 
79
  id: "latent-consistency/lcm-lora-for-sdxl",
80
  },
81
  {
82
+ description: "A gallery to explore various text-to-image models.",
83
+ id: "multimodalart/LoraTheExplorer",
84
  },
85
  {
86
  description: "An application for `text-to-image`, `image-to-image` and image inpainting.",
87
  id: "ArtGAN/Stable-Diffusion-ControlNet-WebUI",
88
  },
89
+ {
90
+ description: "An application to generate realistic images given photos of a person and a prompt.",
91
+ id: "InstantX/InstantID",
92
+ },
93
  ],
94
  summary:
95
  "Generates images from input text. These models can be used to generate and modify images based on text prompts.",
packages/tasks/src/tasks/zero-shot-image-classification/data.ts CHANGED
@@ -52,9 +52,8 @@ const taskData: TaskDataCustom = {
52
  id: "openai/clip-vit-base-patch16",
53
  },
54
  {
55
- description:
56
- "Robust image classification model trained on publicly available image-caption data trained on additional high pixel data for better performance.",
57
- id: "openai/clip-vit-large-patch14-336",
58
  },
59
  {
60
  description: "Strong image classification model for biomedical domain.",
@@ -64,12 +63,16 @@ const taskData: TaskDataCustom = {
64
  spaces: [
65
  {
66
  description:
67
- "An application that leverages zero shot image classification to find best captions to generate an image. ",
68
  id: "pharma/CLIP-Interrogator",
69
  },
 
 
 
 
70
  ],
71
  summary:
72
- "Zero shot image classification is the task of classifying previously unseen classes during training of a model.",
73
  widgetModels: ["openai/clip-vit-large-patch14-336"],
74
  youtubeId: "",
75
  };
 
52
  id: "openai/clip-vit-base-patch16",
53
  },
54
  {
55
+ description: "Strong zero-shot image classification model.",
56
+ id: "google/siglip-base-patch16-224",
 
57
  },
58
  {
59
  description: "Strong image classification model for biomedical domain.",
 
63
  spaces: [
64
  {
65
  description:
66
+ "An application that leverages zero-shot image classification to find best captions to generate an image. ",
67
  id: "pharma/CLIP-Interrogator",
68
  },
69
+ {
70
+ description: "An application to compare different zero-shot image classification models. ",
71
+ id: "merve/compare_clip_siglip",
72
+ },
73
  ],
74
  summary:
75
+ "Zero-shot image classification is the task of classifying previously unseen classes during training of a model.",
76
  widgetModels: ["openai/clip-vit-large-patch14-336"],
77
  youtubeId: "",
78
  };
packages/tasks/src/tasks/zero-shot-object-detection/data.ts CHANGED
@@ -47,7 +47,12 @@ const taskData: TaskDataCustom = {
47
  id: "google/owlv2-base-patch16-ensemble",
48
  },
49
  ],
50
- spaces: [],
 
 
 
 
 
51
  summary:
52
  "Zero-shot object detection is a computer vision task to detect objects and their classes in images, without any prior training or knowledge of the classes. Zero-shot object detection models receive an image as input, as well as a list of candidate classes, and output the bounding boxes and labels where the objects have been detected.",
53
  widgetModels: [],
 
47
  id: "google/owlv2-base-patch16-ensemble",
48
  },
49
  ],
50
+ spaces: [
51
+ {
52
+ description: "A demo to try the state-of-the-art zero-shot object detection model, OWLv2.",
53
+ id: "merve/owlv2",
54
+ },
55
+ ],
56
  summary:
57
  "Zero-shot object detection is a computer vision task to detect objects and their classes in images, without any prior training or knowledge of the classes. Zero-shot object detection models receive an image as input, as well as a list of candidate classes, and output the bounding boxes and labels where the objects have been detected.",
58
  widgetModels: [],