Mirror of https://github.com/gradio-app/gradio.git (synced 2025-04-12 12:40:29 +08:00)
Semantic search in the playground (#10511)
* wording
* something
* changes
* some refactoring
* use error modal
* formatting
* add changeset
* fix problems with guide
* formatting
* oops
* changes
* scripts
* some experiment
* merge
* changes
* use branch until merge
* add changeset
* cleanup
* fix preview demo width
* multiple lines in the prompt
* better font-sizes
* remove white gap above code
* fix enter creating new line
* better styling for suggested demos
* mark edited demos in ui
* font, and arrow issue
* pressing x on error modal actually closes
* random typo on frontend guide
* upload demo requirements
* hide preview for non-lite demos
* fix existing code too long error
* fixes
* formatting
* add changeset
* error regen fix

---------

Co-authored-by: gradio-pr-bot <gradio-pr-bot@users.noreply.github.com>
parent 1bfb0d6ff4
commit c4aa8864da
.changeset/tired-eggs-greet.md (new file, 7 lines)
@@ -0,0 +1,7 @@
---
"@gradio/core": minor
"gradio": minor
"website": minor
---

feat:Semantic search in the playground
@@ -365,5 +365,5 @@ We can then use the `HelloWorld.svx` file in our components:

## Conclusion

You now how to create delightful frontends for your components!
You now know how to create delightful frontends for your components!
js/_website/generate_jsons/check_lite_demos.py (new file, 22 lines)
@@ -0,0 +1,22 @@
import os


def main():
    demo_to_reqs = {}
    for demo in os.listdir("demo"):
        requirements = ""
        # only check demos with requirements, because we don't need to check the others
        if os.path.exists(os.path.join("demo", demo, "requirements.txt")):
            with open(os.path.join("demo", demo, "requirements.txt"), "r") as f:
                requirements = f.read()
        demo_to_reqs[demo] = requirements.split("\n")
    non_lite_reqs = ['spacy', 'cmake', 'opencv-python-headless', 'torch==2.5.1', 'open3d', 'tensorflow', 'torch>=2.3.1', 'psycopg2', 'torchvision==0.13.0', 'onnxruntime-gpu', 'torchaudio==0.12.0', 'xgboost==1.7.6', 'torch', 'safetensors==0.4.3', 'torchaudio', 'torch==1.12.0', 'shap', 'prophet==1.1.2', 'gradio-pdf==0.0.7', 'datasets', 'gradio-datetimerange', 'safetensors>=0.4.1', 'safetensors>=0.4.1', 'numba>=0.45.1', 'safetensors>=0.4.1', 'safetensors>=0.4.3', 'torch>=2.0.0', 'safetensors>=0.3.1', 'safetensors>=0.3.1', 'safetensors>=0.4.1', 'torch', 'torch>=1.9', 'jiter<1,>=0.4.0', 'jiter<1,>=0.4.0', 'tokenizers<0.22,>=0.21', 'tokenizers<0.22,>=0.21', 'tokenizers<0.22,>=0.21', 'tokenizers<0.22,>=0.21', 'aiortc', 'psutil', 'psutil<6,>=2', 'psutil', 'numba>=0.53', 'zstandard<0.24.0,>=0.23.0', 'combo-lock~=0.2', 'ovos-bus-client<0.2.0,>=0.0.8', 'watchdog', 'combo-lock~=0.2', 'combo-lock~=0.2', 'pyee<12.0.0,>=8.1.0', 'combo-lock<1.0.0,>=0.2.1', 'git+https://github.com/huggingface/parler-tts.git', 'git+https://github.com/huggingface/transformers', 'git+https://github.com/nielsrogge/transformers.git@add_dpt_redesign#egg=transformers']
    non_lite_demos = []
    for demo, requirements in demo_to_reqs.items():
        for req in requirements:
            if req in non_lite_reqs:
                non_lite_demos.append(demo)
                break
    print(non_lite_demos)
    print(len(non_lite_demos))


if __name__ == "__main__":
    main()
js/_website/generate_jsons/chunking.py (new file, 210 lines)
@@ -0,0 +1,210 @@
import re
from dataclasses import dataclass
from typing import List, Tuple
from transformers import AutoTokenizer


@dataclass
class BlogChunks:
    title: str
    content: List[str]
    type: str
    url: str


class TextChunker:
    def __init__(self, model_name: str = "voyageai/voyage-3-large"):
        """Initialize the chunker with a tokenizer."""
        self.tokenizer = AutoTokenizer.from_pretrained(model_name)

    def count_tokens(self, text: str) -> int:
        """Count the number of tokens in a text string."""
        return len(self.tokenizer.encode(text))

    def find_chunk_boundary(self, text: str, target_tokens: int, overlap_tokens: int) -> Tuple[str, str]:
        """
        Find a boundary near the target token count, breaking at sentence boundaries when possible
        but enforcing a hard limit of target_tokens. Includes overlap in the next chunk.
        Returns a tuple of (chunk, remainder).
        """
        if not text:
            return "", ""

        # First try to split by sentence
        sentence_pattern = r'(?<=[.!?])\s+(?=[A-Z])'
        sentences = re.split(sentence_pattern, text)

        current_chunk = []
        current_tokens = 0
        overlap_start_idx = 0  # Track where to start overlap

        for i, sentence in enumerate(sentences):
            sentence_tokens = self.count_tokens(sentence)

            # If this would exceed our hard limit of target_tokens
            if current_tokens + sentence_tokens > target_tokens:
                if not current_chunk:
                    # Need to split the sentence by words
                    words = sentence.split()
                    word_chunk = []

                    for word in words:
                        word_tokens = self.count_tokens(word + ' ')
                        if current_tokens + word_tokens > target_tokens:
                            break
                        word_chunk.append(word)
                        current_tokens += word_tokens

                    if not word_chunk:  # If even a single word is too long
                        return sentence[:target_tokens], sentence[target_tokens:]

                    chunk_text = ' '.join(word_chunk)
                    # Include some of the end of this chunk in the next chunk for overlap
                    overlap_point = max(0, len(word_chunk) - int(len(word_chunk) * 0.5))
                    remainder = ' '.join(words[overlap_point:])
                    if i < len(sentences) - 1:
                        remainder += ' ' + ' '.join(sentences[i+1:])
                    return chunk_text, remainder.strip()

                chunk_text = ' '.join(current_chunk)
                # Start the next chunk from roughly halfway through this one for overlap
                overlap_start_idx = max(0, i - len(current_chunk) // 2)
                remainder = ' '.join(sentences[overlap_start_idx:])
                return chunk_text, remainder.strip()

            # If we would exceed the target token count (but not hard limit)
            if current_tokens + sentence_tokens > target_tokens and current_chunk:
                chunk_text = ' '.join(current_chunk)
                # Start the next chunk from roughly halfway through this one for overlap
                overlap_start_idx = max(0, i - len(current_chunk) // 2)
                remainder = ' '.join(sentences[overlap_start_idx:])
                return chunk_text, remainder.strip()

            current_chunk.append(sentence)
            current_tokens += sentence_tokens

        # If we get here, return the entire text as one chunk
        return ' '.join(current_chunk), ''

    def chunk_page(
        self,
        title: str,
        url: str,
        content: str,
        type: str,
        target_length: int = 100,  # target token count per chunk
        overlap_percentage: float = 0.5  # 50% overlap by default
    ) -> BlogChunks:
        """
        Chunks document content into pieces of roughly target_length tokens with the
        specified overlap percentage. Returns a BlogChunks holding the chunked content.
        """
        # Clean the content first
        content = self._clean_content(content)

        # Container for this page's chunks
        chunks = BlogChunks(
            title=title,
            content=[],
            type=type,
            url=url
        )

        overlap_tokens = int(target_length * overlap_percentage)
        remaining_text = content
        current_section = None

        while remaining_text:
            # Handle section headers
            if remaining_text.lstrip().startswith('#'):
                section_end = remaining_text.find('\n')
                if section_end == -1:
                    break
                current_section = remaining_text[:section_end].lstrip('#').strip()
                remaining_text = remaining_text[section_end:].strip()
                continue

            # Find natural break point
            chunk_text, remaining_text = self.find_chunk_boundary(
                remaining_text,
                target_length,
                overlap_tokens
            )

            if chunk_text:
                chunks.content.append(chunk_text)

            if not remaining_text:
                break
        for i, chunk in enumerate(chunks.content):
            if "Demos" in chunk and "demo.launch()" in chunk:
                chunks.content[i] = chunk.split("Demos")[0] + chunk.split("demo.launch()")[1]
            if "Open in" in chunk and "demo.launch()" in chunk:
                chunks.content.pop(i)

        print(f"\nChunked: {title}")
        # print(f"\n\n\n{'*'*50}")
        # print(f"Target Length: {target_length}")
        # print(f"\n{'*'*50}")
        # print(f"{'='*50}")
        # for chunk in all_chunks:
        #     print(f"Length: {self.count_tokens(chunk.content)} tokens")
        #     print(f"Content: {chunk.content}")
        #     print(f"{'='*50}")
        # print(f"\n\n\n{'*'*50}")
        # print([chunk.content for chunk in all_chunks])

        return chunks

    def _clean_content(self, content: str) -> str:
        """Clean the content by removing code blocks and markdown links."""
        # Remove triple backtick code blocks with optional language
        content = re.sub(r'```(?:[a-zA-Z]*\s*)?[\s\S]*?```', '', content)

        content = re.sub(r'`{1,2}\w*\n[\s\S]*?(?:`{1,2})', '', content)

        # Remove any remaining single or double backtick blocks
        content = re.sub(r'``[^`]*(?:`[^`]+`)*[^`]*``', '', content)  # Double backticks
        content = re.sub(r'`[^`]*`', '', content)  # Single backticks

        # Remove markdown links but keep text
        content = re.sub(r'\[([^\]]+)\]\([^\)]+\)', r'\1', content)

        # Replace common HTML entities with their readable equivalents
        html_entities = {
            '&quot;': '"',
            '&#39;': "'",
            '&#x27;': "'",
            '&lt;': '<',
            '&gt;': '>',
            '&amp;': '&',
            '&ndash;': '-',
            '&mdash;': '--',
            '&nbsp;': ' ',
            '&rsquo;': "'",
            '&lsquo;': "'",
            '&rdquo;': '"',
            '&ldquo;': '"',
            '&#8217;': "'",
            '&#8216;': "'",
            '&#8221;': '"',
            '&#8220;': '"',
            '&hellip;': '...',
            '&apos;': "'",
            '&bull;': '•',
            '&middot;': '·',
            '&#8226;': '•'
        }

        for entity, replacement in html_entities.items():
            content = content.replace(entity, replacement)

        # Also handle numeric entities like &#34; (double quote)
        content = re.sub(r'&#(\d+);', lambda m: chr(int(m.group(1))), content)

        # Handle hex entities like &#x22;
        content = re.sub(r'&#x([0-9a-fA-F]+);', lambda m: chr(int(m.group(1), 16)), content)

        # Normalize whitespace (including handling of newlines)
        content = re.sub(r'\s+', ' ', content)

        return content.strip()
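A minimal usage sketch of the chunker above (illustrative only, not part of this commit; it assumes the file is importable as chunking, that the voyageai/voyage-3-large tokenizer can be downloaded, and that the title, url and content values are made up for the example):

    from chunking import TextChunker

    chunker = TextChunker()
    page = chunker.chunk_page(
        title="Quickstart",                 # hypothetical page title
        url="/main/guides/quickstart",      # hypothetical page URL
        content="# Quickstart\nGradio lets you build machine learning demos in Python. "
                "An Interface wraps a function with typed inputs and outputs.",
        type="GUIDE",
        target_length=100,                  # ~100 tokens per chunk (the default)
    )
    for piece in page.content:
        print(chunker.count_tokens(piece), piece)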
js/_website/generate_jsons/embed.py (new file, 161 lines)
@@ -0,0 +1,161 @@
import libsql_client as libsql
import os
from openai import OpenAI
import numpy as np
from chunking import TextChunker
from tqdm import tqdm
import requests
import voyageai

# vo = voyageai.Client(api_key=os.getenv("VOYAGE_API_KEY"),)
url = os.getenv("TURSO_DATABASE_URL")
auth_token = os.getenv("TURSO_AUTH_TOKEN")
db_client = libsql.create_client_sync(url, auth_token=auth_token)

# openai_deepinfra = OpenAI(
#     api_key=os.getenv("DEEPINFRA_API_TOKEN"),
#     base_url="https://api.deepinfra.com/v1/openai"
# )


# def embed_and_upload(title, _type, url, contents):
#     documents_embeddings = vo.embed(contents, model="voyage-3", input_type="document").embeddings

#     values = [
#         (title, _type, url, content, np.array(embedding, dtype=np.float32))
#         for content, embedding in zip(contents, documents_embeddings)
#     ]

#     placeholders = ','.join(['(?, ?, ?, ?, ?)'] * len(contents))

#     flattened_values = [item for tup in values for item in tup]

#     db_client.execute(
#         f"INSERT INTO EMBEDDINGS (title, type, url, content, embedding) VALUES {placeholders}",
#         flattened_values
#     )
#     return

# url = "http://localhost:5174/search-api"
# response = requests.get(url)
# data = response.json()

# guides = [d for d in data if d["type"] == "GUIDE"]
# docs = [d for d in data if d["type"] == "DOCS"]

# chunker = TextChunker()

# DOCS_AND_GUIDES_DESCRIPTION_SYSTEM_PROMPT = """
# You are a helpful assistant that summarizes pages in the Gradio website in only one sentence.
# You are given a page that is either a guide or docs. Both will consist of natural language mixed with python code.
# Your summaries will be used for embedding search that points to the page, so please be concise, accurate and include the most important parts. But it can only be one sentence.
# Your sentence should clarify what type of questions the page answers.
# Do not include 'gr.' before the function or class name. Do not ever use backticks or special code formatting in your response. For example write Interface instead of `Interface`.
# """

# def describe_page(content: str):
#     description = openai_deepinfra.chat.completions.create(
#         model="Qwen/Qwen2.5-72B-Instruct",
#         messages=[
#             {"role": "system", "content": DOCS_AND_GUIDES_DESCRIPTION_SYSTEM_PROMPT},
#             {"role": "user", "content": content}
#         ],
#     )

#     description = description.choices[0].message.content

#     return description

# for guide in tqdm(guides[1:]): # ignore weird
#     description = describe_page(guide["content"])
#     chunks = chunker.chunk_page(guide["title"], guide["slug"], description, guide["type"])
#     try:
#         embed_and_upload(chunks.title, chunks.type, chunks.url, chunks.content)
#     except Exception as e:
#         print(e)
#         db_client.close()
#         1/0

# for page in tqdm(docs):
#     description = describe_page(guide["content"])
#     chunks = chunker.chunk_page(guide["title"], guide["slug"], description, guide["type"])
#     try:
#         embed_and_upload(chunks.title, chunks.type, chunks.url, chunks.content)
#     except Exception as e:
#         print(e)
#         db_client.close()
#         1/0


# demo_descriptions = []
# def get_demo_descriptions():
#     results = db_client.execute(
#         """
#         SELECT
#             MIN(id) as id,
#             title,
#             type,
#             url,
#             STRING_AGG(content, ' ') as combined_content
#         FROM EMBEDDINGS_LLM_250
#         WHERE type = 'DEMO'
#         GROUP BY title, type, url;
#         """
#     )
#     for result in results:
#         demo_descriptions.append(
#             {
#                 "title": result["title"],
#                 "url": result["url"],
#                 "content": result["combined_content"]
#             }
#         )
#     return

# get_demo_descriptions()

# for demo in tqdm(demo_descriptions):
#     chunks = chunker.chunk_page(demo["title"], demo["url"], demo["content"], "DEMO")
#     try:
#         embed_and_upload(chunks.title, chunks.type, chunks.url, chunks.content)
#     except Exception as e:
#         print(e)
#         db_client.close()
#         1/0

demo_to_reqs = {}
for demo in os.listdir("demo"):
    if os.path.exists(os.path.join("demo", demo, "requirements.txt")):
        with open(os.path.join("demo", demo, "requirements.txt"), "r") as f:
            reqs = f.read()
            reqs = reqs.split("\n")
            demo_to_reqs[demo] = reqs

for title, requirements in tqdm(demo_to_reqs.items()):
    db_client.execute(
        """UPDATE EMBEDDINGS
        SET requirements = ?
        WHERE type = 'DEMO' AND title = ?
        """,
        (requirements, title.replace("_", " ").capitalize())
    )
    print(title.replace("_", " ").capitalize())

# demo_to_reqs = {}
# for demo in os.listdir("demo"):
#     if os.path.exists(os.path.join("demo", demo, "requirements.txt")):
#         with open(os.path.join("demo", demo, "requirements.txt"), "r") as f:
#             demo_to_reqs[demo] = f.read()

# for title, requirements in tqdm(demo_to_reqs.items()):
#     db_client.execute(
#         """UPDATE EMBEDDINGS
#         SET requirements = ?
#         WHERE type = 'DEMO' AND title = ?
#         """,
#         (requirements, title.replace("_", " ").capitalize())
#     )
#     print(title.replace("_", " ").capitalize())


db_client.close()
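A rough sketch of how the commented-out pieces of this file would fit together if re-enabled (an assumption for orientation, not shown in the commit; it presumes VOYAGE_API_KEY, DEEPINFRA_API_TOKEN, the Turso credentials, and the local search-api endpoint are all available, and that describe_page and embed_and_upload above are uncommented):

    # Hypothetical re-assembly of the ingestion loop sketched in the comments above.
    vo = voyageai.Client(api_key=os.getenv("VOYAGE_API_KEY"))
    chunker = TextChunker()

    data = requests.get("http://localhost:5174/search-api").json()
    guides = [d for d in data if d["type"] == "GUIDE"]

    for guide in tqdm(guides):
        description = describe_page(guide["content"])  # one-sentence LLM summary of the page
        chunks = chunker.chunk_page(guide["title"], guide["slug"], description, guide["type"])
        embed_and_upload(chunks.title, chunks.type, chunks.url, chunks.content)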
@@ -106,8 +106,8 @@ create_dir_if_not_exists(make_dir(WEBSITE_DIR, "src/lib/json/guides"))

demos.generate(make_dir(WEBSITE_DIR, "src/lib/json/demos.json"))
guides.generate(make_dir(WEBSITE_DIR, "src/lib/json/guides/") + "/")
SYSTEM_PROMPT = docs.generate(make_dir(WEBSITE_DIR, "src/lib/json/docs.json"))
_ = docs.generate(make_dir(WEBSITE_DIR, "src/lib/templates/docs.json"))
SYSTEM_PROMPT, FALLBACK_PROMPT = docs.generate(make_dir(WEBSITE_DIR, "src/lib/json/docs.json"))
_, _ = docs.generate(make_dir(WEBSITE_DIR, "src/lib/templates/docs.json"))
changelog.generate(make_dir(WEBSITE_DIR, "src/lib/json/changelog.json"))
get_latest_release()

@@ -120,6 +120,7 @@ with open(make_dir(WEBSITE_DIR, "src/lib/json/system_prompt.json"), "w+") as f:

    json.dump(
        {
            "SYSTEM": SYSTEM_PROMPT,
            "FALLBACK": FALLBACK_PROMPT,
        },
        f,
    )
@@ -10,7 +10,7 @@ import urllib.parse

from gradio_client.documentation import document_cls, generate_documentation
import gradio
from ..guides import guides
from ..guides import guides, guide_names

DIR = os.path.dirname(__file__)
DEMOS_DIR = os.path.abspath(os.path.join(DIR, "../../../../../demo"))
@@ -312,11 +312,12 @@ demo = gr.Interface(fn=greet, inputs="textbox", outputs="textbox")

# Launch the interface.
demo.launch()


Below are all the class and function signatures in the Gradio library.

"""

FALLBACK_PROMPT = SYSTEM_PROMPT

FALLBACK_PROMPT += "Below are all the class and function signatures in the Gradio library: (these are what you will reference as docs)\n\n"

for key in gradio_docs:
    if key in ["events", "events_matrix"]:
        continue
@@ -327,8 +328,9 @@ for key in gradio_docs:

            ': ' + p['annotation']
            + (' = ' + p['default'] if 'default' in p else '')
            for p in o['parameters']])})"""
        SYSTEM_PROMPT += f"{signature}\n"
        SYSTEM_PROMPT += f"{o['description']}\n\n"
        FALLBACK_PROMPT += f"{signature}\n"
        FALLBACK_PROMPT += f"{o['description']}\n\n"

    else:
        for c in gradio_docs[key]:
            o = gradio_docs[key][c]
@@ -337,8 +339,8 @@ for key in gradio_docs:

                ': ' + p['annotation']
                + (' = ' + p['default'] if 'default' in p else '')
                for p in o['parameters']])})"""
            SYSTEM_PROMPT += f"{signature}\n"
            SYSTEM_PROMPT += f"{o['description']}\n\n"
            FALLBACK_PROMPT += f"{signature}\n"
            FALLBACK_PROMPT += f"{o['description']}\n\n"
            if "fns" in o and key != "components":
                for f in o["fns"]:
                    signature = f"""{o['name']}.{f['name']}({', '.join([
@@ -346,12 +348,12 @@ for key in gradio_docs:

                        ': ' + p['annotation']
                        + (' = ' + p['default'] if 'default' in p else '')
                        for p in f['parameters']])})"""
                    SYSTEM_PROMPT += f"{signature}\n"
                    SYSTEM_PROMPT += f"{f['description']}\n\n"
                    FALLBACK_PROMPT += f"{signature}\n"
                    FALLBACK_PROMPT += f"{f['description']}\n\n"

SYSTEM_PROMPT += "\nEvent listeners allow Gradio to respond to user interactions with the UI components defined in a Blocks app. When a user interacts with an element, such as changing a slider value or uploading an image, a function is called.\n"
FALLBACK_PROMPT += "\nEvent listeners allow Gradio to respond to user interactions with the UI components defined in a Blocks app. When a user interacts with an element, such as changing a slider value or uploading an image, a function is called.\n"

SYSTEM_PROMPT += "All event listeners have the same signature:\n"
FALLBACK_PROMPT += "All event listeners have the same signature:\n"

f = gradio_docs["components"]["audio"]["fns"][0]
signature = f"""<component_name>.<event_name>({', '.join([
@@ -359,18 +361,19 @@ signature = f"""<component_name>.<event_name>({', '.join([

        ': ' + p['annotation']
        + (' = ' + p['default'] if 'default' in p else '')
        for p in f['parameters']])})"""
SYSTEM_PROMPT += signature
SYSTEM_PROMPT += "\nEach component only supports some specific events. Below is a list of all gradio components and every event that each component supports. If an event is supported by a component, it is a valid method of the component."
FALLBACK_PROMPT += signature
FALLBACK_PROMPT += "\nEach component only supports some specific events. Below is a list of all gradio components and every event that each component supports. If an event is supported by a component, it is a valid method of the component."
for component in gradio_docs["events_matrix"]:
    SYSTEM_PROMPT += f"{component}: {', '.join(gradio_docs['events_matrix'][component])}\n\n"

    FALLBACK_PROMPT += f"{component}: {', '.join(gradio_docs['events_matrix'][component])}\n\n"

SYSTEM_PROMPT += "Below are examples of full end-to-end Gradio apps:\n\n"
FALLBACK_PROMPT += "Below are examples of full end-to-end Gradio apps:\n\n"


# 'audio_component_events', 'audio_mixer', 'blocks_essay', 'blocks_chained_events', 'blocks_xray', 'chatbot_multimodal', 'sentence_builder', 'custom_css', 'blocks_update', 'fake_gan'
# important_demos = ["annotatedimage_component", "blocks_essay_simple", "blocks_flipper", "blocks_form", "blocks_hello", "blocks_js_load", "blocks_js_methods", "blocks_kinematics", "blocks_layout", "blocks_plug", "blocks_simple_squares", "calculator", "chatbot_consecutive", "chatbot_simple", "chatbot_streaming", "chatinterface_multimodal", "datetimes", "diff_texts", "dropdown_key_up", "fake_diffusion", "fake_gan", "filter_records", "function_values", "gallery_component_events", "generate_tone", "hangman", "hello_blocks", "hello_blocks_decorator", "hello_world", "image_editor", "matrix_transpose", "model3D", "on_listener_decorator", "plot_component", "render_merge", "render_split", "reverse_audio_2", "sales_projections", "sepia_filter", "sort_records", "streaming_simple", "tabbed_interface_lite", "tax_calculator", "theme_soft", "timer", "timer_simple", "variable_outputs", "video_identity"]
important_demos = ['custom_css', "annotatedimage_component", "blocks_essay_simple", "blocks_flipper", "blocks_form", "blocks_hello", "blocks_js_load", "blocks_js_methods", "blocks_kinematics", "blocks_layout", "blocks_plug", "blocks_simple_squares", "calculator", "chatbot_consecutive", "chatbot_simple", "chatbot_streaming", "datetimes", "diff_texts", "dropdown_key_up", "fake_diffusion", "filter_records", "function_values", "gallery_component_events", "generate_tone", "hangman", "hello_blocks", "hello_blocks_decorator", "hello_world", "image_editor", "matrix_transpose", "model3D", "on_listener_decorator", "plot_component", "render_merge", "render_split", "reverse_audio_2", "sepia_filter", "sort_records", "streaming_simple", "tabbed_interface_lite", "tax_calculator", "theme_soft", "timer", "timer_simple", "variable_outputs", "video_identity"]

very_important_demos = ["blocks_essay_simple", "blocks_flipper", "blocks_form", "blocks_hello","reverse_audio_2", "sepia_filter", "sort_records", "streaming_simple", "tabbed_interface_lite", "tax_calculator", "timer_simple", "video_identity"]

def length(demo):
    if os.path.exists(os.path.join(DEMOS_DIR, demo, "run.py")):
@@ -386,6 +389,18 @@ def length(demo):

# print(important_demos)

for demo in important_demos:
    if os.path.exists(os.path.join(DEMOS_DIR, demo, "run.py")):
        demo_file = os.path.join(DEMOS_DIR, demo, "run.py")
    else:
        continue
    with open(demo_file) as run_py:
        demo_code = run_py.read()
    demo_code = demo_code.replace("# type: ignore", "").replace('if __name__ == "__main__":\n    ', "")
    FALLBACK_PROMPT += f"Name: {demo.replace('_', ' ')}\n"
    FALLBACK_PROMPT += "Code: \n\n"
    FALLBACK_PROMPT += f"{demo_code}\n\n"

for demo in very_important_demos:
    if os.path.exists(os.path.join(DEMOS_DIR, demo, "run.py")):
        demo_file = os.path.join(DEMOS_DIR, demo, "run.py")
    else:
@@ -397,183 +412,14 @@ for demo in important_demos:

    SYSTEM_PROMPT += "Code: \n\n"
    SYSTEM_PROMPT += f"{demo_code}\n\n"


SYSTEM_PROMPT += """
FALLBACK_PROMPT += """
The latest version of Gradio includes some breaking changes and important new features you should be aware of. Here is a list of the important changes:

1. Streaming audio, images, and video as input and output are now fully supported in Gradio.

Streaming Outputs:

In some cases, you may want to stream a sequence of outputs rather than show a single output at once. For example, you might have an image generation model and you want to show the image that is generated at each step, leading up to the final image. Or you might have a chatbot which streams its response one token at a time instead of returning it all at once.
In such cases, you can supply a generator function into Gradio instead of a regular function.
Here's an example of a Gradio app that streams a sequence of images:

CODE:

import gradio as gr
import numpy as np
import time

def fake_diffusion(steps):
    rng = np.random.default_rng()
    for i in range(steps):
        time.sleep(1)
        image = rng.random(size=(600, 600, 3))
        yield image
    image = np.ones((1000,1000,3), np.uint8)
    image[:] = [255, 124, 0]
    yield image

demo = gr.Interface(fake_diffusion,
                    inputs=gr.Slider(1, 10, 3, step=1),
                    outputs="image")

demo.launch()


Gradio can stream audio and video directly from your generator function. This lets your user hear your audio or see your video nearly as soon as it's yielded by your function. All you have to do is

Set streaming=True in your gr.Audio or gr.Video output component.
Write a python generator that yields the next "chunk" of audio or video.
Set autoplay=True so that the media starts playing automatically.

For audio, the next "chunk" can be either an .mp3 or .wav file or a bytes sequence of audio. For video, the next "chunk" has to be either .mp4 file or a file with h.264 codec with a .ts extension. For smooth playback, make sure chunks are consistent lengths and larger than 1 second.

Here's an example gradio app that streams audio:

CODE:

import gradio as gr
from time import sleep

def keep_repeating(audio_file):
    for _ in range(10):
        sleep(0.5)
        yield audio_file

gr.Interface(keep_repeating,
             gr.Audio(sources=["microphone"], type="filepath"),
             gr.Audio(streaming=True, autoplay=True)
).launch()


Here's an example gradio app that streams video:

CODE:

import gradio as gr
from time import sleep

def keep_repeating(video_file):
    for _ in range(10):
        sleep(0.5)
        yield video_file

gr.Interface(keep_repeating,
             gr.Video(sources=["webcam"], format="mp4"),
             gr.Video(streaming=True, autoplay=True)
).launch()


Streaming Inputs:

Gradio also allows you to stream images from a user's camera or audio chunks from their microphone into your event handler. This can be used to create real-time object detection apps or conversational chat applications with Gradio.

Currently, the gr.Image and the gr.Audio components support input streaming via the stream event.

Here's an example, which simply returns the webcam stream unmodified:

CODE:

import gradio as gr
import numpy as np
import cv2

def transform_cv2(frame, transform):
    if transform == "cartoon":
        # prepare color
        img_color = cv2.pyrDown(cv2.pyrDown(frame))
        for _ in range(6):
            img_color = cv2.bilateralFilter(img_color, 9, 9, 7)
        img_color = cv2.pyrUp(cv2.pyrUp(img_color))

        # prepare edges
        img_edges = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
        img_edges = cv2.adaptiveThreshold(
            cv2.medianBlur(img_edges, 7),
            255,
            cv2.ADAPTIVE_THRESH_MEAN_C,
            cv2.THRESH_BINARY,
            9,
            2,
        )
        img_edges = cv2.cvtColor(img_edges, cv2.COLOR_GRAY2RGB)
        # combine color and edges
        img = cv2.bitwise_and(img_color, img_edges)
        return img
    elif transform == "edges":
        # perform edge detection
        img = cv2.cvtColor(cv2.Canny(frame, 100, 200), cv2.COLOR_GRAY2BGR)
        return img
    else:
        return np.flipud(frame)


css=".my-group {max-width: 500px !important; max-height: 500px !important;}\n.my-column {display: flex !important; justify-content: center !important; align-items: center !important};"

with gr.Blocks(css=css) as demo:
    with gr.Column(elem_classes=["my-column"]):
        with gr.Group(elem_classes=["my-group"]):
            transform = gr.Dropdown(choices=["cartoon", "edges", "flip"],
                                    value="flip", label="Transformation")
            input_img = gr.Image(sources=["webcam"], type="numpy")
    input_img.stream(transform_cv2, [input_img, transform], [input_img], time_limit=30, stream_every=0.1)


demo.launch()


There are two unique keyword arguments for the stream event:

time_limit - This is the amount of time the gradio server will spend processing the event. Media streams are naturally unbounded so it's important to set a time limit so that one user does not hog the Gradio queue. The time limit only counts the time spent processing the stream, not the time spent waiting in the queue. The orange bar displayed at the bottom of the input image represents the remaining time. When the time limit expires, the user will automatically rejoin the queue.

stream_every - This is the frequency (in seconds) with which the stream will capture input and send it to the server. For demos like image detection or manipulation, setting a smaller value is desired to get a "real-time" effect. For demos like speech transcription, a higher value is useful so that the transcription algorithm has more context of what's being said.


Your streaming function should be stateless. It should take the current input and return its corresponding output. However, there are cases where you may want to keep track of past inputs or outputs. For example, you may want to keep a buffer of the previous k inputs to improve the accuracy of your transcription demo. You can do this with Gradio's gr.State() component.

Let's showcase this with a sample demo:

CODE:

def transcribe_handler(current_audio, state, transcript):
    next_text = transcribe(current_audio, history=state)
    state.append(current_audio)
    state = state[-3:]
    return state, transcript + next_text

with gr.Blocks() as demo:
    with gr.Row():
        with gr.Column():
            mic = gr.Audio(sources="microphone")
            state = gr.State(value=[])
        with gr.Column():
            transcript = gr.Textbox(label="Transcript")
    mic.stream(transcribe_handler, [mic, state, transcript], [state, transcript],
               time_limit=10, stream_every=1)


demo.launch()
2. Audio files are no longer converted to .wav automatically
1. Audio files are no longer converted to .wav automatically

Previously, the default value of the format in the gr.Audio component was wav, meaning that audio files would be converted to the .wav format before being processed by a prediction function or being returned to the user. Now, the default value of format is None, which means any audio files that have an existing format are kept as is.
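(If the old behaviour is needed, a hedged sketch based only on the format parameter described above would be to request the conversion explicitly:

    import gradio as gr

    # Ask for .wav conversion explicitly instead of relying on the old default.
    audio_out = gr.Audio(type="filepath", format="wav")
)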
3. The 'every' parameter is no longer supported in event listeners
2. The 'every' parameter is no longer supported in event listeners

Previously, if you wanted to run an event 'every' X seconds after a certain trigger, you could set `every=` in the event listener. This is no longer supported — do the following instead:
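(The next hunk shows only the tail of the replacement snippet; a fuller sketch of the Timer-based pattern it refers to, assuming the gr.Timer component and its tick event, might look like:

    import gradio as gr

    with gr.Blocks() as demo:
        timer = gr.Timer(1)          # ticks every 1 second
        num = gr.Number(value=0)
        # run the update on every tick instead of passing every= to an event listener
        timer.tick(lambda n: n + 1, inputs=num, outputs=num)

    demo.launch()
)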
@@ -607,20 +453,20 @@ fast_btn = gr.Button("Fast")

fast_btn.click(lambda: gr.Timer(0.1), None, t) # makes timer tick every 0.1s


4. The `undo_btn`, `retry_btn` and `clear_btn` parameters of `ChatInterface` have been removed
5. Passing a tuple to `gr.Code` is not supported
6. The `concurrency_count` parameter has been removed from `.queue()`
7. The `additional_inputs_accordion_name` parameter has been removed from `gr.ChatInterface`
8. The `thumbnail` parameter has been removed from `gr.Interface`
9. The `root` parameter in `gr.FileExplorer` has been removed
10. The `signed_in_value` parameter in `gr.LoginButton` has been removed
11. The `gr.LogoutButton` component has been removed
12. The `gr.make_waveform` method has been removed from the library
13. SVGs are not accepted as input images into the `gr.Image` component unless `type=filepath`
14. The `height` parameter in `gr.DataFrame` has been renamed to `max_height`
15. The `likeable` parameter of `gr.Chatbot` has been removed. The chatbot will display like buttons whenever the `like` event is defined.
16. By default user messages are not likeable in the `gr.Chatbot`. To display like buttons in the user message, set the `user_like_button` parameter of the `like` event to True.
17. The argument for lazy-caching examples has been changed
3. The `undo_btn`, `retry_btn` and `clear_btn` parameters of `ChatInterface` have been removed
4. Passing a tuple to `gr.Code` is not supported
5. The `concurrency_count` parameter has been removed from `.queue()`
6. The `additional_inputs_accordion_name` parameter has been removed from `gr.ChatInterface`
7. The `thumbnail` parameter has been removed from `gr.Interface`
8. The `root` parameter in `gr.FileExplorer` has been removed
9. The `signed_in_value` parameter in `gr.LoginButton` has been removed
10. The `gr.LogoutButton` component has been removed
11. The `gr.make_waveform` method has been removed from the library
12. SVGs are not accepted as input images into the `gr.Image` component unless `type=filepath`
13. The `height` parameter in `gr.DataFrame` has been renamed to `max_height`
14. The `likeable` parameter of `gr.Chatbot` has been removed. The chatbot will display like buttons whenever the `like` event is defined.
15. By default user messages are not likeable in the `gr.Chatbot`. To display like buttons in the user message, set the `user_like_button` parameter of the `like` event to True.
16. The argument for lazy-caching examples has been changed

Previously, to lazy-cache examples, you would pass in “lazy” to the `cache_examples` parameter in `Interface`, `Chatinterface` , or `Examples`. Now, there is a separate `cache_mode` parameter, which governs whether caching should be `"lazy"` or `"eager"` . So if your code was previously:
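(The hunk is cut before the before/after snippet that follows in the prompt; as a hedged illustration of the change just described, not the elided snippet itself, the parameter names being taken from the paragraph above:

    # previously:
    chatbot = gr.ChatInterface(fn, examples=examples, cache_examples="lazy")

    # now:
    chatbot = gr.ChatInterface(fn, examples=examples, cache_examples=True, cache_mode="lazy")
)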
@@ -635,6 +481,7 @@ chatbot = gr.ChatInterface(


"""

SYSTEM_PROMPT += "\n\n$INSERT_GUIDES_DOCS_DEMOS"

SYSTEM_PROMPT += """

@@ -651,7 +498,28 @@ Only respond with one full Gradio app.

Add comments explaining the code, but do not include any text that is not formatted as a Python comment.
"""

FALLBACK_PROMPT += """

The following RULES must be followed. Whenever you are forming a response, after each sentence ensure all rules have been followed otherwise start over, forming a new response and repeat until the finished response follows all the rules. then send the response.

RULES:
Only respond with code, not text.
Only respond with valid Python syntax.
Never include backticks in your response such as ``` or ```python.
Never import any external library aside from: gradio, numpy, pandas, plotly, transformers_js and matplotlib. Do not import any other library like pytesseract or PIL unless requested in the prompt.
Do not include any code that is not necessary for the app to run.
Respond with a full Gradio app using correct syntax and features of the latest Gradio version. DO NOT write code that doesn't follow the signatures listed.
Only respond with one full Gradio app.
Add comments explaining the code, but do not include any text that is not formatted as a Python comment.
"""

# print("\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n")
# print(SYSTEM_PROMPT)
# print("\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n")



def generate(json_path):
    with open(json_path, "w+") as f:
        json.dump(docs, f)
    return SYSTEM_PROMPT
    return SYSTEM_PROMPT, FALLBACK_PROMPT
@@ -14,6 +14,20 @@

  import { onMount } from "svelte";
  import SYSTEM_PROMPT from "$lib/json/system_prompt.json";
  import WHEEL from "$lib/json/wheel.json";
  import logo_melted from "$lib/assets/img/logo-melted.png";

  export let suggested_links: {
    title: string;
    url: string;
    type: string;
    requirements: string[];
  }[] = [];

  $: suggested_links;

  export let edited_demos: string[] = [];

  $: edited_demos;

  interface CodeState {
    status: "idle" | "generating" | "error" | "regenerating";

@@ -33,9 +47,51 @@

  $: code_state;

  console.log(code_state);
  let non_lite_demos = [
    "chatbot_dialogpt",
    "text_generation",
    "xgboost-income-prediction-with-explainability",
    "same-person-or-different",
    "question-answering",
    "chicago-bikeshare-dashboard",
    "image_classifier_2",
    "llm_hf_transformers",
    "progress",
    "image_classifier",
    "translation",
    "blocks_speech_text_sentiment",
    "yolov10_webcam_stream",
    "stream_asr",
    "rt-detr-object-detection",
    "depth_estimation",
    "unispeech-speaker-verification",
    "stable-diffusion",
    "text_analysis",
    "asr",
    "streaming_wav2vec",
    "magic_8_ball",
    "animeganv2",
    "generate_english_german",
    "musical_instrument_identification",
    "ner_pipeline",
    "map_airbnb",
    "english_translator",
    "unified_demo_text_generation",
    "timeseries-forecasting-with-prophet",
    "image_classification",
    "diffusers_with_batching"
  ];

  const workerUrl = "https://playground-worker.pages.dev/api/generate";
  let hide_preview = false;

  $: hide_preview;

  let system_prompt = SYSTEM_PROMPT.SYSTEM;
  let fallback_prompt = SYSTEM_PROMPT.FALLBACK;

  const workerUrl =
    "https://semantic-search.playground-worker.pages.dev/api/generate";
  // const workerUrl = "https://playground-worker.pages.dev/api/generate";
  // const workerUrl = "http://localhost:5173/api/generate";

  let abortController: AbortController | null = null;

@@ -43,6 +99,7 @@

  async function* streamFromWorker(
    query: string,
    system_prompt: string,
    fallback_prompt: string,
    signal: AbortSignal
  ) {
    const response = await fetch(workerUrl, {

@@ -52,7 +109,8 @@

      },
      body: JSON.stringify({
        query: query,
        SYSTEM_PROMPT: system_prompt
        SYSTEM_PROMPT: system_prompt,
        FALLBACK_PROMPT: fallback_prompt
      }),
      signal
    });

@@ -105,6 +163,10 @@

          yield { requirements: parsed.requirements };
        } else if (parsed.choices && parsed.choices.length > 0) {
          yield parsed;
        } else if (parsed.suggested_links) {
          if (suggested_links.length == 0) {
            suggested_links = parsed.suggested_links;
          }
        }
      } catch (e) {
        console.error("Error parsing JSON:", e);

@@ -125,6 +187,7 @@

      code_state.status = "regenerating";
    } else {
      code_state.status = "generating";
      suggested_links = [];
    }
    let out = "";

@@ -146,7 +209,8 @@

    for await (const chunk of streamFromWorker(
      query,
      SYSTEM_PROMPT.SYSTEM,
      system_prompt,
      fallback_prompt,
      abortController.signal
    )) {
      if (chunk.requirements) {

@@ -199,6 +263,7 @@

  function handle_user_query_key_down(e: KeyboardEvent): void {
    if (e.key === "Enter") {
      e.preventDefault();
      run_as_update = false;
      suspend_and_resume_auto_run(() => {
        generate_code(user_query, selected_demo.name);

@@ -363,6 +428,11 @@

  $: selected_demo =
    demos.find((demo) => demo.name === current_selection) ?? demos[0];
  $: if (non_lite_demos.includes(selected_demo.dir)) {
    hide_preview = true;
  } else {
    hide_preview = false;
  }
  $: code = selected_demo?.code || "";
  $: requirements = selected_demo?.requirements || [];
  $: requirementsStr = requirements.join("\n"); // Use the stringified version to trigger reactivity only when the array values actually change, while the `requirements` object's identity always changes.

@@ -454,6 +524,15 @@

  const demos_copy: typeof demos = JSON.parse(JSON.stringify(demos));

  $: edited_demos = demos_copy
    .filter((demo) => {
      const edited = demos.find(
        (d) => d.name === demo.name && d.code !== demo.code
      );
      return edited !== undefined;
    })
    .map((demo) => demo.name);

  $: show_dialog(demos, demos_copy, shared);
  $: if (code) {
    shared = false;

@@ -588,9 +667,12 @@

    }
    if (
      app_error &&
      app_error.includes(
      (app_error.includes(
        "UserWarning: only soft file lock is available from filelock import BaseFileLock, FileLock, SoftFileLock, Timeout"
      )
      ) ||
        app_error.includes(
          "Matplotlib is building the font cache; this may take a moment."
        ))
    ) {
      app_error = null;
    }

@@ -622,12 +704,14 @@

  $: regenerate_on_error(app_error);

  $: if (app_error && !user_query) {
  $: if (app_error && !user_query && !hide_preview) {
    user_query = app_error;
  }

  let code_to_compare = code;
  $: code_to_compare;

  $: current_selection && (user_query = "");
</script>

<svelte:head>

@@ -697,6 +781,13 @@

          dark_mode={false}
          on:change={(e) => {
            code_state.code_edited = true;
            if (user_query == app_error) {
              app_error = null;
              user_query = "";
            }
            if (code_state.status == "error") {
              code_state.status = "idle";
            }
          }}
        />
      </div>

@@ -753,6 +844,9 @@

        style="color-scheme: light"
      >
        <ErrorModal
          on:close={() => {
            code_state.generation_error = "";
          }}
          messages={[
            {
              type: "error",

@@ -811,7 +905,7 @@

        {:else}
          ✨
        {/if}
        <input
        <textarea
          bind:value={user_query}
          on:keydown={(e) => {
            handle_user_query_key_down(e);

@@ -832,8 +926,9 @@

          autocapitalize="off"
          enterkeyhint="go"
          spellcheck="false"
          type="search"
          id="user-query"
          class="w-full resize-none content-center px-2 border rounded overflow-x-none !text-[14px]"
          rows="1"
          class:grayed={code_state.status === "generating"}
          autofocus={true}
        />

@@ -886,7 +981,11 @@

        class="flex justify-between align-middle h-8 border-b pl-4 pr-2 ml-0 sm:ml-2"
      >
        <div class="flex align-middle">
          <h3 class="pr-2 pt-1">Preview</h3>
          <h3
            class="pr-2 py-1 text-sm font-normal content-center text-[#27272a]"
          >
            Preview
          </h3>
          <p class="pt-1.5 text-sm text-gray-600 hidden sm:block">
            {preview_width - 13}px
          </p>
@@ -936,7 +1035,38 @@

          </div>
        </div>

        <div class="flex-1 pl-3" id="lite-demo" bind:this={lite_element} />
        {#if hide_preview}
          <div class="flex-1 bg-gray-100 flex flex-col justify-center">
            <img class="mx-auto my-5 w-48 logo grayscale" src={logo_melted} />
            <div
              class="mx-auto my-5 text-center max-h-fit leading-7 font-normal text-[14px] text-gray-500"
            >
              <p>
                This demo requires packages that we do not support in the
                Playground.
              </p>
              <p>
                Use it on Spaces: <a
                  href={`https://huggingface.co/spaces/gradio/${selected_demo.dir}`}
                  target="_blank"
                  class="thin-link text-gray-600 font-mono font-medium text-[14px]"
                >
                  <img
                    class="inline-block my-0 -mr-1 w-5 max-w-full pb-[2px]"
                    src="data:image/svg+xml,%3csvg%20class='mr-1%20text-gray-400'%20xmlns='http://www.w3.org/2000/svg'%20aria-hidden='true'%20viewBox='0%200%2032%2032'%3e%3cpath%20d='M7.81%2018.746v5.445h5.444v-5.445H7.809Z'%20fill='%23FF3270'/%3e%3cpath%20d='M18.746%2018.746v5.445h5.444v-5.445h-5.444Z'%20fill='%23861FFF'/%3e%3cpath%20d='M7.81%207.81v5.444h5.444V7.81H7.809Z'%20fill='%23097EFF'/%3e%3cpath%20fill-rule='evenodd'%20clip-rule='evenodd'%20d='M4%206.418A2.418%202.418%200%200%201%206.418%204h8.228c1.117%200%202.057.757%202.334%201.786a6.532%206.532%200%200%201%209.234%209.234A2.419%202.419%200%200%201%2028%2017.355v8.227A2.418%202.418%200%200%201%2025.582%2028H6.417A2.418%202.418%200%200%201%204%2025.582V6.417ZM7.81%207.81v5.444h5.444V7.81H7.81Zm0%2016.38v-5.444h5.444v5.445H7.81Zm10.936%200v-5.444h5.445v5.445h-5.445Zm0-13.658a2.722%202.722%200%201%201%205.445%200%202.722%202.722%200%200%201-5.445%200Z'/%3e%3cpath%20d='M21.468%207.81a2.722%202.722%200%201%200%200%205.444%202.722%202.722%200%200%200%200-5.444Z'%20fill='%23FFD702'/%3e%3c/svg%3e"
                  />
                  gradio/{selected_demo.dir}
                </a>
              </p>
            </div>
          </div>
        {/if}
        <div
          class:hidden={hide_preview}
          class="flex-1 pl-3"
          id="lite-demo"
          bind:this={lite_element}
        />
      </div>
    </div>
  </Slider>

@@ -1008,7 +1138,7 @@

    border-color: #e5e7eb;
  }

  .search-bar input {
  .search-bar textarea {
    @apply appearance-none h-14 text-black mx-1 flex-auto min-w-0 border-none cursor-text;
    outline: none;
    box-shadow: none;

@@ -1192,4 +1322,16 @@

    position: static !important;
    width: 100% !important;
  }

  :global(.ͼ60) {
    font-size: 13px;
  }

  :global(.tabs.editor-tabs) {
    gap: 0px !important;
  }

  :global(.tabitem.editor-tabitem) {
    margin-top: -4px !important;
  }
</style>
@@ -10,6 +10,7 @@

  import { BaseCode as Code } from "@gradio/code";
  import version_json from "$lib/json/version.json";
  import WHEEL from "$lib/json/wheel.json";
  import { fade, fly, slide, blur } from "svelte/transition";

  export let data: {
    demos_by_category: {

@@ -54,6 +55,35 @@

  };

  let version = version_json.version;

  let suggested_links = [];
  let edited_demos = [];

  let suggested_demos = suggested_links.filter((item) => item.type === "DEMO");
  let suggested_guides_docs = suggested_links.filter(
    (item) => item.type !== "DEMO"
  );

  $: if (suggested_links) {
    suggested_links.forEach((link) => {
      if (link.type == "DEMO") {
        console.log(all_demos);
        all_demos.push({
          name: link.title,
          dir: link.title.replaceAll(" ", "_").toLowerCase(),
          code: link.url,
          requirements: link.requirements.split("\n")
        });
      }
    });
  }
  $: all_demos;
  $: suggested_links;
  $: suggested_demos = suggested_links.filter((item) => item.type === "DEMO");
  $: suggested_guides_docs = suggested_links.filter(
    (item) => item.type !== "DEMO"
  );
  $: edited_demos;
</script>

<MetaTags

@@ -107,14 +137,16 @@

    >
      <div class="w-full border border-gray-200 shadow-xl h-full relative">
        <div
          class="w-[200px] rounded-tr-none rounded-bl-xl overflow-y-scroll mb-0 p-0 pb-4 text-md block rounded-t-xl bg-gradient-to-r from-white to-gray-50 overflow-x-clip"
          class="w-[200px] rounded-tr-none rounded-bl-xl mb-0 p-0 pb-4 text-md block rounded-t-xl bg-gradient-to-r from-white to-gray-50 overflow-x-clip overflow-y-auto"
          style="word-break: normal; overflow-wrap: break-word; white-space:nowrap; height: 100%; width: {show_nav
            ? 200
            : 37}px;"
        >
          <div class="flex justify-between align-middle h-8 border-b px-2">
            {#if show_nav}
              <h3 class="pl-2 pt-1">Demos</h3>
              <h3 class="pl-2 py-1 my-auto text-sm font-medium text-[#27272a]">
                Demos
              </h3>
            {/if}
            <button
              on:click={() => (show_nav = !show_nav)}
@@ -123,27 +155,101 @@

            >
          </div>
          {#if show_nav}
            <button
              on:click={() => (current_selection = "Blank")}
              class:current-playground-demo={current_selection == "Blank"}
              class:shared-link={shared == "Blank"}
              class="thin-link font-light px-4 block my-2">New Demo</button
            >
            {#each suggested_guides_docs as link}
              <a
                class:bg-orange-100={link.type == "GUIDE"}
                class:border-orange-100={link.type == "GUIDE"}
                class:bg-green-100={link.type == "DOCS"}
                class:border-green-100={link.type == "DOCS"}
                class="sug-block my-2"
                href={link.url}
                target="_blank"
                in:slide
                out:slide
              >
                <div class="flex items-center flex-row">
                  <p
                    class:text-orange-700={link.type == "GUIDE"}
                    class:text-green-700={link.type == "DOCS"}
                    class="text-xs font-semibold flex-grow"
                  >
                    {link.type}
                  </p>
                  <p class="float-right text-xs font-semibold mx-1">✨</p>
                </div>
                <p
                  class="font-light break-words w-full text-sm"
                  style="white-space: initial"
                >
                  {link.title}
                </p>
              </a>
            {/each}
            {#if suggested_demos.length > 0}
              <div in:slide out:slide>
                <div class="my-1 mx-2 pb-2">
                  <div class="flex items-center flex-row px-2">
                    <p class="my-2 font-medium text-sm text-[#27272a] flex-grow">
                      Related Demos
                    </p>
                    <p class="float-right text-xs font-semibold mx-1">✨</p>
                  </div>
                  {#each suggested_demos as link}
                    <button
                      on:click={() => (current_selection = link.title)}
                      class:current-playground-demo={current_selection ==
                        link.title}
                      class:shared-link={shared == link.title}
                      class="thin-link font-light !px-2 block text-sm text-[#27272a] break-words w-full text-left capitalize"
                      style="white-space: initial"
                      >{link.title.replaceAll("-", " ")}</button
                    >
                  {/each}
                </div>
                <div class="border-b border-gray-400 ml-4 mr-5"></div>
              </div>
            {/if}
            <div>
              {#if edited_demos.includes("Blank")}
                <div class="dot float-left !mt-[7px]"></div>
              {/if}
              <button
                on:click={() => (current_selection = "Blank")}
                class:!pl-1={edited_demos.includes("Blank")}
                class:current-playground-demo={current_selection == "Blank"}
                class:shared-link={shared == "Blank"}
                class="thin-link font-light px-4 block my-2 text-sm text-[#27272a]"
                >New Demo</button
              >
            </div>
            {#each data.demos_by_category as { category, demos } (category)}
              <p class="px-4 my-2 font-medium">{category}</p>
              <p class="px-4 my-2 font-medium text-sm text-[#27272a]">
                {category}
              </p>
              {#each demos as demo, i}
                {#if edited_demos.includes(demo.name)}
                  <div class="dot float-left"></div>
                {/if}
                <button
                  on:click={() => (current_selection = demo.name)}
                  class:!pl-1={edited_demos.includes(demo.name)}
                  class:current-playground-demo={current_selection == demo.name}
                  class:shared-link={shared == demo.name}
                  class="thin-link font-light px-4 block">{demo.name}</button
                  class="thin-link font-light px-4 block text-sm text-[#27272a]"
                  >{demo.name}</button
                >
              {/each}
            {/each}
          {/if}
        </div>

        <DemosLite demos={all_demos} {current_selection} {show_nav} />
        <DemosLite
          demos={all_demos}
          {current_selection}
          {show_nav}
          bind:suggested_links
          bind:edited_demos
        />
      </div>
    </main>
  {:else}

@@ -375,4 +481,10 @@

    height: 100%;
    overflow: hidden !important;
  }
  .sug-block {
    @apply block m-2 p-2 border rounded-md hover:scale-[1.02] drop-shadow-md;
  }
  .dot {
    @apply w-[0.4rem] h-[0.4rem] bg-gray-500 rounded-full mt-[6.5px] ml-[6px];
  }
</style>
@@ -37,10 +37,23 @@ export async function GET() {

      path = path.match(/(?:\d{2}_)?(.+)/i)[1];
      path = "/main/docs/gradio/" + path.split(".svx")[0];

      // content = content.replace(/<div class="codeblock"*>([^]*?)<\/div>/g, '')
      content = content.replace(/<gradio-lite*?>([^]*?)<\/gradio-lite>/g, "");
      content = content.replace(
        /<pre[^>]*><code[^>]*>([^]*?)<\/code><\/pre>/g,
        "```\n$1\n```"
      );
      content = content.replace(
        /<span[^>]*>|<\/span>|<\/?[^>]*(token)[^>]*>/g,
        ""
      );
      content = content.replace(/<[^>]*>?/gm, "");
      content = content.replace(/Open in 🎢.*?\n\t\t/g, "");

      return {
        title: title,
        slug: path,
        content: content.replaceAll(/<[^>]*>?/gm, ""),
        content: content,
        type: "DOCS"
      };
    })

@@ -62,10 +75,24 @@ export async function GET() {

      path = path.match(/(?:\d{2}_)?(.+)/i)[1];
      path = "/main/docs/python-client/" + path.split(".svx")[0];

      content = content.replace(
        /<pre[^>]*?language-(\w+)[^>]*?><code[^>]*?>([^]*?)<\/code><\/pre>/g,
        "```$1\n$2\n```"
      );
      content = content.replace(
        /<span[^>]*>|<\/span>|<\/?[^>]*(token)[^>]*>/g,
        ""
      );
      content = content.replace(
        /<gradio-lite[^>]*>([^]*?)<\/gradio-lite>/g,
        "```python\n$1\n```"
      );
      content = content.replace(/<[^>]*>?/gm, "");

      return {
        title: title,
        slug: path,
        content: content.replaceAll(/<[^>]*>?/gm, ""),
        content: content,
        type: "DOCS"
      };
    })
@@ -118,7 +118,6 @@

  }

  main {
    margin: 0 auto;
    display: flex;
    flex-grow: 1;
    flex-direction: column;