Referencing guides in docstrings and other fixes (#1905)

* tie guides to docs in docstrings

* reference guides in more sections

* fix missing guides in interface and blocks

* fix broken html div

* remove pinned metadata for ordering

* fix annoying comma space in guide tags

* retrigger codecov check

* fix guides bug in flagging

* add controlling layout guide to row, column, and tab
Ali Abdalla 2022-08-04 17:55:09 -07:00 committed by GitHub
parent 361837753c
commit f93f4c9cfe
26 changed files with 40 additions and 43 deletions

@@ -293,6 +293,7 @@ class Blocks(BlockContext):
demo.launch()
Demos: blocks_hello, blocks_flipper, blocks_speech_text_sentiment, generate_english_german
Guides: blocks_and_event_listeners, controlling_layout, state_in_blocks, custom_CSS_and_JS, custom_interpretations_with_blocks, using_blocks_like_functions
"""
def __init__(

@@ -286,6 +286,7 @@ class Textbox(Changeable, Submittable, IOComponent):
Examples-format: a {str} representing the textbox input.
Demos: hello_world, diff_texts, sentence_builder
Guides: creating_a_chatbot, real_time_speech_recognition
"""
def __init__(
@@ -665,6 +666,7 @@ class Slider(Changeable, IOComponent):
Examples-format: A {float} or {int} representing the slider's value.
Demos: sentence_builder, generate_tone, titanic_survival
Guides: create_your_own_friends_with_a_gan
"""
def __init__(
@@ -1351,6 +1353,7 @@ class Image(Editable, Clearable, Changeable, Streamable, IOComponent):
Postprocessing: expects a {numpy.array}, {PIL.Image} or {str} filepath to an image and displays the image.
Examples-format: a {str} filepath to a local file that contains the image.
Demos: image_mod, image_mod_default_image
Guides: Gradio_and_ONNX_on_Hugging_Face, image_classification_in_pytorch, image_classification_in_tensorflow, image_classification_with_vision_transformers, building_a_pictionary_app, create_your_own_friends_with_a_gan
"""
def __init__(
@@ -1912,6 +1915,7 @@ class Audio(Changeable, Clearable, Playable, Streamable, IOComponent):
Postprocessing: expects a {Tuple(int, numpy.array)} corresponding to (sample rate, data) or as a {str} filepath to an audio file, which gets displayed
Examples-format: a {str} filepath to a local file that contains audio.
Demos: main_note, generate_tone, reverse_audio
Guides: real_time_speech_recognition
"""
def __init__(
@@ -2854,6 +2858,7 @@ class Variable(IOComponent):
Preprocessing: No preprocessing is performed
Postprocessing: No postprocessing is performed
Demos: chatbot_demo, blocks_simple_squares
Guides: creating_a_chatbot, real_time_speech_recognition
"""
allow_string_shortcut = False
@@ -3075,6 +3080,7 @@ class Label(Changeable, IOComponent):
Postprocessing: expects a {Dict[str, float]} of classes and confidences, or {str} with just the class or an {int}/{float} for regression outputs.
Demos: main_note, titanic_survival
Guides: Gradio_and_ONNX_on_Hugging_Face, image_classification_in_pytorch, image_classification_in_tensorflow, image_classification_with_vision_transformers, building_a_pictionary_app
"""
CONFIDENCES_KEY = "confidences"
@@ -3218,6 +3224,7 @@ class HighlightedText(Changeable, IOComponent):
Postprocessing: expects a {List[Tuple[str, float | str]]]} consisting of spans of text and their associated labels, or a {Dict} with two keys: (1) "text" whose value is the complete text, and "entities", which is a list of dictionaries, each of which have the keys: "entity" (consisting of the entity label), "start" (the character index where the label starts), and "end" (the character index where the label ends).
Demos: diff_texts, text_analysis
Guides: named_entity_recognition
"""
def __init__(
@@ -3458,6 +3465,7 @@ class HTML(Changeable, IOComponent):
Postprocessing: expects a valid HTML {str}.
Demos: text_analysis
Guides: key_features
"""
def __init__(
@@ -3869,6 +3877,7 @@ class Model3D(Changeable, Editable, Clearable, IOComponent):
Postprocessing: expects function to return a {str} path to a file of type (.obj, glb, or .gltf)
Demos: model3D
Guides: how_to_use_3D_model_component
"""
def __init__(
@@ -4102,6 +4111,7 @@ class Markdown(IOComponent, Changeable):
Postprocessing: expects a valid {str} that can be rendered as Markdown.
Demos: blocks_hello, blocks_kinematics
Guides: key_features
"""
def __init__(
@@ -4249,6 +4259,8 @@ class Interpretation(Component):
Used to create an interpretation widget for a component.
Preprocessing: this component does *not* accept input.
Postprocessing: expects a {dict} with keys "original" and "interpretation".
Guides: custom_interpretations_with_blocks
"""
def __init__(

@@ -31,6 +31,7 @@ class Examples:
components. Optionally handles example caching for fast inference.
Demos: blocks_inputs, fake_gan
Guides: more_on_examples_and_flagging, using_hugging_face_integrations, image_classification_in_pytorch, image_classification_in_tensorflow, image_classification_with_vision_transformers, create_your_own_friends_with_a_gan
"""
def __init__(

@@ -120,6 +120,7 @@ class CSVLogger(FlaggingCallback):
return {'cat': 0.3, 'dog': 0.7}
demo = gr.Interface(fn=image_classifier, inputs="image", outputs="label",
flagging_callback=CSVLogger())
Guides: using_flagging
"""
def __init__(self):
@@ -239,6 +240,7 @@ class HuggingFaceDatasetSaver(FlaggingCallback):
return {'cat': 0.3, 'dog': 0.7}
demo = gr.Interface(fn=image_classifier, inputs="image", outputs="label",
allow_flagging="manual", flagging_callback=hf_writer)
Guides: using_flagging
"""
def __init__(

@@ -60,6 +60,7 @@ class Interface(Blocks):
demo = gr.Interface(fn=image_classifier, inputs="image", outputs="label")
demo.launch()
Demos: hello_world, hello_world_3, gpt_j
Guides: quickstart, key_features, sharing_your_app, interface_state, reactive_interfaces, advanced_interface_features
"""
# stores references to all currently existing Interface instances

@@ -37,6 +37,7 @@ class Row(BlockContext):
gr.Image("lion.jpg")
gr.Image("tiger.jpg")
demo.launch()
Guides: controlling_layout
"""
def get_config(self):
@@ -77,6 +78,7 @@ class Column(BlockContext):
with gradio.Column():
btn1 = gr.Button("Button 1")
btn2 = gr.Button("Button 2")
Guides: controlling_layout
"""
def __init__(
@@ -126,6 +128,7 @@ class Tabs(BlockContext):
with gradio.TabItem("Tiger"):
gr.Image("tiger.jpg")
gr.Button("New Tiger")
Guides: controlling_layout
"""
def __init__(self, selected: Optional[int | str] = None, **kwargs):

@@ -19,6 +19,7 @@ class Parallel(gradio.Interface):
The Interfaces to put in Parallel must share the same input components (but can have different output components).
Demos: interface_parallel, interface_parallel_load
Guides: advanced_interface_features
"""
def __init__(self, *interfaces: gradio.Interface, **options):
@@ -61,6 +62,7 @@ class Series(gradio.Interface):
and so the input and output components must agree between the interfaces).
Demos: interface_series, interface_series_load
Guides: advanced_interface_features
"""
def __init__(self, *interfaces: gradio.Interface, **options):

@@ -78,5 +78,3 @@ gr.Series(generator, translator).launch() # this demo generates text, then tran
```
And of course, you can also mix `Parallel` and `Series` together whenever that makes sense!
Docs: parallel, series

@@ -2,7 +2,6 @@
Related spaces: https://huggingface.co/spaces/farukozderim/Model-Comparator-Space-Builder, https://huggingface.co/spaces/osanseviero/helsinki_translation_en_es, https://huggingface.co/spaces/osanseviero/remove-bg-webcam, https://huggingface.co/spaces/mrm8488/GPT-J-6B, https://huggingface.co/spaces/akhaliq/T0pp, https://huggingface.co/spaces/osanseviero/mix_match_gradio
Tags: HUB, SPACES, EMBED
Docs: examples
Contributed by <a href="https://huggingface.co/osanseviero">Omar Sanseviero</a> 🦙 and <a href="https://huggingface.co/farukozderim">Ömer Faruk Özdemir</a>

@@ -3,8 +3,6 @@
Related spaces: https://huggingface.co/spaces/onnx/EfficientNet-Lite4
Tags: ONNX, SPACES
Contributed by Gradio and the <a href="https://onnx.ai/">ONNX</a> team
Docs: image, label
## Introduction

@@ -2,7 +2,6 @@
Related spaces: https://huggingface.co/spaces/abidlabs/pytorch-image-classifier, https://huggingface.co/spaces/pytorch/ResNet, https://huggingface.co/spaces/pytorch/ResNext, https://huggingface.co/spaces/pytorch/SqueezeNet
Tags: VISION, RESNET, PYTORCH
Docs: image, label, example
## Introduction

@@ -2,7 +2,6 @@
Related spaces: https://huggingface.co/spaces/abidlabs/keras-image-classifier
Tags: VISION, MOBILENET, TENSORFLOW
Docs: image, label, examples
## Introduction

@@ -2,7 +2,6 @@
Related spaces: https://huggingface.co/spaces/abidlabs/vision-transformer
Tags: VISION, TRANSFORMERS, HUB
Docs: image, label, examples
## Introduction

@@ -2,7 +2,6 @@
Related spaces: https://huggingface.co/spaces/nateraw/quickdraw
Tags: SKETCHPAD, LABELS, LIVE
Docs: image, label
## Introduction

@@ -2,7 +2,6 @@
Related spaces: https://huggingface.co/spaces/NimaBoscarino/cryptopunks, https://huggingface.co/spaces/nateraw/cryptopunks-generator
Tags: GAN, IMAGE, HUB
Docs: slider, image, examples
Contributed by <a href="https://huggingface.co/NimaBoscarino">Nima Boscarino</a> and <a href="https://huggingface.co/nateraw">Nate Raw</a>

@@ -2,7 +2,6 @@
Related spaces: https://huggingface.co/spaces/abidlabs/chatbot-minimal, https://huggingface.co/spaces/ThomasSimonini/Chat-with-Gandalf-GPT-J6B, https://huggingface.co/spaces/gorkemgoknar/moviechatbot, https://huggingface.co/spaces/Kirili4ik/chat-with-Kirill
Tags: NLP, TEXT, HTML
Docs: textbox, variable
## Introduction

@@ -1,7 +1,5 @@
# How to Create a New Component
Docs: component
## Introduction
The purpose of this guide is to illustrate how to add a new component, which you can use in your Gradio applications. The guide will be complemented by code snippets showing step by step how the [ColorPicker](https://gradio.app/docs/#colorpicker) component was added.

@@ -1,7 +1,6 @@
<script type="module" src="https://gradio.s3-us-west-2.amazonaws.com/3.1.0/gradio.js"></script>
# Custom Machine Learning Interpretations with Blocks
Docs: blocks, interpretation
Tags: INTERPRETATION, SENTIMENT ANALYSIS
**Prerequisite**: This Guide requires you to know about Blocks and the interpretation feature of Interfaces.

@@ -2,7 +2,6 @@
Related spaces: https://huggingface.co/spaces/dawood/Model3D, https://huggingface.co/spaces/radames/PIFu-Clothed-Human-Digitization, https://huggingface.co/spaces/radames/dpt-depth-estimation-3d-obj
Tags: VISION, IMAGE
Docs: model3d
## Introduction

@@ -2,7 +2,6 @@
Related spaces: https://huggingface.co/spaces/rajistics/biobert_ner_demo, https://huggingface.co/spaces/abidlabs/ner, https://huggingface.co/spaces/rajistics/Financial_Analyst_AI
Tags: NER, TEXT, HIGHLIGHT
Docs: highlightedtext
## Introduction

@@ -2,7 +2,6 @@
Related spaces: https://huggingface.co/spaces/abidlabs/streaming-asr-paused, https://huggingface.co/spaces/abidlabs/full-context-asr
Tags: ASR, SPEECH, STREAMING
Docs: audio, variable, textbox
## Introduction

@@ -2,8 +2,6 @@
Tags: TRANSLATION, HUB, SPACES
Docs: Blocks
**Prerequisite**: This Guide builds on the Blocks Introduction. Make sure to [read that guide first](/introduction_to_blocks).
## Introduction

@@ -63,8 +63,15 @@ add_supported_events()
def add_guides():
    for mode in docs:
        for obj in docs[mode]:
            obj["guides"] = [guide for guide in guides if obj["name"].lower() in guide["docs"]]
        for cls in docs[mode]:
            if "guides" not in cls["tags"]:
                continue
            cls["guides"] = []
            docstring_guides = [guide.strip() for guide in cls["tags"]["guides"].split(",")]
            for docstring_guide in docstring_guides:
                for guide in guides:
                    if docstring_guide == guide["name"]:
                        cls["guides"].append(guide)
add_guides()
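For context, the rewritten `add_guides()` no longer scans each guide's metadata for a class name; it reads the comma-separated `Guides:` line that the docstring parser is assumed to expose as `cls["tags"]["guides"]` and attaches only exact name matches. A minimal, self-contained sketch of that lookup, using illustrative stand-ins for the `docs` and `guides` structures (the real ones are built elsewhere in the website scripts):

```python
# Sketch only (not the website build script): how a docstring "Guides:" tag
# is assumed to map onto guide entries. `docs` and `guides` are stand-ins.
docs = {
    "component": [
        {
            "name": "Textbox",
            "tags": {"guides": "creating_a_chatbot, real_time_speech_recognition"},
        },
        {"name": "Button", "tags": {}},  # no Guides: tag -> left untouched
    ]
}
guides = [
    {"name": "creating_a_chatbot", "url": "/creating_a_chatbot/"},
    {"name": "real_time_speech_recognition", "url": "/real_time_speech_recognition/"},
]

for mode in docs:
    for cls in docs[mode]:
        if "guides" not in cls["tags"]:
            continue  # only classes that declare a Guides: tag get guide links
        cls["guides"] = []
        # the tag value is a comma-separated list of guide names
        docstring_guides = [g.strip() for g in cls["tags"]["guides"].split(",")]
        for docstring_guide in docstring_guides:
            for guide in guides:
                if docstring_guide == guide["name"]:
                    cls["guides"].append(guide)

# Textbox now carries its two matching guide entries; Button gets none.
```

A class with no `Guides:` line is simply skipped, so its docs entry renders without a guides list.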

@@ -35,7 +35,7 @@
<section id="building_demos" class="pt-2 flex flex-col gap-10 mb-8">
<section class="pt-2">
<h2 class="text-4xl font-light mb-2 pt-2 text-orange-500" id="building-demos">Building Demos</h2>
{% with obj=find_cls("Interface"), parent="gradio" %}
{% with obj=find_cls("Interface"), is_class=True, parent="gradio" %}
{% include "docs/obj_doc_template.html" %}
{% endwith %}
</section>
@@ -59,15 +59,6 @@
{% endwith %}
{% endfor %}
</div>
<h4 class="my-2 font-semibold">Flagging Guides</h4>
<div class="guides-list grid grid-cols-1 lg:grid-cols-4 gap-4">
<a class="guide-box flex lg:col-span-1 flex-col group overflow-hidden relative rounded-xl shadow-sm hover:shadow-alternate transition-shadow my-4 bg-gradient-to-r" target="_blank" href="/using_flagging/">
<div class="flex flex-col p-4 h-min">
<p class="font-semibold group-hover:underline text-l">Flagging</p>
</div>
</a>
</div>
</section>
<section id="combining_interfaces" class="pt-2">
@@ -93,7 +84,7 @@
</section>
<section id="blocks" class="pt-2 mb-8">
<div class="flex flex-col gap-10">
{% with obj=find_cls("Blocks"), parent="gradio" %}
{% with obj=find_cls("Blocks"), is_class=True, parent="gradio" %}
{% include "docs/obj_doc_template.html" %}
{% endwith %}
</div>
@@ -101,7 +92,7 @@
<p class="mb-12">Customize the layout of your Blocks UI with the layout classes below.</p>
<div class="flex flex-col gap-10">
{% for layout in docs["layout"] %}
{% with obj=layout, parent="gradio" %}
{% with obj=layout, is_class=True, parent="gradio" %}
{% include "docs/obj_doc_template.html" %}
{% endwith %}
{% endfor %}

@@ -69,8 +69,6 @@ for guide_folder in guide_folders:
tags = get_labeled_metadata("Tags:")
spaces = get_labeled_metadata("Related spaces:")
contributor = get_labeled_metadata("Contributed by", is_list=False)
docs = get_labeled_metadata("Docs:")
pinned = get_labeled_metadata("Pinned:", is_list=False)
url = f"/{guide_name}/"
@@ -109,8 +107,6 @@ for guide_folder in guide_folders:
"spaces": spaces,
"url": url,
"contributor": contributor,
"docs": docs,
"pinned": pinned,
}
guides.append(guide_data)
guides_by_category[-1]["guides"].append(guide_data)

@@ -34,12 +34,12 @@
<h2 class="group-hover:underline text-lg">{{ guide.pretty_name }}</h2>
<div class="tags-holder">
{% if guide.tags is not none %}
<p class="text-gray-600">
{% for tag in guide.tags %}
{{ tag }}
{% if not loop.last %},{% endif %}
{% endfor %}
</p>
<p class="text-gray-600"><!--
-->{% for tag in guide.tags %}<!--
-->{{ tag }}<!--
-->{% if not loop.last %}, {% endif %}<!--
-->{% endfor %}<!--
--></p>
{% endif %}
</div>
</div>
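On the comma-space fix: in the old template, the newlines around the `{% for %}` and `{% if %}` tags sit inside the `<p>`, and the browser collapses them into a visible space before each comma (e.g. "NLP , TEXT"). Wrapping that whitespace in HTML comments hides it, and the explicit ", " inside the `{% if %}` restores the separator. A small standalone illustration, assuming the jinja2 package; the template strings are simplified stand-ins for the real file:

```python
# Demonstrates why the template moves its inter-tag whitespace into HTML
# comments. Not the site's actual template, just the same pattern.
from jinja2 import Template

old = Template(
    "<p>\n{% for tag in tags %}\n{{ tag }}\n"
    "{% if not loop.last %},{% endif %}\n{% endfor %}\n</p>"
)
new = Template(
    "<p><!--\n-->{% for tag in tags %}<!--\n-->{{ tag }}<!--\n"
    "-->{% if not loop.last %}, {% endif %}<!--\n-->{% endfor %}<!--\n--></p>"
)

tags = ["NLP", "TEXT"]
print(old.render(tags=tags))  # stray newlines collapse to "NLP , TEXT" in the browser
print(new.render(tags=tags))  # whitespace hidden in comments renders as "NLP, TEXT"
```

An alternative would be Jinja's whitespace-control syntax (`{%- ... -%}`); the comment markers achieve the same effect while keeping each tag of the template on its own line.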