diff --git a/.github/workflows/ui.yml b/.github/workflows/ui.yml index 543463966e..c29a4a2569 100644 --- a/.github/workflows/ui.yml +++ b/.github/workflows/ui.yml @@ -39,7 +39,6 @@ jobs: run: pnpm format:check - name: typecheck run: pnpm ts:check - continue-on-error: true - name: unit tests run: pnpm test:run functional-test: diff --git a/demo/blocks_flag/requirements.txt b/demo/blocks_flag/requirements.txt new file mode 100644 index 0000000000..296d654528 --- /dev/null +++ b/demo/blocks_flag/requirements.txt @@ -0,0 +1 @@ +numpy \ No newline at end of file diff --git a/demo/blocks_flag/run.py b/demo/blocks_flag/run.py new file mode 100644 index 0000000000..2083d39edf --- /dev/null +++ b/demo/blocks_flag/run.py @@ -0,0 +1,33 @@ +import numpy as np +import gradio as gr + +def sepia(input_img, strength): + sepia_filter = strength * np.array( + [[0.393, 0.769, 0.189], [0.349, 0.686, 0.168], [0.272, 0.534, 0.131]] + ) + (1-strength) * np.identity(3) + sepia_img = input_img.dot(sepia_filter.T) + sepia_img /= sepia_img.max() + return sepia_img + +callback = gr.CSVLogger() + +with gr.Blocks() as demo: + with gr.Row(): + with gr.Column(): + img_input = gr.Image() + strength = gr.Slider(0, 1, 0.5) + img_output = gr.Image() + with gr.Row(): + btn = gr.Button("Flag") + + # This needs to be called at some point prior to the first call to callback.flag() + callback.setup([img_input, strength, img_output], "flagged_data_points") + + img_input.change(sepia, [img_input, strength], img_output) + strength.change(sepia, [img_input, strength], img_output) + + # We can choose which components to flag -- in this case, we'll flag all of them + btn.click(lambda *args: callback.flag(args), [img_input, strength, img_output], None, _preprocess=False) + +if __name__ == "__main__": + demo.launch() diff --git a/demo/blocks_interpretation/requirements.txt b/demo/blocks_interpretation/requirements.txt new file mode 100644 index 0000000000..dd211e6e33 --- /dev/null +++ b/demo/blocks_interpretation/requirements.txt @@ -0,0 +1 @@ +shap \ No newline at end of file diff --git a/demo/blocks_interpretation/run.py b/demo/blocks_interpretation/run.py new file mode 100644 index 0000000000..6907267737 --- /dev/null +++ b/demo/blocks_interpretation/run.py @@ -0,0 +1,57 @@ +import gradio as gr +import shap +from transformers import pipeline +import matplotlib +import matplotlib.pyplot as plt +matplotlib.use('Agg') + + +sentiment_classifier = pipeline("text-classification", return_all_scores=True) + + +def classifier(text): + pred = sentiment_classifier(text) + return {p["label"]: p["score"] for p in pred[0]} + + +def interpretation_function(text): + explainer = shap.Explainer(sentiment_classifier) + shap_values = explainer([text]) + # Dimensions are (batch size, text size, number of classes) + # Since we care about positive sentiment, use index 1 + scores = list(zip(shap_values.data[0], shap_values.values[0, :, 1])) + + scores_desc = sorted(scores, key=lambda t: t[1])[::-1] + + # Filter out empty string added by shap + scores_desc = [t for t in scores_desc if t[0] != ""] + + fig_m = plt.figure() + plt.bar(x=[s[0] for s in scores_desc[:5]], + height=[s[1] for s in scores_desc[:5]]) + plt.title("Top words contributing to positive sentiment") + plt.ylabel("Shap Value") + plt.xlabel("Word") + return {"original": text, "interpretation": scores}, fig_m + + +with gr.Blocks() as demo: + with gr.Row(): + with gr.Column(): + input_text = gr.Textbox(label="Input Text") + with gr.Row(): + classify = gr.Button("Classify Sentiment") + interpret = 
gr.Button("Interpret") + with gr.Column(): + label = gr.Label(label="Predicted Sentiment") + with gr.Column(): + with gr.Tabs(): + with gr.TabItem("Display interpretation with built-in component"): + interpretation = gr.components.Interpretation(input_text) + with gr.TabItem("Display interpretation with plot"): + interpretation_plot = gr.Plot() + + classify.click(classifier, input_text, label) + interpret.click(interpretation_function, input_text, [interpretation, interpretation_plot]) + +demo.launch() \ No newline at end of file diff --git a/demo/blocks_xray/config.json b/demo/blocks_xray/config.json index 8217fbfaa7..f4153b9097 100644 --- a/demo/blocks_xray/config.json +++ b/demo/blocks_xray/config.json @@ -1,5 +1,5 @@ { - "version": "3.0.24\n", + "version": "3.1.1\n", "mode": "blocks", "dev_mode": true, "components": [ @@ -64,6 +64,7 @@ "source": "upload", "tool": "editor", "streaming": false, + "mirror_webcam": true, "show_label": true, "name": "image", "visible": true, @@ -127,6 +128,7 @@ "source": "upload", "tool": "editor", "streaming": false, + "mirror_webcam": true, "show_label": true, "name": "image", "visible": true, @@ -188,6 +190,7 @@ ], "theme": "default", "css": null, + "title": "Gradio", "enable_queue": false, "layout": { "id": 0, @@ -277,11 +280,11 @@ "show_progress": true, "documentation": [ [ - "(List[str]): list of selected choices", - "(str | dict): base64 url data, or (if tool == \"sketch) a dict of image and mask base64 url data" + null, + null ], [ - "(Dict | List): JSON output" + null ] ] }, @@ -306,11 +309,11 @@ "show_progress": true, "documentation": [ [ - "(List[str]): list of selected choices", - "(str | dict): base64 url data, or (if tool == \"sketch) a dict of image and mask base64 url data" + null, + null ], [ - "(Dict | List): JSON output" + null ] ] }, diff --git a/demo/color_generator/run.py b/demo/color_generator/run.py new file mode 100644 index 0000000000..3b18bd0839 --- /dev/null +++ b/demo/color_generator/run.py @@ -0,0 +1,63 @@ +import gradio as gr +import cv2 +import numpy as np +import random + + +# Convert decimal color to hexadecimal color +def RGB_to_Hex(rgb): + color = "#" + for i in rgb: + num = int(i) + color += str(hex(num))[-2:].replace("x", "0").upper() + return color + + +# Randomly generate light or dark colors +def random_color(is_light=True): + return ( + random.randint(0, 127) + int(is_light) * 128, + random.randint(0, 127) + int(is_light) * 128, + random.randint(0, 127) + int(is_light) * 128, + ) + + +def switch_color(color_style): + if color_style == "light": + is_light = True + elif color_style == "dark": + is_light = False + back_color_ = random_color(is_light) # Randomly generate colors + back_color = RGB_to_Hex(back_color_) # Convert to hexadecimal + + # Draw color pictures. + w, h = 50, 50 + img = np.zeros((h, w, 3), np.uint8) + cv2.rectangle(img, (0, 0), (w, h), back_color_, thickness=-1) + + return back_color, back_color, img + + +inputs = [gr.Radio(["light", "dark"], value="light")] + +outputs = [ + gr.ColorPicker(label="color"), + gr.Textbox(label="hexadecimal color"), + gr.Image(type="numpy", label="color picture"), +] + +title = "Color Generator" +description = ( + "Click the Submit button, and a dark or light color will be randomly generated." 
+) + +demo = gr.Interface( + fn=switch_color, + inputs=inputs, + outputs=outputs, + title=title, + description=description, +) + +if __name__ == "__main__": + demo.launch() diff --git a/demo/color_picker/lion.jpg b/demo/color_picker/lion.jpg deleted file mode 100644 index e9bf9f5d08..0000000000 Binary files a/demo/color_picker/lion.jpg and /dev/null differ diff --git a/demo/color_picker/rabbit.png b/demo/color_picker/rabbit.png new file mode 100644 index 0000000000..0ccd7618e0 Binary files /dev/null and b/demo/color_picker/rabbit.png differ diff --git a/demo/color_picker/run.py b/demo/color_picker/run.py index 94804bb6bb..b2e460a852 100644 --- a/demo/color_picker/run.py +++ b/demo/color_picker/run.py @@ -35,8 +35,8 @@ demo = gr.Interface( inputs=inputs, outputs=outputs, examples=[ - [os.path.join(os.path.dirname(__file__), "lion.jpg"), "#ff0000"], - [os.path.join(os.path.dirname(__file__), "lion.jpg"), "#0000FF"], + [os.path.join(os.path.dirname(__file__), "rabbit.png"), "#ff0000"], + [os.path.join(os.path.dirname(__file__), "rabbit.png"), "#0000FF"], ], ) diff --git a/demo/gender_sentence_custom_interpretation/run.py b/demo/gender_sentence_custom_interpretation/run.py index d94123007d..93a8f6c6cf 100644 --- a/demo/gender_sentence_custom_interpretation/run.py +++ b/demo/gender_sentence_custom_interpretation/run.py @@ -14,6 +14,8 @@ def gender_of_sentence(sentence): return {"male": male_count / total, "female": female_count / total} +# Number of arguments to interpretation function must +# match number of inputs to prediction function def interpret_gender(sentence): result = gender_of_sentence(sentence) is_male = result["male"] > result["female"] @@ -28,7 +30,9 @@ def interpret_gender(sentence): ): score = -1 interpretation.append((word, score)) - return interpretation + # Output must be a list of lists containing the same number of elements as inputs + # Each element corresponds to the interpretation scores for the given input + return [interpretation] demo = gr.Interface( diff --git a/demo/ner_pipeline/requirements.txt b/demo/ner_pipeline/requirements.txt new file mode 100644 index 0000000000..39dab0fdd9 --- /dev/null +++ b/demo/ner_pipeline/requirements.txt @@ -0,0 +1,2 @@ +torch +transformers \ No newline at end of file diff --git a/demo/ner_pipeline/run.py b/demo/ner_pipeline/run.py new file mode 100644 index 0000000000..23315913f5 --- /dev/null +++ b/demo/ner_pipeline/run.py @@ -0,0 +1,18 @@ +from transformers import pipeline + +import gradio as gr + +ner_pipeline = pipeline("ner") + +examples = [ + "Does Chicago have any stores and does Joe live here?", +] + +def ner(text): + output = ner_pipeline(text) + return {"text": text, "entities": output} + +gr.Interface(ner, + gr.Textbox(placeholder="Enter sentence here..."), + gr.HighlightedText(), + examples=examples).launch() \ No newline at end of file diff --git a/demo/outbreak_forecast/config.json b/demo/outbreak_forecast/config.json index 65839f8060..64bf6b2321 100644 --- a/demo/outbreak_forecast/config.json +++ b/demo/outbreak_forecast/config.json @@ -1,13 +1,18 @@ { - "mode": "blocks", + "version": "3.1.1\n", + "mode": "interface", + "dev_mode": true, "components": [ { "id": 7, "type": "row", "props": { "type": "row", - "css": {}, - "default_value": true + "visible": true, + "style": { + "equal_height": false, + "mobile_collapse": true + } } }, { @@ -16,8 +21,8 @@ "props": { "type": "column", "variant": "panel", - "css": {}, - "default_value": true + "visible": true, + "style": {} } }, { @@ -26,8 +31,8 @@ "props": { "type": 
"column", "variant": "default", - "css": {}, - "default_value": true + "visible": true, + "style": {} } }, { @@ -39,11 +44,11 @@ "Plotly", "Bokeh" ], - "default_value": "Matplotlib", "label": "Plot Type", "show_label": true, "name": "dropdown", - "css": {} + "visible": true, + "style": {} } }, { @@ -53,11 +58,12 @@ "minimum": 1, "maximum": 4, "step": 0.01, - "default_value": 3.2, + "value": 3.2, "label": "R", "show_label": true, "name": "slider", - "css": {} + "visible": true, + "style": {} } }, { @@ -71,11 +77,11 @@ "April", "May" ], - "default_value": "January", "label": "Month", "show_label": true, "name": "dropdown", - "css": {} + "visible": true, + "style": {} } }, { @@ -88,25 +94,27 @@ "Mexico", "UK" ], - "default_value": [ + "value": [ "USA", "Canada" ], "label": "Countries", "show_label": true, "name": "checkboxgroup", - "css": {} + "visible": true, + "style": {} } }, { "id": 4, "type": "checkbox", "props": { - "default_value": false, + "value": false, "label": "Social Distancing?", "show_label": true, "name": "checkbox", - "css": {} + "visible": true, + "style": {} } }, { @@ -114,26 +122,32 @@ "type": "row", "props": { "type": "row", - "css": {}, - "default_value": true + "visible": true, + "style": { + "mobile_collapse": false + } } }, { "id": 11, "type": "button", "props": { - "default_value": "Clear", + "value": "Clear", + "variant": "secondary", "name": "button", - "css": {} + "visible": true, + "style": {} } }, { "id": 12, "type": "button", "props": { - "default_value": "Submit", + "value": "Submit", + "variant": "primary", "name": "button", - "css": {} + "visible": true, + "style": {} } }, { @@ -142,8 +156,8 @@ "props": { "type": "column", "variant": "panel", - "css": {}, - "default_value": true + "visible": true, + "style": {} } }, { @@ -152,7 +166,8 @@ "props": { "cover_container": true, "name": "statustracker", - "css": {} + "visible": true, + "style": {} } }, { @@ -163,7 +178,8 @@ "show_label": true, "interactive": false, "name": "plot", - "css": {} + "visible": true, + "style": {} } }, { @@ -171,21 +187,83 @@ "type": "row", "props": { "type": "row", - "css": {}, - "default_value": true + "visible": true, + "style": { + "mobile_collapse": false + } } }, { "id": 16, "type": "button", "props": { - "default_value": "Flag", + "value": "Flag", + "variant": "secondary", "name": "button", - "css": {} + "visible": true, + "style": {} + } + }, + { + "id": 17, + "type": "dataset", + "props": { + "components": [ + "dropdown", + "slider", + "dropdown", + "checkboxgroup", + "checkbox" + ], + "headers": [ + "Plot Type", + "R", + "Month", + "Countries", + "Social Distancing?" 
+ ], + "samples": [ + [ + "Matplotlib", + 2, + "March", + [ + "Mexico", + "UK" + ], + true + ], + [ + "Plotly", + 3.6, + "February", + [ + "Canada", + "Mexico", + "UK" + ], + false + ], + [ + "Bokeh", + 1.2, + "May", + [ + "UK" + ], + true + ] + ], + "type": "index", + "name": "dataset", + "visible": true, + "style": {} } } ], "theme": "default", + "css": null, + "title": "Gradio", "enable_queue": false, "layout": { "id": 6, @@ -249,27 +327,13 @@ ] } ] + }, + { + "id": 17 } ] }, "dependencies": [ - { - "targets": [ - 16 - ], - "trigger": "click", - "inputs": [ - 0, - 1, - 2, - 3, - 4, - 5 - ], - "outputs": [], - "status_tracker": null, - "queue": null - }, { "targets": [ 12 @@ -285,8 +349,25 @@ "outputs": [ 5 ], + "backend_fn": true, + "js": null, "status_tracker": 14, - "queue": null + "queue": null, + "api_name": "predict", + "scroll_to_output": true, + "show_progress": true, + "documentation": [ + [ + null, + null, + null, + null, + null + ], + [ + null + ] + ] }, { "targets": [ @@ -303,8 +384,59 @@ 5, 9 ], + "backend_fn": false, + "js": "() => [null, 3.2, null, [], null, null, {\"variant\": null, \"visible\": true, \"__type__\": \"update\"}]\n ", "status_tracker": null, - "queue": null + "queue": null, + "api_name": null, + "scroll_to_output": false, + "show_progress": true + }, + { + "targets": [ + 16 + ], + "trigger": "click", + "inputs": [ + 0, + 1, + 2, + 3, + 4, + 5 + ], + "outputs": [], + "backend_fn": true, + "js": null, + "status_tracker": null, + "queue": false, + "api_name": null, + "scroll_to_output": false, + "show_progress": true + }, + { + "targets": [ + 17 + ], + "trigger": "click", + "inputs": [ + 17 + ], + "outputs": [ + 0, + 1, + 2, + 3, + 4, + 5 + ], + "backend_fn": true, + "js": null, + "status_tracker": null, + "queue": false, + "api_name": null, + "scroll_to_output": false, + "show_progress": true } ] } \ No newline at end of file diff --git a/gradio/__init__.py b/gradio/__init__.py index b330b0fc83..64d8f52cfc 100644 --- a/gradio/__init__.py +++ b/gradio/__init__.py @@ -26,6 +26,7 @@ from gradio.components import ( Highlightedtext, HighlightedText, Image, + Interpretation, Json, Label, Markdown, diff --git a/gradio/blocks.py b/gradio/blocks.py index 046d941385..5529920385 100644 --- a/gradio/blocks.py +++ b/gradio/blocks.py @@ -202,11 +202,13 @@ class class_or_instancemethod(classmethod): @document() def update(**kwargs) -> dict: """ - Updates component parameters. + Updates component properties. This is a shorthand for using the update method on a component. For example, rather than using gr.Number.update(...) you can just use gr.update(...). + Note that your editor's autocompletion will suggest proper parameters + if you use the update method on the component. - Demos: blocks_update, blocks_essay_update + Demos: blocks_essay, blocks_update, blocks_essay_update Parameters: kwargs: Key-word arguments used to update the component's properties. diff --git a/gradio/components.py b/gradio/components.py index 7f6e697f09..e095e283cb 100644 --- a/gradio/components.py +++ b/gradio/components.py @@ -2894,7 +2894,7 @@ class ColorPicker(Changeable, Submittable, IOComponent): Preprocessing: passes selected color value as a {str} into the function. Postprocessing: expects a {str} returned from function and sets color picker value to it. Examples-format: a {str} with a hexadecimal representation of a color, e.g. "#ff0000" for red. 
- Demos: color_picker + Demos: color_picker, color_generator """ def __init__( @@ -3148,14 +3148,14 @@ class HighlightedText(Changeable, IOComponent): """ Displays text that contains spans that are highlighted by category or numerical value. Preprocessing: this component does *not* accept input. - Postprocessing: expects a {List[Tuple[str, float | str]]]} consisting of spans of text and their associated labels. + Postprocessing: expects a {List[Tuple[str, float | str]]} consisting of spans of text and their associated labels, or a {Dict} with two keys: (1) "text", whose value is the complete text, and (2) "entities", which is a list of dictionaries, each of which has the keys: "entity" (consisting of the entity label), "start" (the character index where the label starts), and "end" (the character index where the label ends). Demos: diff_texts, text_analysis """ def __init__( self, - value: Optional[str] = None, + value: Optional[List[Tuple[str, str | float | None]] | Dict] = None, *, color_map: Dict[str, str] = None, # Parameter moved to HighlightedText.style() show_legend: bool = False, @@ -3206,7 +3206,7 @@ class HighlightedText(Changeable, IOComponent): @staticmethod def update( - value: Optional[Any] = None, + value: Optional[List[Tuple[str, str | float | None]] | Dict] = None, color_map: Optional[Dict[str, str]] = None, show_legend: Optional[bool] = None, label: Optional[str] = None, @@ -3225,8 +3225,8 @@ class HighlightedText(Changeable, IOComponent): return updated_config def postprocess( - self, y: List[Tuple[str, str | float | None]] - ) -> List[Tuple[str, str | float | None]]: + self, y: Optional[List[Tuple[str, str | float | None]] | Dict] + ) -> Optional[List[Tuple[str, str | float | None]]]: """ Parameters: y: List of (word, category) tuples Returns: List of (word, category) tuples """ if y is None: return None + if isinstance(y, dict): + text = y["text"] + entities = y["entities"] + if len(entities) == 0: + y = [(text, None)] + else: + list_format = [] + index = 0 + for entity in entities: + list_format.append((text[index : entity["start"]], None)) + list_format.append( + (text[entity["start"] : entity["end"]], entity["entity"]) + ) + index = entity["end"] + list_format.append((text[index:], None)) + y = list_format if self.combine_adjacent: output = [] running_text, running_category = None, None @@ -4015,10 +4031,13 @@ class Markdown(IOComponent, Changeable): ############################ +@document() class Dataset(Clickable, Component): """ - Used to create a output widget for showing datasets. Used to render the examples - box in the interface. + Used to create an output widget for showing datasets. Used to render the examples + box. + Preprocessing: this component does *not* accept input. + Postprocessing: expects a {list} of {lists} corresponding to the dataset data. """ def __init__( @@ -4088,9 +4107,12 @@ class Dataset(Clickable, Component): ) +@document() class Interpretation(Component): """ Used to create an interpretation widget for a component. + Preprocessing: this component does *not* accept input. + Postprocessing: expects a {dict} with keys "original" and "interpretation". """ def __init__( @@ -4101,6 +4123,12 @@ class Interpretation(Component): elem_id: Optional[str] = None, **kwargs, ): + """ + Parameters: + component: Which component to show in the interpretation widget. + visible: Whether or not the interpretation is visible. + elem_id: An optional string that is assigned as the id of this component in the HTML DOM.
Can be used for targeting CSS styles. + """ Component.__init__(self, visible=visible, elem_id=elem_id, **kwargs) self.component = component diff --git a/gradio/examples.py b/gradio/examples.py index 882f9cf47c..87ae443c32 100644 --- a/gradio/examples.py +++ b/gradio/examples.py @@ -8,6 +8,7 @@ import os import shutil from typing import TYPE_CHECKING, Any, Callable, List, Optional, Tuple +from gradio import utils from gradio.components import Dataset from gradio.documentation import document, set_documentation_group from gradio.flagging import CSVLogger @@ -153,13 +154,13 @@ class Examples: self.cache_interface_examples() def load_example(example_id): - processed_example = self.processed_examples[example_id] if cache_examples: - processed_example += self.load_from_cache(example_id) - if len(processed_example) == 1: - return processed_example[0] + processed_example = self.processed_examples[ + example_id + ] + self.load_from_cache(example_id) else: - return processed_example + processed_example = self.processed_examples[example_id] + return utils.resolve_singleton(processed_example) dataset.click( load_example, diff --git a/gradio/flagging.py b/gradio/flagging.py index b34f5609ed..b7a4d113da 100644 --- a/gradio/flagging.py +++ b/gradio/flagging.py @@ -10,10 +10,13 @@ from typing import TYPE_CHECKING, Any, List, Optional import gradio as gr from gradio import encryptor, utils +from gradio.documentation import document, set_documentation_group if TYPE_CHECKING: from gradio.components import Component +set_documentation_group("flagging") + class FlaggingCallback(ABC): """ @@ -54,12 +57,23 @@ class FlaggingCallback(ABC): pass +@document() class SimpleCSVLogger(FlaggingCallback): """ - A simple example implementation of the FlaggingCallback abstract class - provided for illustrative purposes. + A simplified implementation of the FlaggingCallback abstract class + provided for illustrative purposes. Each flagged sample (both the input and output data) + is logged to a CSV file on the machine running the gradio app. + Example: + import gradio as gr + def image_classifier(inp): + return {'cat': 0.3, 'dog': 0.7} + demo = gr.Interface(fn=image_classifier, inputs="image", outputs="label", + flagging_callback=SimpleCSVLogger()) """ + def __init__(self): + pass + def setup(self, components: List[Component], flagging_dir: str): self.components = components self.flagging_dir = flagging_dir @@ -95,12 +109,22 @@ class SimpleCSVLogger(FlaggingCallback): return line_count +@document() class CSVLogger(FlaggingCallback): """ - The default implementation of the FlaggingCallback abstract class. - Logs the input and output data to a CSV file. Supports encryption. + The default implementation of the FlaggingCallback abstract class. Each flagged + sample (both the input and output data) is logged to a CSV file with headers on the machine running the gradio app. 
+ Example: + import gradio as gr + def image_classifier(inp): + return {'cat': 0.3, 'dog': 0.7} + demo = gr.Interface(fn=image_classifier, inputs="image", outputs="label", + flagging_callback=CSVLogger()) """ + def __init__(self): + pass + def setup( self, components: List[Component], @@ -163,7 +187,7 @@ class CSVLogger(FlaggingCallback): if self.encryption_key: output = io.StringIO() if not is_new: with open(log_filepath, "rb") as csvfile: encrypted_csv = csvfile.read() decrypted_csv = encryptor.decrypt( self.encryption_key, encrypted_csv ) @@ -177,13 +201,13 @@ class CSVLogger(FlaggingCallback): if is_new: writer.writerow(headers) writer.writerow(csv_data) with open(log_filepath, "wb") as csvfile: csvfile.write( encryptor.encrypt(self.encryption_key, output.getvalue().encode()) ) else: if flag_index is None: - with open(log_filepath, "a", newline="") as csvfile: + with open(log_filepath, "a", newline="", encoding="utf-8") as csvfile: writer = csv.writer( csvfile, quoting=csv.QUOTE_NONNUMERIC, quotechar="'" ) @@ -191,49 +215,50 @@ class CSVLogger(FlaggingCallback): writer.writerow(headers) writer.writerow(csv_data) else: - with open(log_filepath) as csvfile: + with open(log_filepath, encoding="utf-8") as csvfile: file_content = csvfile.read() file_content = replace_flag_at_index(file_content) with open( - log_filepath, "w", newline="" + log_filepath, "w", newline="", encoding="utf-8" ) as csvfile: # newline parameter needed for Windows csvfile.write(file_content) - with open(log_filepath, "r") as csvfile: + with open(log_filepath, "r", encoding="utf-8") as csvfile: line_count = len([None for row in csv.reader(csvfile)]) - 1 return line_count +@document() class HuggingFaceDatasetSaver(FlaggingCallback): """ - A FlaggingCallback that saves flagged data to a HuggingFace dataset. + A callback that saves each flagged sample (both the input and output data) + to a HuggingFace dataset. + Example: + import gradio as gr + hf_writer = gr.HuggingFaceDatasetSaver(HF_API_TOKEN, "image-classification-mistakes") + def image_classifier(inp): + return {'cat': 0.3, 'dog': 0.7} + demo = gr.Interface(fn=image_classifier, inputs="image", outputs="label", + allow_flagging="manual", flagging_callback=hf_writer) """ def __init__( self, - hf_foken: str, + hf_token: str, dataset_name: str, organization: Optional[str] = None, private: bool = False, - verbose: bool = True, ): """ - Params: - hf_token (str): The token to use to access the huggingface API. - dataset_name (str): The name of the dataset to save the data to, e.g. - "image-classifier-1" - organization (str): The name of the organization to which to attach - the datasets. If None, the dataset attaches to the user only. - private (bool): If the dataset does not already exist, whether it - should be created as a private dataset or public. Private datasets - may require paid huggingface.co accounts - verbose (bool): Whether to print out the status of the dataset - creation. + Parameters: + hf_token: The HuggingFace token to use to create (and write the flagged sample to) the HuggingFace dataset. + dataset_name: The name of the dataset to save the data to, e.g. "image-classifier-1" + organization: The organization to save the dataset under. The hf_token must provide write access to this organization. If not provided, saved under the name of the user corresponding to the hf_token.
+ private: Whether the dataset should be private (defaults to False). """ - self.hf_foken = hf_foken + self.hf_token = hf_token self.dataset_name = dataset_name self.organization_name = organization self.dataset_private = private - self.verbose = verbose def setup(self, components: List[Component], flagging_dir: str): """ @@ -250,7 +275,7 @@ class HuggingFaceDatasetSaver(FlaggingCallback): ) path_to_dataset_repo = huggingface_hub.create_repo( name=self.dataset_name, - token=self.hf_foken, + token=self.hf_token, private=self.dataset_private, repo_type="dataset", exist_ok=True, @@ -262,7 +287,7 @@ class HuggingFaceDatasetSaver(FlaggingCallback): self.repo = huggingface_hub.Repository( local_dir=self.dataset_dir, clone_from=path_to_dataset_repo, - use_auth_token=self.hf_foken, + use_auth_token=self.hf_token, ) self.repo.git_pull() @@ -282,7 +307,7 @@ class HuggingFaceDatasetSaver(FlaggingCallback): is_new = not os.path.exists(self.log_file) infos = {"flagged": {"features": {}}} - with open(self.log_file, "a", newline="") as csvfile: + with open(self.log_file, "a", newline="", encoding="utf-8") as csvfile: writer = csv.writer(csvfile) # File previews for certain input and output types @@ -338,7 +363,7 @@ class HuggingFaceDatasetSaver(FlaggingCallback): if is_new: json.dump(infos, open(self.infos_file, "w")) - with open(self.log_file, "r") as csvfile: + with open(self.log_file, "r", encoding="utf-8") as csvfile: line_count = len([None for row in csv.reader(csvfile)]) - 1 self.repo.push_to_hub(commit_message="Flagged sample #{}".format(line_count)) diff --git a/gradio/interface.py b/gradio/interface.py index 0dcc4bcc2a..4b8835690a 100644 --- a/gradio/interface.py +++ b/gradio/interface.py @@ -45,10 +45,10 @@ if TYPE_CHECKING: # Only import for type checking (is False at runtime). @document("launch", "load", "from_pipeline", "integrate") class Interface(Blocks): """ - The Interface class is Gradio's main high-level abstraction, and allows you to create a - web-based GUI / demo around a machine learning model (or any Python function). You must specify - three parameters: (1) the function to create a GUI for (2) the desired input components and - (3) the desired output components. Further parameters can be specified to control the appearance + Interface is Gradio's main high-level class, and allows you to create a web-based GUI / demo + around a machine learning model (or any Python function) in a few lines of code. + You must specify three parameters: (1) the function to create a GUI for (2) the desired input components and + (3) the desired output components. Additional parameters can be used to control the appearance and behavior of the demo. Example: @@ -160,7 +160,7 @@ class Interface(Blocks): cache_examples: If True, caches examples in the server for fast runtime in examples. The default option in HuggingFace Spaces is True. The default option elsewhere is False. examples_per_page: If examples are provided, how many to display per page. live: whether the interface should automatically rerun if any of the inputs change. - interpretation: function that provides interpretation explaining prediction output. Pass "default" to use simple built-in interpreter, "shap" to use a built-in shapley-based interpreter, or your own custom interpretation function. + interpretation: function that provides interpretation explaining prediction output. Pass "default" to use simple built-in interpreter, "shap" to use a built-in shapley-based interpreter, or your own custom interpretation function. 
For more information on the different interpretation methods, see the Advanced Interface Features guide. num_shap: a multiplier that determines how many examples are computed for shap-based interpretation. Increasing this value will increase shap runtime, but improve results. Only applies if interpretation is "shap". title: a title for the interface; if provided, appears above the input and output components in large font. Also used as the tab title when opened in a browser window. description: a description for the interface; if provided, appears above the input and output components and beneath the title in regular font. Accepts Markdown and HTML content. diff --git a/gradio/strings.py b/gradio/strings.py index 3fb2bfcb05..b3d444e99d 100644 --- a/gradio/strings.py +++ b/gradio/strings.py @@ -18,7 +18,7 @@ en = { "COLAB_DEBUG_TRUE": "Colab notebook detected. This cell will run indefinitely so that you can see errors and logs. " "To turn off, set debug=False in launch().", "COLAB_DEBUG_FALSE": "Colab notebook detected. To show errors in colab notebook, set debug=True in launch()", - "SHARE_LINK_MESSAGE": "\nThis share link expires in 72 hours. For free permanent hosting, check out Spaces (https://www.huggingface.co/spaces)", + "SHARE_LINK_MESSAGE": "\nThis share link expires in 72 hours. For free permanent hosting, check out Spaces: https://www.huggingface.co/spaces", "PRIVATE_LINK_MESSAGE": "Since this is a private endpoint, this share link will never expire.", "INLINE_DISPLAY_BELOW": "Interface loading below...", "MEDIA_PERMISSIONS_IN_COLAB": "Your interface requires microphone or webcam permissions - this may cause issues in Colab. Use the External URL in case of issues.", diff --git a/gradio/version.txt b/gradio/version.txt index fd2a01863f..94ff29cc4d 100644 --- a/gradio/version.txt +++ b/gradio/version.txt @@ -1 +1 @@ -3.1.0 +3.1.1 diff --git a/guides/4)other_guides/creating_a_new_component.md b/guides/4)other_guides/creating_a_new_component.md new file mode 100644 index 0000000000..5bc4818087 --- /dev/null +++ b/guides/4)other_guides/creating_a_new_component.md @@ -0,0 +1,388 @@ +# How to Create a New Component + +Docs: component + +## Introduction + +The purpose of this guide is to illustrate how to add a new component, which you can use in your Gradio applications. The guide will be complemented by code snippets showing step by step how the [ColorPicker](https://gradio.app/docs/#colorpicker) component was added. + +## Prerequisites + +Make sure you have followed the [CONTRIBUTING.md](../CONTRIBUTING.md) guide in order to set up your local development environment (both client and server side). + +## Step 1 - Create a New Python Class and Import it + +The first thing to do is to create a new class within the [components.py](https://github.com/gradio-app/gradio/blob/main/gradio/components.py) file. This Python class should inherit from a list of base components and should be placed within the file in the correct section with respect to the type of component you want to add (e.g. input, output or static components). +In general, it is advisable to take an existing component as a reference (e.g. [TextBox](https://github.com/gradio-app/gradio/blob/main/gradio/components.py#L290)), copy its code as a skeleton, and then adapt it to the case at hand.
+ +Let's take a look at the class added to the [components.py](https://github.com/gradio-app/gradio/blob/main/gradio/components.py) file for the ColorPicker component: + +```python +@document() +class ColorPicker(Changeable, Submittable, IOComponent): + """ + Creates a color picker for user to select a color as string input. + Preprocessing: passes selected color value as a {str} into the function. + Postprocessing: expects a {str} returned from function and sets color picker value to it. + Examples-format: a {str} with a hexadecimal representation of a color, e.g. "#ff0000" for red. + Demos: color_picker, color_generator + """ + + def __init__( + self, + value: str = None, + *, + label: Optional[str] = None, + show_label: bool = True, + interactive: Optional[bool] = None, + visible: bool = True, + elem_id: Optional[str] = None, + **kwargs, + ): + """ + Parameters: + value: default text to provide in color picker. + label: component name in interface. + show_label: if True, will display label. + interactive: if True, will be rendered as an editable color picker; if False, editing will be disabled. If not provided, this is inferred based on whether the component is used as an input or output. + visible: If False, component will be hidden. + elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles. + """ + self.value = self.postprocess(value) + self.cleared_value = "#000000" + self.test_input = value + IOComponent.__init__( + self, + label=label, + show_label=show_label, + interactive=interactive, + visible=visible, + elem_id=elem_id, + **kwargs, + ) + + def get_config(self): + return { + "value": self.value, + **IOComponent.get_config(self), + } + + @staticmethod + def update( + value: Optional[Any] = None, + label: Optional[str] = None, + show_label: Optional[bool] = None, + visible: Optional[bool] = None, + interactive: Optional[bool] = None, + ): + updated_config = { + "value": value, + "label": label, + "show_label": show_label, + "visible": visible, + "__type__": "update", + } + return IOComponent.add_interactive_to_config(updated_config, interactive) + + # Input Functionalities + def preprocess(self, x: str | None) -> Any: + """ + Any preprocessing needed to be performed on function input. + Parameters: + x (str): text + Returns: + (str): text + """ + if x is None: + return None + else: + return str(x) + + def preprocess_example(self, x: str | None) -> Any: + """ + Any preprocessing needed to be performed on an example before being passed to the main function. + """ + if x is None: + return None + else: + return str(x) + + def generate_sample(self) -> str: + return "#000000" + + # Output Functionalities + def postprocess(self, y: str | None): + """ + Any postprocessing needed to be performed on function output. + Parameters: + y (str | None): text + Returns: + (str | None): text + """ + if y is None: + return None + else: + return str(y) + + def deserialize(self, x): + """ + Convert from serialized output (e.g. base64 representation) from a call() to the interface to a human-readable version of the output (path of an image, etc.) + """ + return x +``` + + +Once defined, you need to import the new class in the [\_\_init\_\_](https://github.com/gradio-app/gradio/blob/main/gradio/__init__.py) module in order to make it visible at the module level. + +```python + +from gradio.components import ( + ... + ColorPicker, + ...
+) + +``` + +### Step 1.1 - Writing Unit Test for Python Class + +When developing new components, you should also write a suite of unit tests for it. The tests should be placed in the [gradio/test/test_components.py](https://github.com/gradio-app/gradio/blob/main/test/test_components.py) file. Again, as above, take a cue from the tests of other components (e.g. [Textbox](https://github.com/gradio-app/gradio/blob/main/test/test_components.py)) and add as many unit tests as you think are appropriate to test all the different aspects and functionalities of the new component. For example, the following tests were added for the ColorPicker component: + +```python +class TestColorPicker(unittest.TestCase): + def test_component_functions(self): + """ + Preprocess, postprocess, serialize, save_flagged, restore_flagged, tokenize, generate_sample, get_config + """ + color_picker_input = gr.ColorPicker() + self.assertEqual(color_picker_input.preprocess("#000000"), "#000000") + self.assertEqual(color_picker_input.preprocess_example("#000000"), "#000000") + self.assertEqual(color_picker_input.postprocess(None), None) + self.assertEqual(color_picker_input.postprocess("#FFFFFF"), "#FFFFFF") + self.assertEqual(color_picker_input.serialize("#000000", True), "#000000") + + color_picker_input.interpretation_replacement = "unknown" + + self.assertEqual( + color_picker_input.get_config(), + { + "value": None, + "show_label": True, + "label": None, + "style": {}, + "elem_id": None, + "visible": True, + "interactive": None, + "name": "colorpicker", + }, + ) + self.assertIsInstance(color_picker_input.generate_sample(), str) + + def test_in_interface_as_input(self): + """ + Interface, process, interpret, + """ + iface = gr.Interface(lambda x: x, "colorpicker", "colorpicker") + self.assertEqual(iface.process(["#000000"]), ["#000000"]) + + def test_in_interface_as_output(self): + """ + Interface, process + + """ + iface = gr.Interface(lambda x: x, "colorpicker", gr.ColorPicker()) + self.assertEqual(iface.process(["#000000"]), ["#000000"]) + + def test_static(self): + """ + postprocess + """ + component = gr.ColorPicker("#000000") + self.assertEqual(component.get_config().get("value"), "#000000") +``` + +## Step 2 - Create a New Svelte Component + +Let's see the steps you need to follow to create the frontend of your new component and to map it to its Python code: +- Create a new UI-side Svelte component and figure out where to place it. The options are: create a package for the new component in the [ui/packages folder](https://github.com/gradio-app/gradio/tree/main/ui/packages) if it is completely different from existing components, or add the new component to an existing package, such as the [form package](https://github.com/gradio-app/gradio/tree/main/ui/packages/form). The ColorPicker component, for example, was included in the form package because it is similar to components that already exist. +- Create a file with an appropriate name in the src folder of the package where you placed the Svelte component; note that the name must start with a capital letter. This is the 'core' component and it's the generic component that has no knowledge of Gradio-specific functionality. Initially add any text/html to this file so that the component renders something. The Svelte application code for the ColorPicker looks roughly like this (a simplified sketch; the actual file also forwards style props and dispatches events): + +```typescript
+<script lang="ts">
+	// The core component is generic and has no Gradio-specific logic:
+	// just a native color input bound to `value`
+	export let value: string = "#000000";
+	export let disabled = false;
+</script>
+
+<input type="color" bind:value {disabled} />
+``` + +- Export this file inside the index.ts file of the package where you placed the Svelte component by doing `export { default as FileName } from "./FileName.svelte"`.
The ColorPicker file is exported in the [index.ts](https://github.com/gradio-app/gradio/blob/main/ui/packages/form/src/index.ts) file and the export is performed by doing: `export { default as ColorPicker } from "./ColorPicker.svelte";`. +- Create the Gradio-specific component in [ui/packages/app/src/components](https://github.com/gradio-app/gradio/tree/main/ui/packages/app/src/components). This is a Gradio wrapper that handles the specific logic of the library, passes the necessary data down to the core component and attaches any necessary event listeners. Copy the folder of another component, rename it and edit the code inside it, keeping the structure. + +Here you will have three files: the first is for the Svelte application, and it will look roughly like this (a simplified sketch; the props mirror the Python component's `get_config()`, and the exact imports are assumptions based on the neighboring components in that folder): + +```typescript
+<script lang="ts">
+	// Gradio-specific wrapper: receives props from the Gradio config, shows
+	// the loading status and passes the data down to the core component
+	import { ColorPicker } from "@gradio/form";
+	import StatusTracker from "../StatusTracker/StatusTracker.svelte";
+	import type { LoadingStatus } from "../StatusTracker/types";
+
+	export let label: string = "ColorPicker";
+	export let value: string = "#000000";
+	export let show_label: boolean;
+	export let loading_status: LoadingStatus;
+	export let mode: "static" | "dynamic";
+</script>
+
+<StatusTracker {...loading_status} />
+<ColorPicker {label} {show_label} bind:value disabled={mode === "static"} on:change on:submit />
+``` + +The second one contains the tests for the frontend, for example for the ColorPicker component: + +```typescript +import { test, describe, assert, afterEach } from "vitest"; +import { cleanup, render } from "@gradio/tootils"; + +import ColorPicker from "./ColorPicker.svelte"; +import type { LoadingStatus } from "../StatusTracker/types"; + +const loading_status = { + eta: 0, + queue_position: 1, + status: "complete" as LoadingStatus["status"], + scroll_to_output: false, + visible: true, + fn_index: 0 +}; + +describe("ColorPicker", () => { + afterEach(() => cleanup()); + + test("renders provided value", () => { + const { getByDisplayValue } = render(ColorPicker, { + loading_status, + show_label: true, + mode: "dynamic", + value: "#000000", + label: "ColorPicker" + }); + + const item: HTMLInputElement = getByDisplayValue("#000000"); + assert.equal(item.value, "#000000"); + }); + + test("changing the color should update the value", async () => { + const { component, getByDisplayValue } = render(ColorPicker, { + loading_status, + show_label: true, + mode: "dynamic", + value: "#000000", + label: "ColorPicker" + }); + + const item: HTMLInputElement = getByDisplayValue("#000000"); + + assert.equal(item.value, "#000000"); + + await component.$set({ + value: "#FFFFFF" + }); + + assert.equal(component.value, "#FFFFFF"); + }); +}); +``` + +The third one is the index.ts file: + +```typescript +export { default as Component } from "./ColorPicker.svelte"; +export const modes = ["static", "dynamic"]; +``` + +- Add the mapping for your component in the [directory.ts file](https://github.com/gradio-app/gradio/blob/main/ui/packages/app/src/components/directory.ts). To do this, copy and paste the mapping line of any component and edit its text. The key name must be the lowercase version of the actual component name in the Python library. So for example, for the ColorPicker component the mapping looks like this: + +```typescript +export const component_map = { +... +colorpicker: () => import("./ColorPicker"), +... +} +``` + +### Step 2.1 - Writing Unit Test for Svelte Component + +When developing new components, you should also write a suite of unit tests for it. The tests should be placed in the new component's folder in a file named MyAwesomeComponent.test.ts. Again, as above, take a cue from the tests of other components (e.g. [Textbox.test.ts](https://github.com/gradio-app/gradio/blob/main/ui/packages/app/src/components/Textbox/Textbox.test.ts)) and add as many unit tests as you think are appropriate to test all the different aspects and functionalities of the new component.
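+
+To run these frontend checks locally, you can use the same scripts that the updated CI workflow (see the `.github/workflows/ui.yml` change at the top of this diff) invokes from the ui folder: `pnpm format:check` for formatting, `pnpm ts:check` for typechecking (which is no longer allowed to fail in CI), and `pnpm test:run` for the unit tests.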
+ +## Step 3 - Create a New Demo + +The last step is to create a demo in the [gradio/demo folder](https://github.com/gradio-app/gradio/tree/main/demo), which will use the newly added component. Again, the suggestion is to reference an existing demo. Write the code for the demo in a file called run.py, add the necessary requirements and an image showing the application interface. Finally, add a gif showing its usage. +You can take a look at the [demo](https://github.com/gradio-app/gradio/tree/main/demo/color_picker) created for the ColorPicker, where an icon and a color selected through the new component are taken as input, and the same icon colored with the selected color is returned as output. + +To test the application: + +- run `python path/demo/run.py` in a terminal, which starts the backend at the address [http://localhost:7860](http://localhost:7860); +- in another terminal, from the ui folder, run `pnpm dev` to start the frontend at [localhost:3000](localhost:3000) with hot reload functionality. + +## Conclusion + +In this guide, we have shown how simple it is to add a new component to Gradio, seeing step by step how the ColorPicker component was added. For further details, you can refer to PR: [#1695](https://github.com/gradio-app/gradio/pull/1695). diff --git a/guides/4)other_guides/custom_interpretations_with_blocks.md b/guides/4)other_guides/custom_interpretations_with_blocks.md new file mode 100644 index 0000000000..e43cda5f7c --- /dev/null +++ b/guides/4)other_guides/custom_interpretations_with_blocks.md @@ -0,0 +1,195 @@ + + +# Custom Machine Learning Interpretations with Blocks +Docs: blocks, interpretation +Tags: INTERPRETATION, SENTIMENT ANALYSIS + +**Prerequisite**: This Guide requires you to know about Blocks and the interpretation feature of Interfaces. +Make sure to [read the Guide to Blocks first](/introduction_to_blocks) as well as the +interpretation section of the [Advanced Interface Features Guide](/advanced_interface_features#interpreting-your-predictions). + +## Introduction + +If you have experience working with the Interface class, then you know that interpreting the prediction of your machine learning model +is as easy as setting the `interpretation` parameter to either "default" or "shap". + +You may be wondering if it is possible to add the same interpretation functionality to an app built with the Blocks API. +Not only is it possible, but the flexibility of Blocks lets you display the interpretation output in ways that are +impossible to do with Interfaces! + +This guide will show how to: + +1. Recreate the behavior of the Interface's interpretation feature in a Blocks app. +2. Customize how interpretations are displayed in a Blocks app. + +Let's get started! + +## Setting up the Blocks app + +Let's build a sentiment classification app with the Blocks API. +This app will take text as input and output the probability that this text expresses either negative or positive sentiment. +We'll have a single input `Textbox` and a single output `Label` component. +Below is the code for the app as well as the app itself.
+ +```python +import gradio as gr +from transformers import pipeline + +sentiment_classifier = pipeline("text-classification", return_all_scores=True) + +def classifier(text): + pred = sentiment_classifier(text) + return {p["label"]: p["score"] for p in pred[0]} + +with gr.Blocks() as demo: + with gr.Row(): + with gr.Column(): + input_text = gr.Textbox(label="Input Text") + with gr.Row(): + classify = gr.Button("Classify Sentiment") + with gr.Column(): + label = gr.Label(label="Predicted Sentiment") + + classify.click(classifier, input_text, label) +demo.launch() +``` + + + +## Adding interpretations to the app + +Our goal is to present to our users how the words in the input contribute to the model's prediction. +This will help our users understand how the model works and also evaluate its effectiveness. +For example, we should expect our model to identify the words "happy" and "love" with positive sentiment - if not, it's a sign we made a mistake in training it! + +For each word in the input, we will compute a score of how much the model's prediction of positive sentiment is changed by that word. +Once we have those `(word, score)` pairs, we can use gradio to visualize them to the user. + +The [shap](https://shap.readthedocs.io/en/stable/index.html) library will help us compute the `(word, score)` pairs and +gradio will take care of displaying the output to the user. + +The following code computes the `(word, score)` pairs: + +```python +def interpretation_function(text): + explainer = shap.Explainer(sentiment_classifier) + shap_values = explainer([text]) + + # Dimensions are (batch size, text size, number of classes) + # Since we care about positive sentiment, use index 1 + scores = list(zip(shap_values.data[0], shap_values.values[0, :, 1])) + # Scores contains (word, score) pairs + + + # Format expected by gr.components.Interpretation + return {"original": text, "interpretation": scores} +``` + +Now, all we have to do is add a button that runs this function when clicked. +To display the interpretation, we will use `gr.components.Interpretation`. +This will color each word in the input either red or blue: red if it contributes to positive sentiment, and blue if it contributes to negative sentiment. +This is how `Interface` displays the interpretation output for text. + +```python +with gr.Blocks() as demo: + with gr.Row(): + with gr.Column(): + input_text = gr.Textbox(label="Input Text") + with gr.Row(): + classify = gr.Button("Classify Sentiment") + interpret = gr.Button("Interpret") + with gr.Column(): + label = gr.Label(label="Predicted Sentiment") + with gr.Column(): + interpretation = gr.components.Interpretation(input_text) + classify.click(classifier, input_text, label) + interpret.click(interpretation_function, input_text, interpretation) + +demo.launch() +``` + + + + +## Customizing how the interpretation is displayed + +The `gr.components.Interpretation` component does a good job of showing how individual words contribute to the sentiment prediction, +but what if we also wanted to display the scores themselves along with the words? + +One way to do this would be to generate a bar plot where the words are on the horizontal axis and the bar height corresponds +to the shap score. + +We can do this by modifying our `interpretation_function` to additionally return a matplotlib bar plot. +We will display it with the `gr.Plot` component in a separate tab.
+This is how the interpretation function will look: +```python +def interpretation_function(text): + explainer = shap.Explainer(sentiment_classifier) + shap_values = explainer([text]) + # Dimensions are (batch size, text size, number of classes) + # Since we care about positive sentiment, use index 1 + scores = list(zip(shap_values.data[0], shap_values.values[0, :, 1])) + + scores_desc = sorted(scores, key=lambda t: t[1])[::-1] + + # Filter out empty string added by shap + scores_desc = [t for t in scores_desc if t[0] != ""] + + fig_m = plt.figure() + + # Select top 5 words that contribute to positive sentiment + plt.bar(x=[s[0] for s in scores_desc[:5]], + height=[s[1] for s in scores_desc[:5]]) + plt.title("Top words contributing to positive sentiment") + plt.ylabel("Shap Value") + plt.xlabel("Word") + return {"original": text, "interpretation": scores}, fig_m +``` + +And this is how the app code will look: +```python +with gr.Blocks() as demo: + with gr.Row(): + with gr.Column(): + input_text = gr.Textbox(label="Input Text") + with gr.Row(): + classify = gr.Button("Classify Sentiment") + interpret = gr.Button("Interpret") + with gr.Column(): + label = gr.Label(label="Predicted Sentiment") + with gr.Column(): + with gr.Tabs(): + with gr.TabItem("Display interpretation with built-in component"): + interpretation = gr.components.Interpretation(input_text) + with gr.TabItem("Display interpretation with plot"): + interpretation_plot = gr.Plot() + + classify.click(classifier, input_text, label) + interpret.click(interpretation_function, input_text, [interpretation, interpretation_plot]) + +demo.launch() +``` + +You can see the demo below! + + + +## Beyond Sentiment Classification +Although we have focused on sentiment classification so far, you can add interpretations to almost any machine learning model. +The output must be a `gr.Image` or `gr.Label` but the input can be almost anything (`gr.Number`, `gr.Slider`, `gr.Radio`, `gr.Image`). + +Here is a demo, built with Blocks, of interpretations for an image classification model: + + + + +## Closing remarks + +We did a deep dive 🤿 into how interpretations work and how you can add them to your Blocks app. + +We also showed how the Blocks API gives you the power to control how the interpretation is visualized in your app. + +Adding interpretations is a great way to help your users understand and gain trust in your model. +Now you have all the tools you need to add them to all of your apps! diff --git a/guides/4)other_guides/named_entity_recognition.md b/guides/4)other_guides/named_entity_recognition.md new file mode 100644 index 0000000000..eedf010a82 --- /dev/null +++ b/guides/4)other_guides/named_entity_recognition.md @@ -0,0 +1,86 @@ +# Named-Entity Recognition + +Related spaces: https://huggingface.co/spaces/rajistics/biobert_ner_demo, https://huggingface.co/spaces/abidlabs/ner, https://huggingface.co/spaces/rajistics/Financial_Analyst_AI +Tags: NER, TEXT, HIGHLIGHT +Docs: highlightedtext + +## Introduction + +Named-entity recognition (NER), also known as token classification or text tagging, is the task of taking a sentence and classifying every word (or "token") into different categories, such as names of people or names of locations, or different parts of speech. + +For example, given the sentence: + +> Does Chicago have any Pakistani restaurants? + +A named-entity recognition algorithm may identify: + +* "Chicago" as a **location** +* "Pakistani" as an **ethnicity** + + +and so on.
+ +Using `gradio` (specifically the `HighlightedText` component), you can easily build a web demo of your NER model and share that with the rest of your team. + +Here is an example of a demo that you'll be able to build: + +$demo_ner_pipeline + +This tutorial will show how to take a pretrained NER model and deploy it with a Gradio interface. We will show two different ways to use the `HighlightedText` component -- depending on your NER model, either of these two ways may be easier to learn! + +### Prerequisites + +Make sure you have the `gradio` Python package already [installed](/getting_started). You will also need a pretrained named-entity recognition model. You can use your own, or, as in this tutorial, we will use one from the `transformers` library. + +### Approach 1: List of Entity Dictionaries + +Many named-entity recognition models output a list of dictionaries. Each dictionary consists of an *entity*, a "start" index, and an "end" index. This is, for example, how NER models in the `transformers` library operate: + +```py +from transformers import pipeline +ner_pipeline = pipeline("ner") +ner_pipeline("Does Chicago have any Pakistani restaurants") +``` + +Output: + +```bash +[{'entity': 'I-LOC', + 'score': 0.9988978, + 'index': 2, + 'word': 'Chicago', + 'start': 5, + 'end': 12}, + {'entity': 'I-MISC', + 'score': 0.9958592, + 'index': 5, + 'word': 'Pakistani', + 'start': 22, + 'end': 31}] +``` + +If you have such a model, it is very easy to hook it up to Gradio's `HighlightedText` component. All you need to do is pass in this **list of entities**, along with the **original text** to the model, together as a dictionary, with the keys being `"entities"` and `"text"` respectively. + +Here is a complete example: + +$code_ner_pipeline +$demo_ner_pipeline + +### Approach 2: List of Tuples + +An alternative way to pass data into the `HighlightedText` component is a list of tuples. The first element of each tuple should be the word or words that are being classified into a particular entity. The second element should be the entity label (or `None` if they should be unlabeled). The `HighlightedText` component automatically strings together the words and labels to display the entities. + +In some cases, this can be easier than the first approach. Here is a demo showing this approach using Spacy's parts-of-speech tagger: + +$code_text_analysis +$demo_text_analysis + + +-------------------------------------------- + + +And you're done! That's all you need to know to build a web-based GUI for your NER model. + +Fun tip: you can share your NER demo instantly with others simply by setting `share=True` in `launch()`. + + diff --git a/guides/4)other_guides/using_flagging.md b/guides/4)other_guides/using_flagging.md new file mode 100644 index 0000000000..5124ea8c90 --- /dev/null +++ b/guides/4)other_guides/using_flagging.md @@ -0,0 +1,191 @@ +# Using Flagging + +Related spaces: https://huggingface.co/spaces/gradio/calculator-flagging-crowdsourced, https://huggingface.co/spaces/gradio/calculator-flagging-options, https://huggingface.co/spaces/gradio/calculator-flag-basic +Tags: FLAGGING, DATA + +## Introduction + +When you demo a machine learning model, you might want to collect data from users who try the model, particularly data points in which the model is not behaving as expected. Capturing these "hard" data points is valuable because it allows you to improve your machine learning model and make it more reliable and robust.
+

Gradio simplifies the collection of this data by including a **Flag** button with every `Interface`. This allows a user or tester to easily send data back to the machine where the demo is running. In this Guide, we discuss how to use the flagging feature in more detail, both with `gradio.Interface` and with `gradio.Blocks`.

## The **Flag** button in `gradio.Interface`

Flagging with Gradio's `Interface` is especially easy. By default, underneath the output components, there is a button marked **Flag**. When a user testing your model sees an input with an interesting output, they can click the flag button to send the input and output data back to the machine where the demo is running. The sample is saved to a CSV log file (by default). If the demo involves images, audio, video, or other types of files, these are saved separately in a parallel directory and the paths to these files are saved in the CSV file.

There are [four parameters](/docs/#interface-header) in `gradio.Interface` that control how flagging works. We will go over them in greater detail.

* `allow_flagging`: this parameter can be set to either `"manual"` (default), `"auto"`, or `"never"`.
  * `manual`: users will see a button to flag, and samples are only flagged when the button is clicked.
  * `auto`: users will not see a button to flag, but every sample will be flagged automatically.
  * `never`: users will not see a button to flag, and no sample will be flagged.
* `flagging_options`: this parameter can be either `None` (default) or a list of strings.
  * If `None`, then the user simply clicks on the **Flag** button and no additional options are shown.
  * If a list of strings is provided, then the user sees several buttons, corresponding to each of the strings that are provided. For example, if the value of this parameter is `["Incorrect", "Ambiguous"]`, then buttons labeled **Flag as Incorrect** and **Flag as Ambiguous** appear. This only applies if `allow_flagging` is `"manual"`.
  * The chosen option is then logged along with the input and output.
* `flagging_dir`: this parameter takes a string.
  * It represents the name of the directory where flagged data is stored.
* `flagging_callback`: this parameter takes an instance of a subclass of the `FlaggingCallback` class.
  * Using this parameter allows you to write custom code that gets run when the flag button is clicked.
  * By default, this is set to an instance of `gr.CSVLogger`.
  * One example is setting it to an instance of `gr.HuggingFaceDatasetSaver`, which allows you to pipe any flagged data into a Hugging Face Dataset. (See more below.)

## What happens to flagged data?

Within the directory provided by the `flagging_dir` argument, a CSV file will log the flagged data.

Here's an example: the code below creates the calculator interface embedded below it:

```python
import gradio as gr


def calculator(num1, operation, num2):
    if operation == "add":
        return num1 + num2
    elif operation == "subtract":
        return num1 - num2
    elif operation == "multiply":
        return num1 * num2
    elif operation == "divide":
        return num1 / num2


iface = gr.Interface(
    calculator,
    ["number", gr.Radio(["add", "subtract", "multiply", "divide"]), "number"],
    "number",
    allow_flagging="manual"
)

iface.launch()
```



When you click the flag button above, the directory where the interface was launched will include a new `flagged/` subfolder with a CSV file inside it. This CSV file includes all the data that was flagged.
+

```directory
+-- flagged/
|   +-- logs.csv
```
_flagged/logs.csv_
```csv
num1,operation,num2,Output,timestamp
5,add,7,12,2022-01-31 11:40:51.093412
6,subtract,1.5,4.5,2022-01-31 03:25:32.023542
```

If the interface involves file data, such as for Image and Audio components, folders will be created to store the flagged files as well. For example, an `image`-to-`image` interface will create the following structure:

```directory
+-- flagged/
|   +-- logs.csv
|   +-- im/
|   |   +-- 0.png
|   |   +-- 1.png
|   +-- Output/
|   |   +-- 0.png
|   |   +-- 1.png
```
_flagged/logs.csv_
```csv
im,Output,timestamp
im/0.png,Output/0.png,2022-02-04 19:49:58.026963
im/1.png,Output/1.png,2022-02-02 10:40:51.093412
```

If you wish for the user to provide a reason for flagging, you can pass a list of strings to the `flagging_options` argument of `Interface`. Users will have to select one of these choices when flagging, and the option will be saved as an additional column in the CSV.

If we go back to the calculator example, the following code will create the interface embedded below it.
```python
iface = gr.Interface(
    calculator,
    ["number", gr.Radio(["add", "subtract", "multiply", "divide"]), "number"],
    "number",
    allow_flagging="manual",
    flagging_options=["wrong sign", "off by one", "other"]
)

iface.launch()
```


When users click the flag button, the CSV file will now include a column indicating the selected option.

_flagged/logs.csv_
```csv
num1,operation,num2,Output,flag,timestamp
5,add,7,-12,wrong sign,2022-02-04 11:40:51.093412
6,subtract,1.5,3.5,off by one,2022-02-04 11:42:32.062512
```

## The HuggingFaceDatasetSaver Callback

Sometimes, saving the data to a local CSV file doesn't make sense. For example, on Hugging Face
Spaces, developers typically don't have access to the underlying ephemeral machine hosting the Gradio
demo. That's why, by default, flagging is turned off in Hugging Face Spaces. However,
you may want to do something else with the flagged data.

We've made this super easy with the `flagging_callback` parameter.

For example, below we're going to pipe flagged data from our calculator example into a Hugging Face Dataset so that we can build a "crowd-sourced" dataset:


```python
import os

HF_TOKEN = os.getenv('HF_TOKEN')
hf_writer = gr.HuggingFaceDatasetSaver(HF_TOKEN, "crowdsourced-calculator-demo")

iface = gr.Interface(
    calculator,
    ["number", gr.Radio(["add", "subtract", "multiply", "divide"]), "number"],
    "number",
    description="Check out the crowd-sourced dataset at: [https://huggingface.co/datasets/aliabd/crowdsourced-calculator-demo](https://huggingface.co/datasets/aliabd/crowdsourced-calculator-demo)",
    allow_flagging="manual",
    flagging_options=["wrong sign", "off by one", "other"],
    flagging_callback=hf_writer
)

iface.launch()
```

Notice that we define our own
instance of `gradio.HuggingFaceDatasetSaver` using our Hugging Face token and
the name of a dataset we'd like to save samples to. In addition, we also set `allow_flagging="manual"`
because on Hugging Face Spaces, `allow_flagging` is set to `"never"` by default. Here's our demo:



You can now see all the examples flagged above in this [public Hugging Face dataset](https://huggingface.co/datasets/aliabd/crowdsourced-calculator-demo).
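You aren't limited to the built-in callbacks, either: any class that inherits from `FlaggingCallback` can be passed to `flagging_callback`, as noted below. As a rough, hypothetical sketch of what such a class can look like (the method signatures here are simplified and may differ between Gradio versions, so check `gradio/flagging.py` for the real interface):

```python
from gradio.flagging import FlaggingCallback


class PrintLogger(FlaggingCallback):
    """Toy callback that just prints flagged samples (illustrative only)."""

    def setup(self, components, flagging_dir):
        # Called once, before any data is flagged
        self.components = components
        self.flagging_dir = flagging_dir

    def flag(self, flag_data, flag_option=None, flag_index=None, username=None):
        # Called each time the flag button is clicked
        print("Flagged sample:", flag_data, "| option:", flag_option)
        return 1  # by convention, the number of samples flagged so far
```

Passing `flagging_callback=PrintLogger()` to `gr.Interface` would then route flagged samples through this class instead of the default CSV logger.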
+

![flagging callback hf](/assets/guides/flagging-callback-hf.png)

We created the `gradio.HuggingFaceDatasetSaver` class, but you can pass your own custom class (like the sketch above) as long as it inherits from `FlaggingCallback` defined in [this file](https://github.com/gradio-app/gradio/blob/master/gradio/flagging.py). If you create a cool callback, contribute it to the repo!

## Flagging with Blocks

What if you are using `gradio.Blocks`? On one hand, you have even more flexibility
with Blocks -- you can write whatever Python code you want to run when a button is clicked,
and assign that using the built-in events in Blocks.

At the same time, you might want to use an existing `FlaggingCallback` to avoid writing extra code.
This requires two steps:

1. You have to run your callback's `.setup()` somewhere in the code prior to the
first time you flag data.
2. When the flag button is clicked, you trigger the callback's `.flag()` method,
making sure to collect the arguments correctly and disable the typical preprocessing.

Here is an example with an image sepia filter Blocks demo that lets you flag
data using the default `CSVLogger`:

$code_blocks_flag
$demo_blocks_flag

## Privacy

Important note: please make sure your users understand when the data they submit is being saved, and what you plan on doing with it. This is especially important when you use `allow_flagging="auto"` (when all of the data submitted through the demo is being flagged).

### That's all! Happy building :) diff --git a/test/test_components.py b/test/test_components.py index 3b6b7e1201..d8bc3b10b2 100644 --- a/test/test_components.py +++ b/test/test_components.py @@ -1508,6 +1508,41 @@ class TestLabel(unittest.TestCase): class TestHighlightedText(unittest.TestCase): + def test_postprocess(self): + """ + postprocess + """ + component = gr.HighlightedText() + result = [ + ("", None), + ("Wolfgang", "PER"), + (" lives in ", None), + ("Berlin", "LOC"), + ("", None), + ] + result_ = component.postprocess(result) + self.assertEqual(result, result_) + + text = "Wolfgang lives in Berlin" + entities = [ + {"entity": "PER", "start": 0, "end": 8}, + {"entity": "LOC", "start": 18, "end": 24}, + ] + result_ = component.postprocess({"text": text, "entities": entities}) + self.assertEqual(result, result_) + + text = "I live there" + entities = [] + result_ = component.postprocess({"text": text, "entities": entities}) + self.assertEqual([(text, None)], result_) + + text = "Wolfgang" + entities = [ + {"entity": "PER", "start": 0, "end": 8}, + ] + result_ = component.postprocess({"text": text, "entities": entities}) + self.assertEqual([("", None), (text, "PER"), ("", None)], result_) + def test_component_functions(self): """ get_config, save_flagged, restore_flagged diff --git a/ui/global.d.ts b/ui/JSX.d.ts similarity index 63% rename from ui/global.d.ts rename to ui/JSX.d.ts index 92d180429e..90dfd97629 100644 --- a/ui/global.d.ts +++ b/ui/JSX.d.ts @@ -2,5 +2,7 @@ declare namespace svelte.JSX { interface DOMAttributes { theme?: string; "item-type"?: string; + webkitdirectory?: boolean | string; + mozdirectory?: boolean | string; } } diff --git a/ui/globals.d.ts b/ui/globals.d.ts new file mode 100644 index 0000000000..d9da4bae3a --- /dev/null +++ b/ui/globals.d.ts @@ -0,0 +1,14 @@ +declare global { + interface Window { + __gradio_mode__: "app" | "website"; + launchGradio: Function; + launchGradioFromSpaces: Function; + gradio_config: Config; + scoped_css_attach: (link: HTMLLinkElement) => void; + __gradio_loader__: 
Array<{ + $set: (args: any) => any; + }>; + } +} + +export {}; diff --git a/ui/package.json b/ui/package.json index 2c76e5e885..6e0b5505a8 100644 --- a/ui/package.json +++ b/ui/package.json @@ -47,16 +47,18 @@ "prettier-plugin-svelte": "^2.7.0", "sirv": "^2.0.2", "sirv-cli": "^2.0.2", - "svelte": "^3.47.0", - "svelte-check": "^2.7.0", + "svelte": "^3.49.0", + "svelte-check": "^2.8.0", "svelte-i18n": "^3.3.13", "svelte-preprocess": "^4.10.6", - "tailwindcss": "^3.0.23", + "tailwindcss": "^3.1.6", "tinyspy": "^0.3.0", + "typescript": "^4.7.4", "vite": "^2.9.5", "vitest": "^0.12.7" }, "devDependencies": { + "@types/tailwindcss": "^3.1.0", "@types/three": "^0.138.0" } } diff --git a/ui/packages/_cdn-test/main.js b/ui/packages/_cdn-test/main.js deleted file mode 100644 index b8f0eb2538..0000000000 --- a/ui/packages/_cdn-test/main.js +++ /dev/null @@ -1,6 +0,0 @@ -import "./style.css"; - -document.querySelector("#app").innerHTML = ` -

Hello Vite!

- Documentation -`; diff --git a/ui/packages/app/src/ApiDocs.svelte b/ui/packages/app/src/ApiDocs.svelte index b22771e490..d8d1bf0461 100644 --- a/ui/packages/app/src/ApiDocs.svelte +++ b/ui/packages/app/src/ApiDocs.svelte @@ -1,18 +1,14 @@ @@ -24,12 +23,5 @@ > - + diff --git a/ui/packages/app/src/components/ColorPicker/ColorPicker.svelte b/ui/packages/app/src/components/ColorPicker/ColorPicker.svelte index ec13b7ab65..efacd8c132 100644 --- a/ui/packages/app/src/components/ColorPicker/ColorPicker.svelte +++ b/ui/packages/app/src/components/ColorPicker/ColorPicker.svelte @@ -5,6 +5,7 @@ import { Block } from "@gradio/atoms"; import StatusTracker from "../StatusTracker/StatusTracker.svelte"; import type { LoadingStatus } from "../StatusTracker/types"; + import type { Styles } from "@gradio/utils"; export let label: string = "ColorPicker"; export let elem_id: string = ""; @@ -13,7 +14,7 @@ export let form_position: "first" | "last" | "mid" | "single" = "single"; export let show_label: boolean; - export let style: Record = {}; + export let style: Styles = {}; export let loading_status: LoadingStatus; diff --git a/ui/packages/app/src/components/ColorPicker/ColorPicker.test.ts b/ui/packages/app/src/components/ColorPicker/ColorPicker.test.ts index a6c13220e6..a7bbe1299e 100644 --- a/ui/packages/app/src/components/ColorPicker/ColorPicker.test.ts +++ b/ui/packages/app/src/components/ColorPicker/ColorPicker.test.ts @@ -2,12 +2,25 @@ import { test, describe, assert, afterEach } from "vitest"; import { cleanup, render } from "@gradio/tootils"; import ColorPicker from "./ColorPicker.svelte"; +import type { LoadingStatus } from "../StatusTracker/types"; + +const loading_status = { + eta: 0, + queue_position: 1, + status: "complete" as LoadingStatus["status"], + scroll_to_output: false, + visible: true, + fn_index: 0 +}; describe("ColorPicker", () => { afterEach(() => cleanup()); test("renders provided value", () => { const { getByDisplayValue } = render(ColorPicker, { + loading_status, + show_label: true, + mode: "dynamic", value: "#000000", label: "ColorPicker" }); @@ -18,6 +31,9 @@ describe("ColorPicker", () => { test("changing the color should update the value", async () => { const { component, getByDisplayValue } = render(ColorPicker, { + loading_status, + show_label: true, + mode: "dynamic", value: "#000000", label: "ColorPicker" }); diff --git a/ui/packages/app/src/components/Column/Column.svelte b/ui/packages/app/src/components/Column/Column.svelte index d0cb1a6690..98ad300012 100644 --- a/ui/packages/app/src/components/Column/Column.svelte +++ b/ui/packages/app/src/components/Column/Column.svelte @@ -1,11 +1,11 @@
>; + headers: Array; + }) { value = detail; await tick(); dispatch("change", detail); @@ -46,7 +49,7 @@ {col_count} values={value} {headers} - on:change={handle_change} + on:change={({ detail }) => handle_change(detail)} editable={mode === "dynamic"} {style} {wrap} diff --git a/ui/packages/app/src/components/Dataset/Dataset.svelte b/ui/packages/app/src/components/Dataset/Dataset.svelte index 473ab89d0d..e2c7f3a7b1 100644 --- a/ui/packages/app/src/components/Dataset/Dataset.svelte +++ b/ui/packages/app/src/components/Dataset/Dataset.svelte @@ -1,27 +1,25 @@ @@ -108,7 +118,7 @@ - {#each selected_samples as sample_row, i} + {#each component_meta as sample_row, i} { @@ -116,15 +126,12 @@ dispatch("click", value); }} > - {#each sample_row as sample_cell, j} - - - + {#each sample_row as { value, component }, j} + {#if components[j] !== undefined && component_map[components[j]] !== undefined} + + + + {/if} {/each} {/each} diff --git a/ui/packages/app/src/components/Dataset/ExampleComponents/Video.svelte b/ui/packages/app/src/components/Dataset/ExampleComponents/Video.svelte index bd89591a36..845e3de3f0 100644 --- a/ui/packages/app/src/components/Dataset/ExampleComponents/Video.svelte +++ b/ui/packages/app/src/components/Dataset/ExampleComponents/Video.svelte @@ -18,7 +18,7 @@ -{#if playable(value)} +{#if playable()}
diff --git a/ui/packages/app/src/components/Group/Group.svelte b/ui/packages/app/src/components/Group/Group.svelte index 835b0dbe43..91130a2055 100644 --- a/ui/packages/app/src/components/Group/Group.svelte +++ b/ui/packages/app/src/components/Group/Group.svelte @@ -1,7 +1,7 @@ + import type { ComponentType } from "svelte"; + import type { SvelteComponentDev } from "svelte/internal"; import { component_map } from "./directory"; - export let component: string; + export let component: keyof typeof component_map; export let component_props: Record; export let value: any; + + $: _component = component_map[component] as ComponentType; {#if value} import { getSaliencyColor } from "../utils"; export let interpretation: Array; - export let theme: string; -
+
{#each interpretation as interpret_value}
- - diff --git a/ui/packages/app/src/components/Interpretation/InterpretationComponents/Checkbox.svelte b/ui/packages/app/src/components/Interpretation/InterpretationComponents/Checkbox.svelte index f729f3d072..1c17b29fe6 100644 --- a/ui/packages/app/src/components/Interpretation/InterpretationComponents/Checkbox.svelte +++ b/ui/packages/app/src/components/Interpretation/InterpretationComponents/Checkbox.svelte @@ -3,10 +3,9 @@ export let original: boolean; export let interpretation: [number, number]; - export let theme: string; -
+