Merge branch 'abidlabs/similarity' of github.com:gradio-app/gradio into abidlabs/similarity
This commit is contained in: commit 10c8db8a8b
@@ -1 +1,5 @@
 from gradio.interface import *  # This makes it possible to import `Interface` as `gradio.Interface`.
+import pkg_resources
+
+current_pkg_version = pkg_resources.require("gradio")[0].version
+__version__ = current_pkg_version
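The hunk above derives gradio.__version__ from installed package metadata at import time. A minimal sketch of the same lookup, assuming gradio is installed (pkg_resources raises DistributionNotFound otherwise):

import pkg_resources

try:
    version = pkg_resources.require("gradio")[0].version
except pkg_resources.DistributionNotFound:
    version = "unknown"
print(version)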
@@ -588,7 +588,7 @@ class Image(InputComponent):
     def __init__(self, shape=None, image_mode='RGB', invert_colors=False, source="upload", tool="editor", type="numpy", label=None):
         '''
         Parameters:
-        shape (Tuple[int, int]): shape to crop and resize image to; if None, matches input image size.
+        shape (Tuple[int, int]): (width, height) shape to crop and resize image to; if None, matches input image size.
         image_mode (str): "RGB" if color, or "L" if black and white.
         invert_colors (bool): whether to invert the image as a preprocessing step.
         source (str): Source of image. "upload" creates a box where user can drop an image file, "webcam" allows user to take snapshot from their webcam, "canvas" defaults to a white image that can be edited and drawn upon with tools.
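The docstring change clarifies that shape is ordered as (width, height). A usage sketch of the constructor shown above; the label and values are illustrative, not from this commit:

import gradio

# shape is (width, height): input images are cropped and resized to 224x224.
image_input = gradio.inputs.Image(shape=(224, 224), image_mode="RGB",
                                  invert_colors=False, source="upload",
                                  tool="editor", type="numpy", label="Input Image")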
@@ -616,6 +616,7 @@ class Image(InputComponent):
     def get_template_context(self):
         return {
             "image_mode": self.image_mode,
+            "shape": self.shape,
             "source": self.source,
             "tool": self.tool,
             **super().get_template_context()
@@ -628,8 +629,7 @@ class Image(InputComponent):
             warnings.simplefilter("ignore")
             im = im.convert(self.image_mode)
         if self.shape is not None:
-            im = processing_utils.resize_and_crop(
-                im, (self.shape[0], self.shape[1]))
+            im = processing_utils.resize_and_crop(im, self.shape)
         if self.invert_colors:
             im = PIL.ImageOps.invert(im)
         if self.type == "pil":
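The preprocessing hunk collapses the resize call to pass self.shape directly. processing_utils.resize_and_crop itself is not shown in this diff; a rough stand-in using Pillow's ImageOps.fit, assuming the helper center-crops and resizes to the target (width, height):

import PIL.Image
import PIL.ImageOps

def resize_and_crop(im, shape):
    # Cover the target aspect ratio, then center-crop to exactly (width, height).
    return PIL.ImageOps.fit(im, shape, method=PIL.Image.BILINEAR)

im = PIL.Image.new("RGB", (640, 480), "gray")
print(resize_and_crop(im, (224, 224)).size)  # -> (224, 224)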
@@ -667,6 +667,8 @@ class Image(InputComponent):
 
     def get_interpretation_neighbors(self, x):
         x = processing_utils.decode_base64_to_image(x)
+        if self.shape is not None:
+            x = processing_utils.resize_and_crop(x, self.shape)
         image = np.array(x)
         segments_slic = slic(image, self.interpretation_segments, compactness=10, sigma=1)
         leave_one_out_tokens, masks = [], []
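get_interpretation_neighbors now resizes to self.shape before segmenting, so the masks line up with the preprocessed image. A sketch of leave-one-out masking over SLIC segments, as the surrounding names suggest; blanking segments to zero is an assumption, not gradio's confirmed perturbation:

import numpy as np
from skimage.segmentation import slic

image = np.random.rand(64, 64, 3)  # stand-in for the decoded input image
segments = slic(image, n_segments=16, compactness=10, sigma=1)

leave_one_out, masks = [], []
for seg_id in np.unique(segments):
    mask = segments == seg_id
    perturbed = image.copy()
    perturbed[mask] = 0  # blank out one segment per neighbor (an assumption)
    leave_one_out.append(perturbed)
    masks.append(mask)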
@@ -686,6 +688,8 @@ class Image(InputComponent):
         (List[List[float]]): A 2D array representing the interpretation score of each pixel of the image.
         """
         x = processing_utils.decode_base64_to_image(x)
+        if self.shape is not None:
+            x = processing_utils.resize_and_crop(x, self.shape)
         x = np.array(x)
         output_scores = np.zeros((x.shape[0], x.shape[1]))
 
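This hunk applies the same resize before scoring, so output_scores has the dimensions of the image the model actually saw. A sketch of painting segment-level scores back onto a per-pixel array; the segment map and score values are made up:

import numpy as np

segments = np.random.randint(0, 4, size=(64, 64))  # stand-in segment map
scores = [0.1, 0.5, 0.0, 0.9]  # made-up per-segment interpretation scores

output_scores = np.zeros(segments.shape)
for seg_id, score in enumerate(scores):
    output_scores[segments == seg_id] = score  # paint each segment's score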
@@ -51,8 +51,7 @@ function gradio(config, fn, target, example_file_path) {
       <button class="run_examples">Run All</button>
       <button class="load_prev">Load Previous <em>(CTRL + &larr;)</em></button>
       <button class="load_next">Load Next <em>(CTRL + &rarr;)</em></button>
-      <button class="order_similar">Order by Similarity</button>
-      <button class="view_embeddings">Plot Embeddings</button>
+      <button class="order_similar">Order by Similarity</em></button>
       <div class="pages invisible">Page:</div>
       <table>
       </table>
@@ -168,7 +167,7 @@ function gradio(config, fn, target, example_file_path) {
   }
   io_master.input_interfaces = input_interfaces;
   io_master.output_interfaces = output_interfaces;
-  target.find(".clear").click(function() {
+  function clear_all() {
     for (let input_interface of input_interfaces) {
       input_interface.clear();
     }
@@ -183,7 +182,8 @@ function gradio(config, fn, target, example_file_path) {
     target.find(".output_interfaces").css("opacity", 1);
     io_master.last_input = null;
     io_master.last_output = null;
-  });
+  }
+  target.find(".clear").click(clear_all);
 
   if (!config["allow_screenshot"] && !config["allow_flagging"] && !config["allow_interpretation"]) {
     target.find(".screenshot, .record, .flag, .interpret").css("visibility", "hidden");
@@ -217,6 +217,7 @@ function gradio(config, fn, target, example_file_path) {
     }
   }
   function load_example(example_id) {
+    clear_all();
     for (let [i, value] of config["examples"][example_id].entries()) {
       input_interfaces[i].load_example(value);
     };
@@ -47,6 +47,7 @@ const image_input = {
   `,
   init: function(opts) {
     var io = this;
+    this.shape = opts.shape;
     this.source = opts.source;
     this.tool = opts.tool;
     if (this.tool == "select") {
@@ -200,7 +201,10 @@ const image_input = {
   show_interpretation: function(data) {
     if (this.target.find(".image_preview").attr("src")) {
       var img = this.target.find(".image_preview")[0];
-      var size = getObjectFitSize(true, img.width, img.height, img.naturalWidth, img.naturalHeight)
+      var size = getObjectFitSize(true, img.width, img.height, img.naturalWidth, img.naturalHeight);
+      if (this.shape) {
+        size = getObjectFitSize(true, size.width, size.height, this.shape[0], this.shape[1])
+      }
       var width = size.width;
       var height = size.height;
       this.target.find(".saliency_holder").removeClass("hide").html(`
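getObjectFitSize is not defined in this diff; it presumably computes the rendered size of an image under CSS object-fit, and the new lines re-apply it with the configured shape so the saliency overlay matches the displayed preview. A Python sketch of the "contain" case, under that assumption:

def object_fit_contain(box_w, box_h, content_w, content_h):
    # Uniformly scale the content so it fits inside the box ("contain").
    scale = min(box_w / content_w, box_h / content_h)
    return content_w * scale, content_h * scale

print(object_fit_contain(300, 150, 600, 600))  # -> (150.0, 150.0)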