from __future__ import absolute_import, division, print_function

import collections
import math

import numpy as np
import torch
from pytorch_transformers.tokenization_bert import (BasicTokenizer,
                                                     whitespace_tokenize)
from torch.utils.data import DataLoader, SequentialSampler, TensorDataset


class SquadExample(object):
    """
    A single training/test example for the Squad dataset.
    For examples without an answer, the start and end position are -1.
    """

    def __init__(self,
                 qas_id,
                 question_text,
                 doc_tokens,
                 orig_answer_text=None,
                 start_position=None,
                 end_position=None):
        self.qas_id = qas_id
        self.question_text = question_text
        self.doc_tokens = doc_tokens
        self.orig_answer_text = orig_answer_text
        self.start_position = start_position
        self.end_position = end_position

    def __str__(self):
        return self.__repr__()

    def __repr__(self):
        s = ""
        s += "qas_id: %s" % (self.qas_id)
        s += ", question_text: %s" % (self.question_text)
        s += ", doc_tokens: [%s]" % (" ".join(self.doc_tokens))
        if self.start_position:
            s += ", start_position: %d" % (self.start_position)
        if self.end_position:
            s += ", end_position: %d" % (self.end_position)
        return s


class InputFeatures(object):
    """A single set of features of data."""

    def __init__(self,
                 unique_id,
                 example_index,
                 doc_span_index,
                 tokens,
                 token_to_orig_map,
                 token_is_max_context,
                 input_ids,
                 input_mask,
                 segment_ids,
                 paragraph_len,
                 start_position=None,
                 end_position=None):
        self.unique_id = unique_id
        self.example_index = example_index
        self.doc_span_index = doc_span_index
        self.tokens = tokens
        self.token_to_orig_map = token_to_orig_map
        self.token_is_max_context = token_is_max_context
        self.input_ids = input_ids
        self.input_mask = input_mask
        self.segment_ids = segment_ids
        self.paragraph_len = paragraph_len
        self.start_position = start_position
        self.end_position = end_position


def input_to_squad_example(passage, question):
    """Convert input passage and question into a SquadExample."""

    def is_whitespace(c):
        if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F:
            return True
        return False

    paragraph_text = passage
    doc_tokens = []
    char_to_word_offset = []
    prev_is_whitespace = True
    for c in paragraph_text:
        if is_whitespace(c):
            prev_is_whitespace = True
        else:
            if prev_is_whitespace:
                doc_tokens.append(c)
            else:
                doc_tokens[-1] += c
            prev_is_whitespace = False
        char_to_word_offset.append(len(doc_tokens) - 1)

    qas_id = 0
    question_text = question
    start_position = None
    end_position = None
    orig_answer_text = None

    example = SquadExample(
        qas_id=qas_id,
        question_text=question_text,
        doc_tokens=doc_tokens,
        orig_answer_text=orig_answer_text,
        start_position=start_position,
        end_position=end_position)

    return example

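# Illustrative usage (a minimal sketch; the passage and question strings below
# are hypothetical examples, not part of this module):
#
#   example = input_to_squad_example(
#       passage="Gradio lets you build machine learning demos quickly.",
#       question="What does Gradio let you build?")
#   example.doc_tokens[:3]   # ['Gradio', 'lets', 'you']
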
def _check_is_max_context(doc_spans, cur_span_index, position):
    """Check if this is the 'max context' doc span for the token."""

    # Because of the sliding window approach taken to scoring documents, a single
    # token can appear in multiple documents. E.g.
    #  Doc: the man went to the store and bought a gallon of milk
    #  Span A: the man went to the
    #  Span B: to the store and bought
    #  Span C: and bought a gallon of
    #  ...
    #
    # Now the word 'bought' will have two scores from spans B and C. We only
    # want to consider the score with "maximum context", which we define as
    # the *minimum* of its left and right context (the *sum* of left and
    # right context will always be the same, of course).
    #
    # In the example the maximum context for 'bought' would be span C since
    # it has 1 left context and 3 right context, while span B has 4 left context
    # and 0 right context.
    best_score = None
    best_span_index = None
    for (span_index, doc_span) in enumerate(doc_spans):
        end = doc_span.start + doc_span.length - 1
        if position < doc_span.start:
            continue
        if position > end:
            continue
        num_left_context = position - doc_span.start
        num_right_context = end - position
        score = min(num_left_context, num_right_context) + 0.01 * doc_span.length
        if best_score is None or score > best_score:
            best_score = score
            best_span_index = span_index

    return cur_span_index == best_span_index

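# Illustrative example of the max-context rule (a minimal sketch with
# hypothetical spans, not data produced by this module):
#
#   DocSpan = collections.namedtuple("DocSpan", ["start", "length"])
#   spans = [DocSpan(start=0, length=5), DocSpan(start=3, length=5)]
#   # The token at position 4 has 4 left / 0 right context in the first span
#   # but 1 left / 3 right context in the second, so the second span wins.
#   _check_is_max_context(spans, cur_span_index=1, position=4)   # True
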
def squad_examples_to_features(example, tokenizer, max_seq_length,
                               doc_stride, max_query_length, cls_token_at_end=False,
                               cls_token='[CLS]', sep_token='[SEP]', pad_token=0,
                               sequence_a_segment_id=0, sequence_b_segment_id=1,
                               cls_token_segment_id=0, pad_token_segment_id=0,
                               mask_padding_with_zero=True):
    """Convert a single SquadExample into a list of `InputFeatures`."""

    unique_id = 1000000000
    # cnt_pos, cnt_neg = 0, 0
    # max_N, max_M = 1024, 1024
    # f = np.zeros((max_N, max_M), dtype=np.float32)
    example_index = 0
    features = []
    # if example_index % 100 == 0:
    #     logger.info('Converting %s/%s pos %s neg %s', example_index, len(examples), cnt_pos, cnt_neg)

    query_tokens = tokenizer.tokenize(example.question_text)

    if len(query_tokens) > max_query_length:
        query_tokens = query_tokens[0:max_query_length]

    tok_to_orig_index = []
    orig_to_tok_index = []
    all_doc_tokens = []
    for (i, token) in enumerate(example.doc_tokens):
        orig_to_tok_index.append(len(all_doc_tokens))
        sub_tokens = tokenizer.tokenize(token)
        for sub_token in sub_tokens:
            tok_to_orig_index.append(i)
            all_doc_tokens.append(sub_token)

    # The -3 accounts for [CLS], [SEP] and [SEP]
    max_tokens_for_doc = max_seq_length - len(query_tokens) - 3

    # We can have documents that are longer than the maximum sequence length.
    # To deal with this we do a sliding window approach, where we take chunks
    # of up to our max length with a stride of `doc_stride`.
    _DocSpan = collections.namedtuple(  # pylint: disable=invalid-name
        "DocSpan", ["start", "length"])
    doc_spans = []
    start_offset = 0
    while start_offset < len(all_doc_tokens):
        length = len(all_doc_tokens) - start_offset
        if length > max_tokens_for_doc:
            length = max_tokens_for_doc
        doc_spans.append(_DocSpan(start=start_offset, length=length))
        if start_offset + length == len(all_doc_tokens):
            break
        start_offset += min(length, doc_stride)

    for (doc_span_index, doc_span) in enumerate(doc_spans):
        tokens = []
        token_to_orig_map = {}
        token_is_max_context = {}
        segment_ids = []

        # CLS token at the beginning
        if not cls_token_at_end:
            tokens.append(cls_token)
            segment_ids.append(cls_token_segment_id)

        # Query
        for token in query_tokens:
            tokens.append(token)
            segment_ids.append(sequence_a_segment_id)

        # SEP token
        tokens.append(sep_token)
        segment_ids.append(sequence_a_segment_id)

        # Paragraph
        for i in range(doc_span.length):
            split_token_index = doc_span.start + i
            token_to_orig_map[len(tokens)] = tok_to_orig_index[split_token_index]

            is_max_context = _check_is_max_context(doc_spans, doc_span_index,
                                                   split_token_index)
            token_is_max_context[len(tokens)] = is_max_context
            tokens.append(all_doc_tokens[split_token_index])
            segment_ids.append(sequence_b_segment_id)
        paragraph_len = doc_span.length

        # SEP token
        tokens.append(sep_token)
        segment_ids.append(sequence_b_segment_id)

        # CLS token at the end
        if cls_token_at_end:
            tokens.append(cls_token)
            segment_ids.append(cls_token_segment_id)

        input_ids = tokenizer.convert_tokens_to_ids(tokens)

        # The mask has 1 for real tokens and 0 for padding tokens. Only real
        # tokens are attended to.
        input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)

        # Zero-pad up to the sequence length.
        while len(input_ids) < max_seq_length:
            input_ids.append(pad_token)
            input_mask.append(0 if mask_padding_with_zero else 1)
            segment_ids.append(pad_token_segment_id)

        assert len(input_ids) == max_seq_length
        assert len(input_mask) == max_seq_length
        assert len(segment_ids) == max_seq_length

        start_position = None
        end_position = None

        features.append(
            InputFeatures(
                unique_id=unique_id,
                example_index=example_index,
                doc_span_index=doc_span_index,
                tokens=tokens,
                token_to_orig_map=token_to_orig_map,
                token_is_max_context=token_is_max_context,
                input_ids=input_ids,
                input_mask=input_mask,
                segment_ids=segment_ids,
                paragraph_len=paragraph_len,
                start_position=start_position,
                end_position=end_position))
        unique_id += 1

    return features

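# Illustrative usage (a minimal sketch; the checkpoint name and hyperparameter
# values are assumptions chosen for the example, not fixed by this module):
#
#   from pytorch_transformers import BertTokenizer
#
#   tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
#   features = squad_examples_to_features(example, tokenizer,
#                                         max_seq_length=384,
#                                         doc_stride=128,
#                                         max_query_length=64)
#   input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
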
def to_list(tensor):
    return tensor.detach().cpu().tolist()

def _get_best_indexes(logits, n_best_size):
    """Get the n-best logits from a list."""
    index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True)

    best_indexes = []
    for i in range(len(index_and_score)):
        if i >= n_best_size:
            break
        best_indexes.append(index_and_score[i][0])
    return best_indexes

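# For example, _get_best_indexes([0.1, 2.3, -1.0, 0.9], n_best_size=2)
# returns [1, 3]: the indexes of the two largest logits, highest first.
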
RawResult = collections.namedtuple("RawResult",
                                   ["unique_id", "start_logits", "end_logits"])

def get_final_text(pred_text, orig_text, do_lower_case, verbose_logging=False):
    """Project the tokenized prediction back to the original text."""

    # When we created the data, we kept track of the alignment between original
    # (whitespace tokenized) tokens and our WordPiece tokenized tokens. So
    # now `orig_text` contains the span of our original text corresponding to the
    # span that we predicted.
    #
    # However, `orig_text` may contain extra characters that we don't want in
    # our prediction.
    #
    # For example, let's say:
    #   pred_text = steve smith
    #   orig_text = Steve Smith's
    #
    # We don't want to return `orig_text` because it contains the extra "'s".
    #
    # We don't want to return `pred_text` because it's already been normalized
    # (the SQuAD eval script also does punctuation stripping/lower casing but
    # our tokenizer does additional normalization like stripping accent
    # characters).
    #
    # What we really want to return is "Steve Smith".
    #
    # Therefore, we have to apply a semi-complicated alignment heuristic between
    # `pred_text` and `orig_text` to get a character-to-character alignment. This
    # can fail in certain cases in which case we just return `orig_text`.

    def _strip_spaces(text):
        ns_chars = []
        ns_to_s_map = collections.OrderedDict()
        for (i, c) in enumerate(text):
            if c == " ":
                continue
            ns_to_s_map[len(ns_chars)] = i
            ns_chars.append(c)
        ns_text = "".join(ns_chars)
        return (ns_text, ns_to_s_map)

    # We first tokenize `orig_text`, strip whitespace from the result
    # and `pred_text`, and check if they are the same length. If they are
    # NOT the same length, the heuristic has failed. If they are the same
    # length, we assume the characters are one-to-one aligned.
    tokenizer = BasicTokenizer(do_lower_case=do_lower_case)

    tok_text = " ".join(tokenizer.tokenize(orig_text))

    start_position = tok_text.find(pred_text)
    if start_position == -1:
        return orig_text
    end_position = start_position + len(pred_text) - 1

    (orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text)
    (tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text)

    if len(orig_ns_text) != len(tok_ns_text):
        return orig_text

    # We then project the characters in `pred_text` back to `orig_text` using
    # the character-to-character alignment.
    tok_s_to_ns_map = {}
    for (i, tok_index) in tok_ns_to_s_map.items():
        tok_s_to_ns_map[tok_index] = i

    orig_start_position = None
    if start_position in tok_s_to_ns_map:
        ns_start_position = tok_s_to_ns_map[start_position]
        if ns_start_position in orig_ns_to_s_map:
            orig_start_position = orig_ns_to_s_map[ns_start_position]

    if orig_start_position is None:
        return orig_text

    orig_end_position = None
    if end_position in tok_s_to_ns_map:
        ns_end_position = tok_s_to_ns_map[end_position]
        if ns_end_position in orig_ns_to_s_map:
            orig_end_position = orig_ns_to_s_map[ns_end_position]

    if orig_end_position is None:
        return orig_text

    output_text = orig_text[orig_start_position:(orig_end_position + 1)]
    return output_text

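# Following the example in the comments above:
#
#   get_final_text("steve smith", "Steve Smith's", do_lower_case=True)
#   # -> "Steve Smith"
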
def _compute_softmax(scores):
    """Compute softmax probability over raw logits."""
    if not scores:
        return []

    max_score = None
    for score in scores:
        if max_score is None or score > max_score:
            max_score = score

    exp_scores = []
    total_sum = 0.0
    for score in scores:
        x = math.exp(score - max_score)
        exp_scores.append(x)
        total_sum += x

    probs = []
    for score in exp_scores:
        probs.append(score / total_sum)
    return probs

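# The maximum score is subtracted before exponentiating for numerical
# stability; the result is unchanged. For example:
#
#   _compute_softmax([1.0, 2.0])   # -> [0.2689..., 0.7310...]
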
def get_answer(example, features, all_results, n_best_size,
               max_answer_length, do_lower_case):
    example_index_to_features = collections.defaultdict(list)
    for feature in features:
        example_index_to_features[feature.example_index].append(feature)

    unique_id_to_result = {}
    for result in all_results:
        unique_id_to_result[result.unique_id] = result

    _PrelimPrediction = collections.namedtuple(
        "PrelimPrediction",
        ["feature_index", "start_index", "end_index", "start_logit", "end_logit"])

    example_index = 0
    features = example_index_to_features[example_index]

    prelim_predictions = []

    for (feature_index, feature) in enumerate(features):
        result = unique_id_to_result[feature.unique_id]
        start_indexes = _get_best_indexes(result.start_logits, n_best_size)
        end_indexes = _get_best_indexes(result.end_logits, n_best_size)
        for start_index in start_indexes:
            for end_index in end_indexes:
                # We could hypothetically create invalid predictions, e.g., predict
                # that the start of the span is in the question. We throw out all
                # invalid predictions.
                if start_index >= len(feature.tokens):
                    continue
                if end_index >= len(feature.tokens):
                    continue
                if start_index not in feature.token_to_orig_map:
                    continue
                if end_index not in feature.token_to_orig_map:
                    continue
                if not feature.token_is_max_context.get(start_index, False):
                    continue
                if end_index < start_index:
                    continue
                length = end_index - start_index + 1
                if length > max_answer_length:
                    continue
                prelim_predictions.append(
                    _PrelimPrediction(
                        feature_index=feature_index,
                        start_index=start_index,
                        end_index=end_index,
                        start_logit=result.start_logits[start_index],
                        end_logit=result.end_logits[end_index]))

    prelim_predictions = sorted(prelim_predictions,
                                key=lambda x: (x.start_logit + x.end_logit),
                                reverse=True)

    _NbestPrediction = collections.namedtuple(
        "NbestPrediction",
        ["text", "start_logit", "end_logit", "start_index", "end_index"])

    seen_predictions = {}
    nbest = []
    for pred in prelim_predictions:
        if len(nbest) >= n_best_size:
            break
        feature = features[pred.feature_index]
        orig_doc_start = -1
        orig_doc_end = -1
        if pred.start_index > 0:  # this is a non-null prediction
            tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1)]
            orig_doc_start = feature.token_to_orig_map[pred.start_index]
            orig_doc_end = feature.token_to_orig_map[pred.end_index]
            orig_tokens = example.doc_tokens[orig_doc_start:(orig_doc_end + 1)]
            tok_text = " ".join(tok_tokens)

            # De-tokenize WordPieces that have been split off.
            tok_text = tok_text.replace(" ##", "")
            tok_text = tok_text.replace("##", "")

            # Clean whitespace
            tok_text = tok_text.strip()
            tok_text = " ".join(tok_text.split())
            orig_text = " ".join(orig_tokens)

            final_text = get_final_text(tok_text, orig_text, do_lower_case)
            if final_text in seen_predictions:
                continue

            seen_predictions[final_text] = True
        else:
            final_text = ""
            seen_predictions[final_text] = True

        nbest.append(
            _NbestPrediction(
                text=final_text,
                start_logit=pred.start_logit,
                end_logit=pred.end_logit,
                start_index=orig_doc_start,
                end_index=orig_doc_end))

    if not nbest:
        nbest.append(_NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0,
                                      start_index=-1, end_index=-1))

    assert len(nbest) >= 1

    total_scores = []
    for entry in nbest:
        total_scores.append(entry.start_logit + entry.end_logit)

    probs = _compute_softmax(total_scores)

    answer = {"answer": nbest[0].text,
              "start": nbest[0].start_index,
              "end": nbest[0].end_index,
              "confidence": probs[0],
              "document": example.doc_tokens}
    return answer

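# End-to-end usage sketch (illustrative only; a SQuAD fine-tuned checkpoint is
# assumed, and the checkpoint name, device handling, and hyperparameter values
# below are assumptions rather than part of this module):
#
#   from pytorch_transformers import BertTokenizer, BertForQuestionAnswering
#
#   tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
#   model = BertForQuestionAnswering.from_pretrained("bert-base-uncased")
#   model.eval()
#
#   example = input_to_squad_example(passage, question)
#   features = squad_examples_to_features(example, tokenizer,
#                                         max_seq_length=384,
#                                         doc_stride=128,
#                                         max_query_length=64)
#   all_results = []
#   for f in features:
#       with torch.no_grad():
#           outputs = model(
#               torch.tensor([f.input_ids], dtype=torch.long),
#               token_type_ids=torch.tensor([f.segment_ids], dtype=torch.long),
#               attention_mask=torch.tensor([f.input_mask], dtype=torch.long))
#       start_logits, end_logits = outputs[0], outputs[1]
#       all_results.append(RawResult(unique_id=f.unique_id,
#                                    start_logits=to_list(start_logits[0]),
#                                    end_logits=to_list(end_logits[0])))
#   answer = get_answer(example, features, all_results,
#                       n_best_size=20, max_answer_length=30, do_lower_case=True)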