Unverified Commit 28f5f835 authored by Cagri Eryilmaz, committed by GitHub

Bert squad example (#779)



* initial push for bert-squad example

* migraphx and ort implementation + json input sample

* notebook draft

* first working example for bert-squad with migraphx

* cleaning up ORT example

* updated inputs file, 3 questions

* Simple and rather ugly readme. Requirements file

* formatting

* updates to readme file

* Update README.md

* Update README.md

* cleanup

* no need timer function for now

* jupyter notebook example

* updates to notebook file

* readme flow change

* typo in notebook

* another example input file

* cleanup

* benchmark file

* formatting

* bert update to examples readme file

* formatting

* missed another formatting

* removed path workaround from .py and notebook

* renaming requirements file to requirements_bertsquad.txt

* no need for bench and ort files

* reflecting requirement file name change in notebook

* removing duplicates of import json

* formatting
Co-authored-by: root <root@rocm-framework-1.amd.com>
Co-authored-by: kahmed10 <15948690+kahmed10@users.noreply.github.com>
parent 35d1bcc2
@@ -11,3 +11,4 @@ This directory contains examples of common use cases for MIGraphX.
- [MIGraphX Docker Container](./migraphx_docker)
- [MIGraphX Driver](./migraphx_driver)
- [Python Resnet50 Inference](./python_api_inference)
- [Python BERT SQuAD Inference](./python_bert_squad_example)
\ No newline at end of file
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# BERT-SQuAD Inference Example with AMD MIGraphX"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"This tutorial shows how to run the BERT-Squad model on ONNX-Runtime with MIGraphX backend."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Requirements "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"!pip3 install -r requirements_bertsquad.txt"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import numpy as np\n",
"import json\n",
"import time\n",
"import os.path\n",
"from os import path\n",
"import sys\n",
"\n",
"import tokenization\n",
"from run_onnx_squad import *\n",
"\n",
"import migraphx"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Download BERT ONNX file"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"!wget -nc https://github.com/onnx/models/raw/master/text/machine_comprehension/bert-squad/model/bertsquad-10.onnx"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Download uncased file / vocabulary"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"!apt-get install unzip\n",
"!wget -q -nc https://storage.googleapis.com/bert_models/2018_10_18/uncased_L-12_H-768_A-12.zip\n",
"!unzip -n uncased_L-12_H-768_A-12.zip"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Input data"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"input_file = 'inputs.json'\n",
"with open(input_file) as json_file:\n",
" test_data = json.load(json_file)\n",
" print(json.dumps(test_data, indent=2))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Configuration for inference"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"max_seq_length = 256\n",
"doc_stride = 128\n",
"max_query_length = 64\n",
"batch_size = 1\n",
"n_best_size = 20\n",
"max_answer_length = 30"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Read vocabulary file and tokenize"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"vocab_file = os.path.join('uncased_L-12_H-768_A-12', 'vocab.txt')\n",
"tokenizer = tokenization.FullTokenizer(vocab_file=vocab_file,\n",
" do_lower_case=True)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Convert the example to features to input"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# preprocess input\n",
"predict_file = 'inputs.json'\n",
"\n",
"# Use read_squad_examples method from run_onnx_squad to read the input file\n",
"eval_examples = read_squad_examples(input_file=predict_file)\n",
"\n",
"# Use convert_examples_to_features method from run_onnx_squad to get parameters from the input\n",
"input_ids, input_mask, segment_ids, extra_data = convert_examples_to_features(\n",
" eval_examples, tokenizer, max_seq_length, doc_stride, max_query_length)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Compile with MIGraphX for GPU"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"model = migraphx.parse_onnx(\"bertsquad-10.onnx\")\n",
"model.compile(migraphx.get_target(\"gpu\"))\n",
"#model.print()\n",
"\n",
"model.get_parameter_names()\n",
"model.get_parameter_shapes()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Run the input through the model"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"n = len(input_ids)\n",
"bs = batch_size\n",
"all_results = []\n",
"\n",
"for idx in range(0, n):\n",
" item = eval_examples[idx]\n",
" print(item)\n",
"\n",
" result = model.run({\n",
" \"unique_ids_raw_output___9:0\":\n",
" np.array([item.qas_id], dtype=np.int64),\n",
" \"input_ids:0\":\n",
" input_ids[idx:idx + bs],\n",
" \"input_mask:0\":\n",
" input_mask[idx:idx + bs],\n",
" \"segment_ids:0\":\n",
" segment_ids[idx:idx + bs]\n",
" })\n",
"\n",
" in_batch = result[1].get_shape().lens()[0]\n",
" print(in_batch)\n",
" start_logits = [float(x) for x in result[1].tolist()]\n",
" end_logits = [float(x) for x in result[0].tolist()]\n",
" # print(start_logits)\n",
" # print(end_logits)\n",
" for i in range(0, in_batch):\n",
" unique_id = len(all_results)\n",
" all_results.append(\n",
" RawResult(unique_id=unique_id,\n",
" start_logits=start_logits,\n",
" end_logits=end_logits))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Get the predictions"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"output_dir = 'predictions'\n",
"os.makedirs(output_dir, exist_ok=True)\n",
"output_prediction_file = os.path.join(output_dir, \"predictions.json\")\n",
"output_nbest_file = os.path.join(output_dir, \"nbest_predictions.json\")\n",
"write_predictions(eval_examples, extra_data, all_results, n_best_size,\n",
" max_answer_length, True, output_prediction_file,\n",
" output_nbest_file)\n",
"\n",
"with open(output_prediction_file) as json_file:\n",
" test_data = json.load(json_file)\n",
" print(json.dumps(test_data, indent=2))"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.9"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
# BERT-SQuAD Example with MIGraphX
Question answering with BERT using MIGraphX optimizations on the ROCm platform.
There are two ways to run the example:
1) Install MIGraphX and Jupyter Notebook on your system, then use the `BERT-Squad.ipynb` notebook.
2) Install MIGraphX on your system and follow the steps below to run the Python script `bert-squad-migraphx.py`.
# Steps
1) Install MIGraphX in your environment by following the build instructions at https://github.com/ROCmSoftwarePlatform/AMDMIGraphX
2) Install the Python requirements:
```
pip3 install -r requirements_bertsquad.txt
```
3) Install `unzip` and fetch the uncased BERT vocabulary files:
```
apt-get install unzip
wget -q https://storage.googleapis.com/bert_models/2018_10_18/uncased_L-12_H-768_A-12.zip
unzip uncased_L-12_H-768_A-12.zip
```
4) Download the BERT ONNX model (`bertsquad-10.onnx`):
```
wget https://github.com/onnx/models/raw/master/text/machine_comprehension/bert-squad/model/bertsquad-10.onnx
```
5) Run the inference. The script compiles the model and answers the three questions provided in `inputs_amd.json`:
```
python3 bert-squad-migraphx.py
```
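For reference, here is a minimal sketch of what `bert-squad-migraphx.py` does end to end (condensed from the script and `run_onnx_squad.py` in this directory; progress printing and intermediate output are omitted):
```
import os
import numpy as np
import migraphx

import tokenization
from run_onnx_squad import (read_squad_examples, convert_examples_to_features,
                            RawResult, write_predictions)

# Tokenize the questions and context from the input JSON
tokenizer = tokenization.FullTokenizer(
    vocab_file='uncased_L-12_H-768_A-12/vocab.txt', do_lower_case=True)
examples = read_squad_examples(input_file='inputs_amd.json')
input_ids, input_mask, segment_ids, extra_data = convert_examples_to_features(
    examples, tokenizer, 256, 128, 64)  # max_seq_length, doc_stride, max_query_length

# Parse the ONNX model and compile it for the ROCm GPU target
model = migraphx.parse_onnx('bertsquad-10.onnx')
model.compile(migraphx.get_target('gpu'))

# Run each question (one window per question for this small sample) and collect logits
all_results = []
for idx, item in enumerate(examples):
    result = model.run({
        'unique_ids_raw_output___9:0': np.array([item.qas_id], dtype=np.int64),
        'input_ids:0': input_ids[idx:idx + 1],
        'input_mask:0': input_mask[idx:idx + 1],
        'segment_ids:0': segment_ids[idx:idx + 1]
    })
    all_results.append(RawResult(unique_id=len(all_results),
                                 start_logits=[float(x) for x in result[1].tolist()],
                                 end_logits=[float(x) for x in result[0].tolist()]))

# Post-process the logits into answers under ./predictions
os.makedirs('predictions', exist_ok=True)
write_predictions(examples, extra_data, all_results, 20, 30, True,
                  os.path.join('predictions', 'predictions.json'),
                  os.path.join('predictions', 'nbest_predictions.json'))
```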
## References
This example adapts the following ONNX Model Zoo notebook :notebook: to MIGraphX:
https://github.com/onnx/models/blob/master/text/machine_comprehension/bert-squad/BERT-Squad.ipynb
import numpy as np
import json
import time
import os.path
from os import path
import sys
import tokenization
from run_onnx_squad import *
import migraphx
#######################################
input_file = 'inputs_amd.json'
with open(input_file) as json_file:
test_data = json.load(json_file)
print(json.dumps(test_data, indent=2))
# preprocess input
predict_file = 'inputs_amd.json'
# Use read_squad_examples method from run_onnx_squad to read the input file
eval_examples = read_squad_examples(input_file=predict_file)
max_seq_length = 256
doc_stride = 128
max_query_length = 64
batch_size = 1
n_best_size = 20
max_answer_length = 30
vocab_file = os.path.join('uncased_L-12_H-768_A-12', 'vocab.txt')
tokenizer = tokenization.FullTokenizer(vocab_file=vocab_file,
do_lower_case=True)
# Use convert_examples_to_features method from run_onnx_squad to get parameters from the input
input_ids, input_mask, segment_ids, extra_data = convert_examples_to_features(
eval_examples, tokenizer, max_seq_length, doc_stride, max_query_length)
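# input_ids, input_mask and segment_ids each come back as int64 arrays of shape
# (number of features, max_seq_length); extra_data holds the per-feature records
# that write_predictions needs to map logits back to the original text.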
#######################################
# Compile
print("INFO: Parsing and compiling the model...")
model = migraphx.parse_onnx("bertsquad-10.onnx")
model.compile(migraphx.get_target("gpu"))
#model.print()
print(model.get_parameter_names())
print(model.get_parameter_shapes())
n = len(input_ids)
bs = batch_size
all_results = []
for idx in range(0, n):
item = eval_examples[idx]
print(item)
result = model.run({
"unique_ids_raw_output___9:0":
np.array([item.qas_id], dtype=np.int64),
"input_ids:0":
input_ids[idx:idx + bs],
"input_mask:0":
input_mask[idx:idx + bs],
"segment_ids:0":
segment_ids[idx:idx + bs]
})
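    # model.run returns the output buffers as a list; as used below, result[1]
    # holds the start logits and result[0] the end logits for this window.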
in_batch = result[1].get_shape().lens()[0]
start_logits = [float(x) for x in result[1].tolist()]
end_logits = [float(x) for x in result[0].tolist()]
for i in range(0, in_batch):
unique_id = len(all_results)
all_results.append(
RawResult(unique_id=unique_id,
start_logits=start_logits,
end_logits=end_logits))
output_dir = 'predictions'
os.makedirs(output_dir, exist_ok=True)
output_prediction_file = os.path.join(output_dir, "predictions.json")
output_nbest_file = os.path.join(output_dir, "nbest_predictions.json")
write_predictions(eval_examples, extra_data, all_results, n_best_size,
max_answer_length, True, output_prediction_file,
output_nbest_file)
with open(output_prediction_file) as json_file:
test_data = json.load(json_file)
print(json.dumps(test_data, indent=2))
{
"version": "1.4",
"data": [
{
"paragraphs": [
{
"context": "In its early years, the new convention center failed to meet attendance and revenue expectations.[12] By 2002, many Silicon Valley businesses were choosing the much larger Moscone Center in San Francisco over the San Jose Convention Center due to the latter's limited space. A ballot measure to finance an expansion via a hotel tax failed to reach the required two-thirds majority to pass. In June 2005, Team San Jose built the South Hall, a $6.77 million, blue and white tent, adding 80,000 square feet (7,400 m2) of exhibit space",
"qas": [
{
"question": "where is the businesses choosing to go?",
"id": "1"
},
{
"question": "how may votes did the ballot measure need?",
"id": "2"
},
{
"question": "By what year many Silicon Valley businesses were choosing the Moscone Center?",
"id": "3"
}
]
}
],
"title": "Conference Center"
}
]
}
\ No newline at end of file
{
"data": [
{
"paragraphs": [
{
"context": "ROCm is the first open-source exascale-class platform for accelerated computing that’s also programming-language independent. It brings a philosophy of choice, minimalism and modular software development to GPU computing. You are free to choose or even develop tools and a language run time for your application. ROCm is built for scale, it supports multi-GPU computing and has a rich system run time with the critical features that large-scale application, compiler and language-run-time development requires. Since the ROCm ecosystem is comprised of open technologies: frameworks (Tensorflow / PyTorch), libraries (MIOpen / Blas / RCCL), programming model (HIP), inter-connect (OCD) and up streamed Linux® Kernel support – the platform is continually optimized for performance and extensibility.",
"qas": [
{
"question": "What is ROCm?",
"id": "1"
},
{
"question": "Which frameworks does ROCm support?",
"id": "2"
},
{
"question": "What is ROCm built for?",
"id": "3"
}
]
}
],
"title": "AMD ROCm"
}
]
}
\ No newline at end of file
tensorflow==1.14
onnxruntime
\ No newline at end of file
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Inference for squad/bert using onnx.
This is going to do the same as 'python run_squad.py --do_predict=True ...' using a squad/bert model
that was converted to onnx. Lots of code was taken from run_squad.py.
You run it with:
python onnx_squad.py --model $SQUAD_MODEL/squad.onnx \
--vocab_file $BERT_BASE_DIR/uncased_L-12_H-768_A-12/vocab.txt
--predict_file $SQUAD_DATA/dev-v1.1.json \
--bert_config_file $BERT_BASE_DIR/uncased_L-12_H-768_A-12/bert_config.json \
--output /tmp/
"""
import argparse
import collections
import json
import logging
import math
import os
import sys
from timeit import default_timer as timer
import numpy as np
import onnxruntime as onnxrt
import six
import tokenization
RawResult = collections.namedtuple("RawResult",
["unique_id", "start_logits", "end_logits"])
Feature = collections.namedtuple("Feature", [
"unique_id", "tokens", "example_index", "token_to_orig_map",
"token_is_max_context"
])
class SquadExample(object):
"""A single training/test example for simple sequence classification."""
def __init__(self,
qas_id,
question_text,
doc_tokens,
orig_answer_text=None,
start_position=None,
end_position=None):
self.qas_id = qas_id
self.question_text = question_text
self.doc_tokens = doc_tokens
self.orig_answer_text = orig_answer_text
self.start_position = start_position
self.end_position = end_position
def __str__(self):
return self.__repr__()
def __repr__(self):
s = []
s.append("qas_id: %s" % (tokenization.printable_text(self.qas_id)))
s.append("question_text: %s" %
(tokenization.printable_text(self.question_text)))
s.append("doc_tokens: [%s]" % (" ".join(self.doc_tokens)))
if self.start_position:
s.append("start_position: %d" % (self.start_position))
        if self.end_position:
s.append("end_position: %d" % (self.end_position))
return ", ".join(s)
def _check_is_max_context(doc_spans, cur_span_index, position):
"""Check if this is the 'max context' doc span for the token."""
# Because of the sliding window approach taken to scoring documents, a single
# token can appear in multiple documents. E.g.
# Doc: the man went to the store and bought a gallon of milk
# Span A: the man went to the
# Span B: to the store and bought
# Span C: and bought a gallon of
# ...
#
# Now the word 'bought' will have two scores from spans B and C. We only
# want to consider the score with "maximum context", which we define as
# the *minimum* of its left and right context (the *sum* of left and
# right context will always be the same, of course).
#
# In the example the maximum context for 'bought' would be span C since
# it has 1 left context and 3 right context, while span B has 4 left context
# and 0 right context.
best_score = None
best_span_index = None
for (span_index, doc_span) in enumerate(doc_spans):
end = doc_span.start + doc_span.length - 1
if position < doc_span.start:
continue
if position > end:
continue
num_left_context = position - doc_span.start
num_right_context = end - position
score = min(num_left_context,
num_right_context) + 0.01 * doc_span.length
if best_score is None or score > best_score:
best_score = score
best_span_index = span_index
return cur_span_index == best_span_index
def convert_examples_to_features(examples, tokenizer, max_seq_length,
doc_stride, max_query_length):
"""Loads a data file into a list of `InputBatch`s."""
res_input_ids = []
res_input_mask = []
res_segment_ids = []
extra = []
unique_id = 0
for (example_index, example) in enumerate(examples):
query_tokens = tokenizer.tokenize(example.question_text)
if len(query_tokens) > max_query_length:
query_tokens = query_tokens[0:max_query_length]
tok_to_orig_index = []
orig_to_tok_index = []
all_doc_tokens = []
for (i, token) in enumerate(example.doc_tokens):
orig_to_tok_index.append(len(all_doc_tokens))
sub_tokens = tokenizer.tokenize(token)
for sub_token in sub_tokens:
tok_to_orig_index.append(i)
all_doc_tokens.append(sub_token)
tok_start_position = None
tok_end_position = None
# The -3 accounts for [CLS], [SEP] and [SEP]
max_tokens_for_doc = max_seq_length - len(query_tokens) - 3
# We can have documents that are longer than the maximum sequence length.
# To deal with this we do a sliding window approach, where we take chunks
        # of up to our max length with a stride of `doc_stride`.
_DocSpan = collections.namedtuple("DocSpan", ["start", "length"])
doc_spans = []
start_offset = 0
while start_offset < len(all_doc_tokens):
length = len(all_doc_tokens) - start_offset
if length > max_tokens_for_doc:
length = max_tokens_for_doc
doc_spans.append(_DocSpan(start=start_offset, length=length))
if start_offset + length == len(all_doc_tokens):
break
start_offset += min(length, doc_stride)
for (doc_span_index, doc_span) in enumerate(doc_spans):
tokens = []
token_to_orig_map = {}
token_is_max_context = {}
segment_ids = []
tokens.append("[CLS]")
segment_ids.append(0)
for token in query_tokens:
tokens.append(token)
segment_ids.append(0)
tokens.append("[SEP]")
segment_ids.append(0)
for i in range(doc_span.length):
split_token_index = doc_span.start + i
token_to_orig_map[len(
tokens)] = tok_to_orig_index[split_token_index]
is_max_context = _check_is_max_context(doc_spans,
doc_span_index,
split_token_index)
token_is_max_context[len(tokens)] = is_max_context
tokens.append(all_doc_tokens[split_token_index])
segment_ids.append(1)
tokens.append("[SEP]")
segment_ids.append(1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
res_input_ids.append(np.array(input_ids, dtype=np.int64))
res_input_mask.append(np.array(input_mask, dtype=np.int64))
res_segment_ids.append(np.array(segment_ids, dtype=np.int64))
feature = Feature(unique_id=unique_id,
tokens=tokens,
example_index=example_index,
token_to_orig_map=token_to_orig_map,
token_is_max_context=token_is_max_context)
extra.append(feature)
unique_id += 1
return np.array(res_input_ids), np.array(res_input_mask), np.array(
res_segment_ids), extra
def read_squad_examples(input_file):
"""Read a SQuAD json file into a list of SquadExample."""
with open(input_file, "r") as f:
input_data = json.load(f)["data"]
def is_whitespace(c):
if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F:
return True
return False
examples = []
for idx, entry in enumerate(input_data):
for paragraph in entry["paragraphs"]:
paragraph_text = paragraph["context"]
doc_tokens = []
char_to_word_offset = []
prev_is_whitespace = True
for c in paragraph_text:
if is_whitespace(c):
prev_is_whitespace = True
else:
if prev_is_whitespace:
doc_tokens.append(c)
else:
doc_tokens[-1] += c
prev_is_whitespace = False
char_to_word_offset.append(len(doc_tokens) - 1)
for qa in paragraph["qas"]:
qas_id = qa["id"]
question_text = qa["question"]
start_position = None
end_position = None
orig_answer_text = None
example = SquadExample(qas_id=qas_id,
question_text=question_text,
doc_tokens=doc_tokens,
orig_answer_text=orig_answer_text,
start_position=start_position,
end_position=end_position)
examples.append(example)
return examples
def write_predictions(all_examples, all_features, all_results, n_best_size,
max_answer_length, do_lower_case, output_prediction_file,
output_nbest_file):
"""Write final predictions to the json file."""
example_index_to_features = collections.defaultdict(list)
for feature in all_features:
example_index_to_features[feature.example_index].append(feature)
unique_id_to_result = {}
for result in all_results:
unique_id_to_result[result.unique_id] = result
_PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name
"PrelimPrediction", [
"feature_index", "start_index", "end_index", "start_logit",
"end_logit"
])
all_predictions = collections.OrderedDict()
all_nbest_json = collections.OrderedDict()
for (example_index, example) in enumerate(all_examples):
features = example_index_to_features[example_index]
prelim_predictions = []
for (feature_index, feature) in enumerate(features):
            if feature.unique_id not in unique_id_to_result:
print("feature not in unique_Id", feature.unique_id)
continue
result = unique_id_to_result[feature.unique_id]
start_indexes = _get_best_indexes(result.start_logits, n_best_size)
end_indexes = _get_best_indexes(result.end_logits, n_best_size)
for start_index in start_indexes:
for end_index in end_indexes:
# We could hypothetically create invalid predictions, e.g., predict
# that the start of the span is in the question. We throw out all
# invalid predictions.
if start_index >= len(feature.tokens):
continue
if end_index >= len(feature.tokens):
continue
if start_index not in feature.token_to_orig_map:
continue
if end_index not in feature.token_to_orig_map:
continue
if not feature.token_is_max_context.get(
start_index, False):
continue
if end_index < start_index:
continue
length = end_index - start_index + 1
if length > max_answer_length:
continue
prelim_predictions.append(
_PrelimPrediction(
feature_index=feature_index,
start_index=start_index,
end_index=end_index,
start_logit=result.start_logits[start_index],
end_logit=result.end_logits[end_index]))
prelim_predictions = sorted(prelim_predictions,
key=lambda x:
(x.start_logit + x.end_logit),
reverse=True)
_NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name
"NbestPrediction", ["text", "start_logit", "end_logit"])
seen_predictions = {}
nbest = []
for pred in prelim_predictions:
if len(nbest) >= n_best_size:
break
feature = features[pred.feature_index]
tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1)]
orig_doc_start = feature.token_to_orig_map[pred.start_index]
orig_doc_end = feature.token_to_orig_map[pred.end_index]
orig_tokens = example.doc_tokens[orig_doc_start:(orig_doc_end + 1)]
tok_text = " ".join(tok_tokens)
# De-tokenize WordPieces that have been split off.
tok_text = tok_text.replace(" ##", "")
tok_text = tok_text.replace("##", "")
# Clean whitespace
tok_text = tok_text.strip()
tok_text = " ".join(tok_text.split())
orig_text = " ".join(orig_tokens)
final_text = get_final_text(tok_text, orig_text, do_lower_case)
if final_text in seen_predictions:
continue
seen_predictions[final_text] = True
nbest.append(
_NbestPrediction(text=final_text,
start_logit=pred.start_logit,
end_logit=pred.end_logit))
# In very rare edge cases we could have no valid predictions. So we
# just create a nonce prediction in this case to avoid failure.
if not nbest:
nbest.append(
_NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0))
assert len(nbest) >= 1
total_scores = []
for entry in nbest:
total_scores.append(entry.start_logit + entry.end_logit)
probs = _compute_softmax(total_scores)
nbest_json = []
for (i, entry) in enumerate(nbest):
output = collections.OrderedDict()
output["text"] = entry.text
output["probability"] = probs[i]
output["start_logit"] = float(entry.start_logit)
output["end_logit"] = float(entry.end_logit)
nbest_json.append(output)
all_predictions[example.qas_id] = nbest_json[0]["text"]
all_nbest_json[example.qas_id] = nbest_json
with open(output_prediction_file, "w") as writer:
writer.write(json.dumps(all_predictions, indent=4) + "\n")
with open(output_nbest_file, "w") as writer:
writer.write(json.dumps(all_nbest_json, indent=4) + "\n")
def get_final_text(pred_text, orig_text, do_lower_case):
"""Project the tokenized prediction back to the original text."""
# When we created the data, we kept track of the alignment between original
# (whitespace tokenized) tokens and our WordPiece tokenized tokens. So
# now `orig_text` contains the span of our original text corresponding to the
# span that we predicted.
#
# However, `orig_text` may contain extra characters that we don't want in
# our prediction.
#
# For example, let's say:
# pred_text = steve smith
# orig_text = Steve Smith's
#
# We don't want to return `orig_text` because it contains the extra "'s".
#
# We don't want to return `pred_text` because it's already been normalized
# (the SQuAD eval script also does punctuation stripping/lower casing but
# our tokenizer does additional normalization like stripping accent
# characters).
#
# What we really want to return is "Steve Smith".
#
    # Therefore, we have to apply a semi-complicated alignment heuristic between
    # `pred_text` and `orig_text` to get a character-to-character alignment. This
# can fail in certain cases in which case we just return `orig_text`.
def _strip_spaces(text):
ns_chars = []
ns_to_s_map = collections.OrderedDict()
for (i, c) in enumerate(text):
if c == " ":
continue
ns_to_s_map[len(ns_chars)] = i
ns_chars.append(c)
ns_text = "".join(ns_chars)
return (ns_text, ns_to_s_map)
# We first tokenize `orig_text`, strip whitespace from the result
# and `pred_text`, and check if they are the same length. If they are
# NOT the same length, the heuristic has failed. If they are the same
# length, we assume the characters are one-to-one aligned.
tokenizer = tokenization.BasicTokenizer(do_lower_case=do_lower_case)
tok_text = " ".join(tokenizer.tokenize(orig_text))
start_position = tok_text.find(pred_text)
if start_position == -1:
return orig_text
end_position = start_position + len(pred_text) - 1
(orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text)
(tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text)
if len(orig_ns_text) != len(tok_ns_text):
return orig_text
# We then project the characters in `pred_text` back to `orig_text` using
# the character-to-character alignment.
tok_s_to_ns_map = {}
for (i, tok_index) in six.iteritems(tok_ns_to_s_map):
tok_s_to_ns_map[tok_index] = i
orig_start_position = None
if start_position in tok_s_to_ns_map:
ns_start_position = tok_s_to_ns_map[start_position]
if ns_start_position in orig_ns_to_s_map:
orig_start_position = orig_ns_to_s_map[ns_start_position]
if orig_start_position is None:
return orig_text
orig_end_position = None
if end_position in tok_s_to_ns_map:
ns_end_position = tok_s_to_ns_map[end_position]
if ns_end_position in orig_ns_to_s_map:
orig_end_position = orig_ns_to_s_map[ns_end_position]
if orig_end_position is None:
return orig_text
output_text = orig_text[orig_start_position:(orig_end_position + 1)]
return output_text
def _get_best_indexes(logits, n_best_size):
"""Get the n-best logits from a list."""
index_and_score = sorted(enumerate(logits),
key=lambda x: x[1],
reverse=True)
best_indexes = []
for i in range(len(index_and_score)):
if i >= n_best_size:
break
best_indexes.append(index_and_score[i][0])
return best_indexes
def _compute_softmax(scores):
"""Compute softmax probability over raw logits."""
if not scores:
return []
max_score = None
for score in scores:
if max_score is None or score > max_score:
max_score = score
exp_scores = []
total_sum = 0.0
for score in scores:
x = math.exp(score - max_score)
exp_scores.append(x)
total_sum += x
probs = []
for score in exp_scores:
probs.append(score / total_sum)
return probs
def main():
parser = argparse.ArgumentParser(description='onnx squad')
parser.add_argument('--model', required=True, help='model')
parser.add_argument('--vocab_file', required=True, help='vocab_file')
    parser.add_argument('--bert_config_file', help='bert_config_file')
parser.add_argument('--predict_file', required=True, help='predict_file')
parser.add_argument('--output_dir', help='output dir')
parser.add_argument('--max_seq_length',
type=int,
default=256,
help='max_seq_length')
parser.add_argument('--max_query_length',
type=int,
default=64,
help='max_query_length')
parser.add_argument('--max_answer_length',
type=int,
default=30,
help='max_answer_length')
parser.add_argument('--n_best_size',
type=int,
default=20,
help='n_best_size')
parser.add_argument('--doc_stride',
type=int,
default=128,
help='doc_stride')
parser.add_argument('--batch_size', type=int, default=1, help='batch_size')
parser.add_argument('--profile',
action='store_true',
help='enable chrome timeline trace profiling.')
parser.add_argument('--log', type=int, help='log level.')
args = parser.parse_args()
sess_options = None
if args.profile:
sess_options = onnxrt.SessionOptions()
sess_options.enable_profiling = True
sess_options.profile_file_prefix = os.path.basename(args.model)
if args.log:
sess_options = onnxrt.SessionOptions()
sess_options.session_log_verbosity_level = args.log
tokenizer = tokenization.FullTokenizer(vocab_file=args.vocab_file,
do_lower_case=True)
eval_examples = read_squad_examples(input_file=args.predict_file)
input_ids, input_mask, segment_ids, extra_data = \
convert_examples_to_features(eval_examples, tokenizer, args.max_seq_length,
args.doc_stride, args.max_query_length)
sess = onnxrt.InferenceSession(args.model, sess_options)
for input_meta in sess.get_inputs():
print(input_meta)
n = len(input_ids)
bs = args.batch_size
all_results = []
start = timer()
for idx in range(0, n, bs):
data = {
"input_ids:0": input_ids[idx:idx + bs],
"input_mask:0": input_mask[idx:idx + bs],
"segment_ids:0": segment_ids[idx:idx + bs]
}
result = sess.run(["unstack:0", "unstack:1"], data)
in_batch = result[0].shape[1]
for i in range(0, in_batch):
unique_id = len(all_results)
all_results.append(
RawResult(unique_id=unique_id,
start_logits=result[0][0][i],
end_logits=result[1][0][i]))
if unique_id > 0 and unique_id % 100 == 0:
print("at {} {}sec per item".format(
unique_id, (timer() - start) / unique_id))
end = timer()
print("total time: {}sec, {}sec per item".format(
end - start, (end - start) / len(all_results)))
if args.output_dir:
output_prediction_file = os.path.join(args.output_dir,
"predictions.json")
output_nbest_file = os.path.join(args.output_dir,
"nbest_predictions.json")
write_predictions(eval_examples, extra_data, all_results,
args.n_best_size, args.max_answer_length, True,
output_prediction_file, output_nbest_file)
if args.profile:
trace_file = sess.end_profiling()
print("trace file written to: {}".format(trace_file))
return 0
if __name__ == "__main__":
sys.exit(main())
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import re
import unicodedata
import six
import tensorflow as tf
def validate_case_matches_checkpoint(do_lower_case, init_checkpoint):
"""Checks whether the casing config is consistent with the checkpoint name."""
# The casing has to be passed in by the user and there is no explicit check
# as to whether it matches the checkpoint. The casing information probably
# should have been stored in the bert_config.json file, but it's not, so
# we have to heuristically detect it to validate.
if not init_checkpoint:
return
m = re.match("^.*?([A-Za-z0-9_-]+)/bert_model.ckpt", init_checkpoint)
if m is None:
return
model_name = m.group(1)
lower_models = [
"uncased_L-24_H-1024_A-16", "uncased_L-12_H-768_A-12",
"multilingual_L-12_H-768_A-12", "chinese_L-12_H-768_A-12"
]
cased_models = [
"cased_L-12_H-768_A-12", "cased_L-24_H-1024_A-16",
"multi_cased_L-12_H-768_A-12"
]
is_bad_config = False
if model_name in lower_models and not do_lower_case:
is_bad_config = True
actual_flag = "False"
case_name = "lowercased"
opposite_flag = "True"
if model_name in cased_models and do_lower_case:
is_bad_config = True
actual_flag = "True"
case_name = "cased"
opposite_flag = "False"
if is_bad_config:
raise ValueError(
"You passed in `--do_lower_case=%s` with `--init_checkpoint=%s`. "
"However, `%s` seems to be a %s model, so you "
"should pass in `--do_lower_case=%s` so that the fine-tuning matches "
"how the model was pre-training. If this error is wrong, please "
"just comment out this check." %
(actual_flag, init_checkpoint, model_name, case_name,
opposite_flag))
def convert_to_unicode(text):
"""Converts `text` to Unicode (if it's not already), assuming utf-8 input."""
if six.PY3:
if isinstance(text, str):
return text
elif isinstance(text, bytes):
return text.decode("utf-8", "ignore")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
elif six.PY2:
if isinstance(text, str):
return text.decode("utf-8", "ignore")
elif isinstance(text, unicode):
return text
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
else:
raise ValueError("Not running on Python2 or Python 3?")
def printable_text(text):
"""Returns text encoded in a way suitable for print or `tf.logging`."""
# These functions want `str` for both Python2 and Python3, but in one case
# it's a Unicode string and in the other it's a byte string.
if six.PY3:
if isinstance(text, str):
return text
elif isinstance(text, bytes):
return text.decode("utf-8", "ignore")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
elif six.PY2:
if isinstance(text, str):
return text
elif isinstance(text, unicode):
return text.encode("utf-8")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
else:
raise ValueError("Not running on Python2 or Python 3?")
def load_vocab(vocab_file):
"""Loads a vocabulary file into a dictionary."""
vocab = collections.OrderedDict()
index = 0
with tf.gfile.GFile(vocab_file, "r") as reader:
while True:
token = convert_to_unicode(reader.readline())
if not token:
break
token = token.strip()
vocab[token] = index
index += 1
return vocab
def convert_by_vocab(vocab, items):
"""Converts a sequence of [tokens|ids] using the vocab."""
output = []
for item in items:
output.append(vocab[item])
return output
def convert_tokens_to_ids(vocab, tokens):
return convert_by_vocab(vocab, tokens)
def convert_ids_to_tokens(inv_vocab, ids):
return convert_by_vocab(inv_vocab, ids)
def whitespace_tokenize(text):
"""Runs basic whitespace cleaning and splitting on a piece of text."""
text = text.strip()
if not text:
return []
tokens = text.split()
return tokens
class FullTokenizer(object):
"""Runs end-to-end tokenziation."""
def __init__(self, vocab_file, do_lower_case=True):
self.vocab = load_vocab(vocab_file)
self.inv_vocab = {v: k for k, v in self.vocab.items()}
self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab)
def tokenize(self, text):
split_tokens = []
for token in self.basic_tokenizer.tokenize(text):
for sub_token in self.wordpiece_tokenizer.tokenize(token):
split_tokens.append(sub_token)
return split_tokens
def convert_tokens_to_ids(self, tokens):
return convert_by_vocab(self.vocab, tokens)
def convert_ids_to_tokens(self, ids):
return convert_by_vocab(self.inv_vocab, ids)
class BasicTokenizer(object):
"""Runs basic tokenization (punctuation splitting, lower casing, etc.)."""
def __init__(self, do_lower_case=True):
"""Constructs a BasicTokenizer.
Args:
do_lower_case: Whether to lower case the input.
"""
self.do_lower_case = do_lower_case
def tokenize(self, text):
"""Tokenizes a piece of text."""
text = convert_to_unicode(text)
text = self._clean_text(text)
# This was added on November 1st, 2018 for the multilingual and Chinese
# models. This is also applied to the English models now, but it doesn't
# matter since the English models were not trained on any Chinese data
# and generally don't have any Chinese data in them (there are Chinese
# characters in the vocabulary because Wikipedia does have some Chinese
# words in the English Wikipedia.).
text = self._tokenize_chinese_chars(text)
orig_tokens = whitespace_tokenize(text)
split_tokens = []
for token in orig_tokens:
if self.do_lower_case:
token = token.lower()
token = self._run_strip_accents(token)
split_tokens.extend(self._run_split_on_punc(token))
output_tokens = whitespace_tokenize(" ".join(split_tokens))
return output_tokens
def _run_strip_accents(self, text):
"""Strips accents from a piece of text."""
text = unicodedata.normalize("NFD", text)
output = []
for char in text:
cat = unicodedata.category(char)
if cat == "Mn":
continue
output.append(char)
return "".join(output)
def _run_split_on_punc(self, text):
"""Splits punctuation on a piece of text."""
chars = list(text)
i = 0
start_new_word = True
output = []
while i < len(chars):
char = chars[i]
if _is_punctuation(char):
output.append([char])
start_new_word = True
else:
if start_new_word:
output.append([])
start_new_word = False
output[-1].append(char)
i += 1
return ["".join(x) for x in output]
def _tokenize_chinese_chars(self, text):
"""Adds whitespace around any CJK character."""
output = []
for char in text:
cp = ord(char)
if self._is_chinese_char(cp):
output.append(" ")
output.append(char)
output.append(" ")
else:
output.append(char)
return "".join(output)
def _is_chinese_char(self, cp):
"""Checks whether CP is the codepoint of a CJK character."""
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
        # like all of the other languages.
if ((cp >= 0x4E00 and cp <= 0x9FFF) or #
(cp >= 0x3400 and cp <= 0x4DBF) or #
(cp >= 0x20000 and cp <= 0x2A6DF) or #
(cp >= 0x2A700 and cp <= 0x2B73F) or #
(cp >= 0x2B740 and cp <= 0x2B81F) or #
(cp >= 0x2B820 and cp <= 0x2CEAF) or
(cp >= 0xF900 and cp <= 0xFAFF) or #
(cp >= 0x2F800 and cp <= 0x2FA1F)): #
return True
return False
def _clean_text(self, text):
"""Performs invalid character removal and whitespace cleanup on text."""
output = []
for char in text:
cp = ord(char)
if cp == 0 or cp == 0xfffd or _is_control(char):
continue
if _is_whitespace(char):
output.append(" ")
else:
output.append(char)
return "".join(output)
class WordpieceTokenizer(object):
"""Runs WordPiece tokenziation."""
def __init__(self, vocab, unk_token="[UNK]", max_input_chars_per_word=200):
self.vocab = vocab
self.unk_token = unk_token
self.max_input_chars_per_word = max_input_chars_per_word
def tokenize(self, text):
"""Tokenizes a piece of text into its word pieces.
This uses a greedy longest-match-first algorithm to perform tokenization
using the given vocabulary.
For example:
input = "unaffable"
output = ["un", "##aff", "##able"]
Args:
text: A single token or whitespace separated tokens. This should have
            already been passed through `BasicTokenizer`.
Returns:
A list of wordpiece tokens.
"""
text = convert_to_unicode(text)
output_tokens = []
for token in whitespace_tokenize(text):
chars = list(token)
if len(chars) > self.max_input_chars_per_word:
output_tokens.append(self.unk_token)
continue
is_bad = False
start = 0
sub_tokens = []
while start < len(chars):
end = len(chars)
cur_substr = None
while start < end:
substr = "".join(chars[start:end])
if start > 0:
substr = "##" + substr
if substr in self.vocab:
cur_substr = substr
break
end -= 1
if cur_substr is None:
is_bad = True
break
sub_tokens.append(cur_substr)
start = end
if is_bad:
output_tokens.append(self.unk_token)
else:
output_tokens.extend(sub_tokens)
return output_tokens
def _is_whitespace(char):
"""Checks whether `chars` is a whitespace character."""
    # \t, \n, and \r are technically control characters but we treat them
# as whitespace since they are generally considered as such.
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
cat = unicodedata.category(char)
if cat == "Zs":
return True
return False
def _is_control(char):
"""Checks whether `chars` is a control character."""
# These are technically control characters but we count them as whitespace
# characters.
if char == "\t" or char == "\n" or char == "\r":
return False
cat = unicodedata.category(char)
if cat in ("Cc", "Cf"):
return True
return False
def _is_punctuation(char):
"""Checks whether `chars` is a punctuation character."""
cp = ord(char)
# We treat all non-letter/number ASCII as punctuation.
# Characters such as "^", "$", and "`" are not in the Unicode
# Punctuation class but we treat them as punctuation anyways, for
# consistency.
if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64)
or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):
return True
cat = unicodedata.category(char)
if cat.startswith("P"):
return True
return False