diff --git a/syntaxnet/.dockerignore b/syntaxnet/.dockerignore new file mode 100644 index 0000000000000000000000000000000000000000..14b990cb244dd42360aeafe2eca52228bdfcaf4c --- /dev/null +++ b/syntaxnet/.dockerignore @@ -0,0 +1,4 @@ +.git +bazel/ +Dockerfile* +tensorflow/.git diff --git a/syntaxnet/Dockerfile b/syntaxnet/Dockerfile index 9b4b9689439f47cd366f6790d698577de47d7e69..1d20f1a7b6ba928ee6ff4457ec1850542f5f2d8d 100644 --- a/syntaxnet/Dockerfile +++ b/syntaxnet/Dockerfile @@ -1,33 +1,91 @@ +# Java baseimage, for Bazel. FROM java:8 ENV SYNTAXNETDIR=/opt/tensorflow PATH=$PATH:/root/bin +# Install system packages. This doesn't include everything the TensorFlow +# dockerfile specifies, so if anything goes awry, maybe install more packages +# from there. Also, running apt-get clean before further commands will make the +# Docker images smaller. RUN mkdir -p $SYNTAXNETDIR \ && cd $SYNTAXNETDIR \ && apt-get update \ - && apt-get install git zlib1g-dev file swig python2.7 python-dev python-pip python-mock -y \ - && pip install --upgrade pip \ - && pip install -U protobuf==3.0.0b2 \ - && pip install asciitree \ - && pip install numpy \ - && wget https://github.com/bazelbuild/bazel/releases/download/0.4.3/bazel-0.4.3-installer-linux-x86_64.sh \ + && apt-get install -y \ + file \ + git \ + graphviz \ + libcurl3-dev \ + libfreetype6-dev \ + libgraphviz-dev \ + liblapack-dev \ + libopenblas-dev \ + libpng12-dev \ + libxft-dev \ + python-dev \ + python-mock \ + python-pip \ + python2.7 \ + swig \ + vim \ + zlib1g-dev \ + && apt-get clean \ + && (rm -f /var/cache/apt/archives/*.deb \ + /var/cache/apt/archives/partial/*.deb /var/cache/apt/*.bin || true) + +# Install common Python dependencies. Similar to above, remove caches +# afterwards to help keep Docker images smaller. 
+RUN pip install --ignore-installed pip \ + && python -m pip install numpy \ + && rm -rf /root/.cache/pip /tmp/pip* +RUN python -m pip install \ + asciitree \ + ipykernel \ + jupyter \ + matplotlib \ + pandas \ + protobuf \ + scipy \ + sklearn \ + && python -m ipykernel.kernelspec \ + && python -m pip install pygraphviz \ + --install-option="--include-path=/usr/include/graphviz" \ + --install-option="--library-path=/usr/lib/graphviz/" \ + && rm -rf /root/.cache/pip /tmp/pip* + +# Installs Bazel 0.4.3, the version pinned for this build. +RUN wget --quiet https://github.com/bazelbuild/bazel/releases/download/0.4.3/bazel-0.4.3-installer-linux-x86_64.sh \ + && chmod +x bazel-0.4.3-installer-linux-x86_64.sh \ - && ./bazel-0.4.3-installer-linux-x86_64.sh --user \ - && git clone --recursive https://github.com/tensorflow/models.git \ - && cd $SYNTAXNETDIR/models/syntaxnet/tensorflow \ - && echo -e "\n\n\n\n\n\n\n\n\n" | ./configure \ - && apt-get autoremove -y \ - && apt-get clean + && ./bazel-0.4.3-installer-linux-x86_64.sh \ + && rm ./bazel-0.4.3-installer-linux-x86_64.sh + +COPY WORKSPACE $SYNTAXNETDIR/syntaxnet/WORKSPACE +COPY tools/bazel.rc $SYNTAXNETDIR/syntaxnet/tools/bazel.rc +COPY tensorflow $SYNTAXNETDIR/syntaxnet/tensorflow + +# Compile common TensorFlow targets, which don't depend on DRAGNN / SyntaxNet +# source. This makes it more convenient to re-compile DRAGNN / SyntaxNet for +# development (though not as convenient as the docker-devel scripts). +RUN cd $SYNTAXNETDIR/syntaxnet/tensorflow \ + && tensorflow/tools/ci_build/builds/configured CPU \ + && cd $SYNTAXNETDIR/syntaxnet \ + && bazel build -c opt @org_tensorflow//tensorflow:tensorflow_py -RUN cd $SYNTAXNETDIR/models/syntaxnet \ - && bazel test --genrule_strategy=standalone syntaxnet/... util/utf8/... +# Build the DRAGNN and SyntaxNet code. 
+WORKDIR $SYNTAXNETDIR/syntaxnet +COPY dragnn $SYNTAXNETDIR/syntaxnet/dragnn +COPY syntaxnet $SYNTAXNETDIR/syntaxnet/syntaxnet +COPY third_party $SYNTAXNETDIR/syntaxnet/third_party +COPY util/utf8 $SYNTAXNETDIR/syntaxnet/util/utf8 +RUN bazel build -c opt //dragnn/python:all //dragnn/tools:all -WORKDIR $SYNTAXNETDIR/models/syntaxnet +# This makes the exposed IP actually "*"; we'll do host restrictions by passing +# a hostname to the `docker run` command. +COPY tensorflow/tensorflow/tools/docker/jupyter_notebook_config.py /root/.jupyter/ +EXPOSE 8888 -CMD [ "sh", "-c", "echo 'Bob brought the pizza to Alice.' | syntaxnet/demo.sh" ] +# This does not need to be compiled, only copied. +COPY examples $SYNTAXNETDIR/syntaxnet/examples +# TODO: Move this earlier in the file (don't want to invalidate caches for now). +RUN jupyter nbextension enable --py --sys-prefix widgetsnbextension -# COMMANDS to build and run -# =============================== -# mkdir build && cp Dockerfile build/ && cd build -# docker build -t syntaxnet . -# docker run syntaxnet +CMD /bin/bash -c "bazel-bin/dragnn/tools/oss_notebook_launcher notebook --debug --notebook-dir=/opt/tensorflow/syntaxnet/examples" diff --git a/syntaxnet/README.md b/syntaxnet/README.md index bce5ea75a1307243977eae6bf4e9d26c71a53925..28e06810b2669fba2eca375f32b766eaf40fa1d0 100644 --- a/syntaxnet/README.md +++ b/syntaxnet/README.md @@ -1,90 +1,71 @@ # SyntaxNet: Neural Models of Syntax. -*A TensorFlow implementation of the models described in [Andor et al. (2016)] -(http://arxiv.org/abs/1603.06042).* +*A TensorFlow toolkit for deep learning powered natural language understanding +(NLU).* -**Update**: Parsey models are now [available](universal.md) for 40 languages -trained on Universal Dependencies datasets, with support for text segmentation -and morphological analysis. +**CoNLL**: See [here](g3doc/conll2017/README.md) for instructions for using the +SyntaxNet/DRAGNN baseline for the CoNLL2017 Shared Task. 
At Google, we spend a lot of time thinking about how computer systems can read and understand human language in order to process it in intelligent ways. We are excited to share the fruits of our research with the broader community by -releasing SyntaxNet, an open-source neural network framework for [TensorFlow] -(http://www.tensorflow.org) that provides a foundation for Natural Language -Understanding (NLU) systems. Our release includes all the code needed to train -new SyntaxNet models on your own data, as well as *Parsey McParseface*, an -English parser that we have trained for you, and that you can use to analyze -English text. - -So, how accurate is Parsey McParseface? For this release, we tried to balance a -model that runs fast enough to be useful on a single machine (e.g. ~600 -words/second on a modern desktop) and that is also the most accurate parser -available. Here's how Parsey McParseface compares to the academic literature on -several different English domains: (all numbers are % correct head assignments -in the tree, or unlabelled attachment score) - -Model | News | Web | Questions ---------------------------------------------------------------------------------------------------------------- | :---: | :---: | :-------: -[Martins et al. (2013)](http://www.cs.cmu.edu/~ark/TurboParser/) | 93.10 | 88.23 | 94.21 -[Zhang and McDonald (2014)](http://research.google.com/pubs/archive/38148.pdf) | 93.32 | 88.65 | 93.37 -[Weiss et al. (2015)](http://static.googleusercontent.com/media/research.google.com/en//pubs/archive/43800.pdf) | 93.91 | 89.29 | 94.17 -[Andor et al. (2016)](http://arxiv.org/abs/1603.06042)* | 94.44 | 90.17 | 95.40 -Parsey McParseface | 94.15 | 89.08 | 94.77 - -We see that Parsey McParseface is state-of-the-art; more importantly, with -SyntaxNet you can train larger networks with more hidden units and bigger beam -sizes if you want to push the accuracy even further: [Andor et al. 
(2016)] -(http://arxiv.org/abs/1603.06042)* is simply a SyntaxNet model with a -larger beam and network. For futher information on the datasets, see that paper -under the section "Treebank Union". +releasing SyntaxNet, an open-source neural network framework for +[TensorFlow](http://www.tensorflow.org) that provides a foundation for Natural +Language Understanding (NLU) systems. Our release includes all the code needed +to train new SyntaxNet models on your own data, as well as a suite of models +that we have trained for you, and that you can use to analyze text in over 40 +languages. + +This repository is largely divided into two sub-packages: + +1. **DRAGNN: + [code](https://github.com/tensorflow/models/tree/master/syntaxnet/dragnn), + [documentation](g3doc/DRAGNN.md)** implements Dynamic Recurrent Acyclic + Graphical Neural Networks (DRAGNN), a framework for building multi-task, + fully dynamic constructed computation graphs. Practically, we use DRAGNN to + extend our prior work from [Andor et al. + (2016)](http://arxiv.org/abs/1603.06042) with end-to-end, deep recurrent + models and to provide a much easier to use interface to SyntaxNet. +1. **SyntaxNet: + [code](https://github.com/tensorflow/models/tree/master/syntaxnet/syntaxnet), + [documentation](g3doc/syntaxnet-tutorial.md)** is a transition-based + framework for natural language processing, with core functionality for + feature extraction, representing annotated data, and evaluation. As of the + DRAGNN release, it is recommended to train and deploy SyntaxNet models using + the DRAGNN framework. + +## How to use this library + +There are three ways to use SyntaxNet: + +* See [here](g3doc/conll2017/README.md) for instructions for using the + SyntaxNet/DRAGNN baseline for the CoNLL2017 Shared Task, and running the + ParseySaurus models. +* You can use DRAGNN to train your NLP models for other tasks and dataset. See + "Getting started with DRAGNN below." 
+* You can continue to use the Parsey McParseface family of pre-trained + SyntaxNet models. See "Pre-trained NLP models" below. -Parsey McParseface is also state-of-the-art for part-of-speech (POS) tagging -(numbers below are per-token accuracy): +## Installation -Model | News | Web | Questions --------------------------------------------------------------------------- | :---: | :---: | :-------: -[Ling et al. (2015)](http://www.cs.cmu.edu/~lingwang/papers/emnlp2015.pdf) | 97.44 | 94.03 | 96.18 -[Andor et al. (2016)](http://arxiv.org/abs/1603.06042)* | 97.77 | 94.80 | 96.86 -Parsey McParseface | 97.52 | 94.24 | 96.45 +### Docker installation -The first part of this tutorial describes how to install the necessary tools and -use the already trained models provided in this release. In the second part of -the tutorial we provide more background about the models, as well as -instructions for training models on other datasets. - -## Contents -* [Installation](#installation) -* [Getting Started](#getting-started) - * [Parsing from Standard Input](#parsing-from-standard-input) - * [Annotating a Corpus](#annotating-a-corpus) - * [Configuring the Python Scripts](#configuring-the-python-scripts) - * [Next Steps](#next-steps) -* [Detailed Tutorial: Building an NLP Pipeline with SyntaxNet](#detailed-tutorial-building-an-nlp-pipeline-with-syntaxnet) - * [Obtaining Data](#obtaining-data) - * [Part-of-Speech Tagging](#part-of-speech-tagging) - * [Training the SyntaxNet POS Tagger](#training-the-syntaxnet-pos-tagger) - * [Preprocessing with the Tagger](#preprocessing-with-the-tagger) - * [Dependency Parsing: Transition-Based Parsing](#dependency-parsing-transition-based-parsing) - * [Training a Parser Step 1: Local Pretraining](#training-a-parser-step-1-local-pretraining) - * [Training a Parser Step 2: Global Training](#training-a-parser-step-2-global-training) -* [Contact](#contact) -* [Credits](#credits) +The simplest way to get started with DRAGNN is by loading our Docker 
container. +[Here](g3doc/CLOUD.md) is a tutorial for running the DRAGNN container on +[GCP](https://cloud.google.com) (just as applicable to your own computer). -## Installation +### Manual installation -Running and training SyntaxNet models requires building this package from +Running and training SyntaxNet/DRAGNN models requires building this package from source. You'll need to install: * python 2.7: - * python 3 support is not available yet + * Python 3 support is not available yet * bazel: - * **version 0.4.3** - * follow the instructions [here](http://bazel.build/docs/install.html) - * Alternately, Download bazel (0.4.3) <.deb> from - [https://github.com/bazelbuild/bazel/releases] - (https://github.com/bazelbuild/bazel/releases) for your system - configuration. + * Follow the instructions [here](http://bazel.build/docs/install.html) + * Alternately, Download bazel <.deb> from + [https://github.com/bazelbuild/bazel/releases](https://github.com/bazelbuild/bazel/releases) + for your system configuration. * Install it using the command: sudo dpkg -i <.deb file> * Check for the bazel version by typing: bazel version * swig: @@ -99,6 +80,11 @@ source. You'll need to install: * `pip install asciitree` * numpy, package for scientific computing: * `pip install numpy` +* pygraphviz to visualize traces and parse trees: + * `apt-get install -y graphviz libgraphviz-dev` + * `pip install pygraphviz + --install-option="--include-path=/usr/include/graphviz" + --install-option="--library-path=/usr/lib/graphviz/"` Once you completed the above steps, you can build and test SyntaxNet with the following commands: @@ -108,17 +94,14 @@ following commands: cd models/syntaxnet/tensorflow ./configure cd .. - bazel test syntaxnet/... util/utf8/... + bazel test ... # On Mac, run the following: bazel test --linkopt=-headerpad_max_install_names \ - syntaxnet/... util/utf8/... + dragnn/... syntaxnet/... util/utf8/... ``` Bazel should complete reporting all tests passed. 
-You can also compile SyntaxNet in a [Docker](https://www.docker.com/what-docker) -container using this [Dockerfile](Dockerfile). - To build SyntaxNet with GPU support please refer to the instructions in [issues/248](https://github.com/tensorflow/models/issues/248). @@ -127,12 +110,64 @@ memory allocated for your Docker VM. ## Getting Started +We have a few guides on this README, as well as more extensive +[documentation](g3doc/). + +### Learning the DRAGNN framework + +![DRAGNN](g3doc/unrolled-dragnn.png) + +An easy and visual way to get started with DRAGNN is to run [our Jupyter +Notebook](examples/dragnn/basic_parser_tutorial.ipynb). Our tutorial +[here](g3doc/CLOUD.md) explains how to start it up from the Docker container. + +### Using the Pre-trained NLP models + +We are happy to release *Parsey McParseface*, an English parser that we have +trained for you, and that you can use to analyze English text, along with +[trained models for 40 languages](g3doc/universal.md) and support for text +segmentation and morphological analysis. + Once you have successfully built SyntaxNet, you can start parsing text right away with Parsey McParseface, located under `syntaxnet/models`. The easiest thing is to use or modify the included script `syntaxnet/demo.sh`, which shows a basic setup to parse English taking plain text as input. -### Parsing from Standard Input +You can also skip right away to the [detailed SyntaxNet +tutorial](g3doc/syntaxnet-tutorial.md). + +How accurate is Parsey McParseface? For the initial release, we tried to balance +a model that runs fast enough to be useful on a single machine (e.g. ~600 +words/second on a modern desktop) and that is also the most accurate parser +available. 
Here's how Parsey McParseface compares to the academic literature on +several different English domains: (all numbers are % correct head assignments +in the tree, or unlabelled attachment score) + +Model | News | Web | Questions +--------------------------------------------------------------------------------------------------------------- | :---: | :---: | :-------: +[Martins et al. (2013)](http://www.cs.cmu.edu/~ark/TurboParser/) | 93.10 | 88.23 | 94.21 +[Zhang and McDonald (2014)](http://research.google.com/pubs/archive/38148.pdf) | 93.32 | 88.65 | 93.37 +[Weiss et al. (2015)](http://static.googleusercontent.com/media/research.google.com/en//pubs/archive/43800.pdf) | 93.91 | 89.29 | 94.17 +[Andor et al. (2016)](http://arxiv.org/abs/1603.06042)* | 94.44 | 90.17 | 95.40 +Parsey McParseface | 94.15 | 89.08 | 94.77 + +We see that Parsey McParseface is state-of-the-art; more importantly, with +SyntaxNet you can train larger networks with more hidden units and bigger beam +sizes if you want to push the accuracy even further: [Andor et al. +(2016)](http://arxiv.org/abs/1603.06042)* is simply a SyntaxNet model with a +larger beam and network. For futher information on the datasets, see that paper +under the section "Treebank Union". + +Parsey McParseface is also state-of-the-art for part-of-speech (POS) tagging +(numbers below are per-token accuracy): + +Model | News | Web | Questions +-------------------------------------------------------------------------- | :---: | :---: | :-------: +[Ling et al. (2015)](http://www.cs.cmu.edu/~lingwang/papers/emnlp2015.pdf) | 97.44 | 94.03 | 96.18 +[Andor et al. (2016)](http://arxiv.org/abs/1603.06042)* | 97.77 | 94.80 | 96.86 +Parsey McParseface | 97.52 | 94.24 | 96.45 + +#### Parsing from Standard Input Simply pass one sentence per line of text into the script at `syntaxnet/demo.sh`. The script will break the text into words, run the POS @@ -160,7 +195,7 @@ visualized in our tutorial graphs. 
In this example, we see that the verb If you want to feed in tokenized, CONLL-formatted text, you can run `demo.sh --conll`. -### Annotating a Corpus +#### Annotating a Corpus To change the pipeline to read and write to specific files (as opposed to piping through stdin and stdout), we have to modify the `demo.sh` to point to the files @@ -200,7 +235,7 @@ input { Then we can use `--input=wsj-data --output=wsj-data-tagged` on the command line to specify reading and writing to these files. -### Configuring the Python Scripts +#### Configuring the Python Scripts As mentioned above, the python scripts are configured in two ways: @@ -234,386 +269,13 @@ There are many ways to extend this framework, e.g. adding new features, changing the model structure, training on other languages, etc. We suggest reading the detailed tutorial below to get a handle on the rest of the framework. -## Detailed Tutorial: Building an NLP Pipeline with SyntaxNet - -In this tutorial, we'll go over how to train new models, and explain in a bit -more technical detail the NLP side of the models. Our goal here is to explain -the NLP pipeline produced by this package. - -### Obtaining Data - -The included English parser, Parsey McParseface, was trained on the the standard -corpora of the [Penn Treebank](https://catalog.ldc.upenn.edu/LDC99T42) and -[OntoNotes](https://catalog.ldc.upenn.edu/LDC2013T19), as well as the [English -Web Treebank](https://catalog.ldc.upenn.edu/LDC2012T13), but these are -unfortunately not freely available. - -However, the [Universal Dependencies](http://universaldependencies.org/) project -provides freely available treebank data in a number of languages. SyntaxNet can -be trained and evaluated on any of these corpora. - -### Part-of-Speech Tagging - -Consider the following sentence, which exhibits several ambiguities that affect -its interpretation: - -> I saw the man with glasses. 
- -This sentence is composed of words: strings of characters that are segmented -into groups (e.g. "I", "saw", etc.) Each word in the sentence has a *grammatical -function* that can be useful for understanding the meaning of language. For -example, "saw" in this example is a past tense of the verb "to see". But any -given word might have different meanings in different contexts: "saw" could just -as well be a noun (e.g., a saw used for cutting) or a present tense verb (using -a saw to cut something). - -A logical first step in understanding language is figuring out these roles for -each word in the sentence. This process is called *Part-of-Speech (POS) -Tagging*. The roles are called POS tags. Although a given word might have -multiple possible tags depending on the context, given any one interpretation of -a sentence each word will generally only have one tag. - -One interesting challenge of POS tagging is that the problem of defining a -vocabulary of POS tags for a given language is quite involved. While the concept -of nouns and verbs is pretty common, it has been traditionally difficult to -agree on a standard set of roles across all languages. The [Universal -Dependencies](http://www.universaldependencies.org) project aims to solve this -problem. - -### Training the SyntaxNet POS Tagger - -In general, determining the correct POS tag requires understanding the entire -sentence and the context in which it is uttered. In practice, we can do very -well just by considering a small window of words around the word of interest. -For example, words that follow the word ‘the’ tend to be adjectives or nouns, -rather than verbs. - -To predict POS tags, we use a simple setup. We process the sentences -left-to-right. For any given word, we extract features of that word and a window -around it, and use these as inputs to a feed-forward neural network classifier, -which predicts a probability distribution over POS tags. 
Because we make -decisions in left-to-right order, we also use prior decisions as features in -subsequent ones (e.g. "the previous predicted tag was a noun."). - -All the models in this package use a flexible markup language to define -features. For example, the features in the POS tagger are found in the -`brain_pos_features` parameter in the `TaskSpec`, and look like this (modulo -spacing): - -``` -stack(3).word stack(2).word stack(1).word stack.word input.word input(1).word input(2).word input(3).word; -input.digit input.hyphen; -stack.suffix(length=2) input.suffix(length=2) input(1).suffix(length=2); -stack.prefix(length=2) input.prefix(length=2) input(1).prefix(length=2) -``` - -Note that `stack` here means "words we have already tagged." Thus, this feature -spec uses three types of features: words, suffixes, and prefixes. The features -are grouped into blocks that share an embedding matrix, concatenated together, -and fed into a chain of hidden layers. This structure is based upon the model -proposed by [Chen and Manning (2014)] -(http://cs.stanford.edu/people/danqi/papers/emnlp2014.pdf). - -We show this layout in the schematic below: the state of the system (a stack and -a buffer, visualized below for both the POS and the dependency parsing task) is -used to extract sparse features, which are fed into the network in groups. We -show only a small subset of the features to simplify the presentation in the -schematic: - -![Schematic](ff_nn_schematic.png "Feed-forward Network Structure") - -In the configuration above, each block gets its own embedding matrix and the -blocks in the configuration above are delineated with a semi-colon. The -dimensions of each block are controlled in the `brain_pos_embedding_dims` -parameter. **Important note:** unlike many simple NLP models, this is *not* a -bag of words model. 
Remember that although certain features share embedding -matrices, the above features will be concatenated, so the interpretation of -`input.word` will be quite different from `input(1).word`. This also means that -adding features increases the dimension of the `concat` layer of the model as -well as the number of parameters for the first hidden layer. - -To train the model, first edit `syntaxnet/context.pbtxt` so that the inputs -`training-corpus`, `tuning-corpus`, and `dev-corpus` point to the location of -your training data. You can then train a part-of-speech tagger with: - -```shell -bazel-bin/syntaxnet/parser_trainer \ - --task_context=syntaxnet/context.pbtxt \ - --arg_prefix=brain_pos \ # read from POS configuration - --compute_lexicon \ # required for first stage of pipeline - --graph_builder=greedy \ # no beam search - --training_corpus=training-corpus \ # names of training/tuning set - --tuning_corpus=tuning-corpus \ - --output_path=models \ # where to save new resources - --batch_size=32 \ # Hyper-parameters - --decay_steps=3600 \ - --hidden_layer_sizes=128 \ - --learning_rate=0.08 \ - --momentum=0.9 \ - --seed=0 \ - --params=128-0.08-3600-0.9-0 # name for these parameters -``` - -This will read in the data, construct a lexicon, build a tensorflow graph for -the model with the specific hyperparameters, and train the model. Every so often -the model will be evaluated on the tuning set, and only the checkpoint with the -highest accuracy on this set will be saved. **Note that you should never use a -corpus you intend to test your model on as your tuning set, as you will inflate -your test set results.** - -For best results, you should repeat this command with at least 3 different -seeds, and possibly with a few different values for `--learning_rate` and -`--decay_steps`. Good values for `--learning_rate` are usually close to 0.1, and -you usually want `--decay_steps` to correspond to about one tenth of your -corpus. 
The `--params` flag is only a human readable identifier for the model -being trained, used to construct the full output path, so that you don't need to -worry about clobbering old models by accident. - -The `--arg_prefix` flag controls which parameters should be read from the task -context file `context.pbtxt`. In this case `arg_prefix` is set to `brain_pos`, -so the paramters being used in this training run are -`brain_pos_transition_system`, `brain_pos_embedding_dims`, `brain_pos_features` -and, `brain_pos_embedding_names`. To train the dependency parser later -`arg_prefix` will be set to `brain_parser`. - -### Preprocessing with the Tagger - -Now that we have a trained POS tagging model, we want to use the output of this -model as features in the parser. Thus the next step is to run the trained model -over our training, tuning, and dev (evaluation) sets. We can use the -parser_eval.py` script for this. - -For example, the model `128-0.08-3600-0.9-0` trained above can be run over the -training, tuning, and dev sets with the following command: - -```shell -PARAMS=128-0.08-3600-0.9-0 -for SET in training tuning dev; do - bazel-bin/syntaxnet/parser_eval \ - --task_context=models/brain_pos/greedy/$PARAMS/context \ - --hidden_layer_sizes=128 \ - --input=$SET-corpus \ - --output=tagged-$SET-corpus \ - --arg_prefix=brain_pos \ - --graph_builder=greedy \ - --model_path=models/brain_pos/greedy/$PARAMS/model -done -``` - -**Important note:** This command only works because we have created entries for -you in `context.pbtxt` that correspond to `tagged-training-corpus`, -`tagged-dev-corpus`, and `tagged-tuning-corpus`. From these default settings, -the above will write tagged versions of the training, tuning, and dev set to the -directory `models/brain_pos/greedy/$PARAMS/`. 
This location is chosen because -the `input` entries do not have `file_pattern` set: instead, they have `creator: -brain_pos/greedy`, which means that `parser_trainer.py` will construct *new* -files when called with `--arg_prefix=brain_pos --graph_builder=greedy` using the -`--model_path` flag to determine the location. - -For convenience, `parser_eval.py` also logs POS tagging accuracy after the -output tagged datasets have been written. - -### Dependency Parsing: Transition-Based Parsing - -Now that we have a prediction for the grammatical role of the words, we want to -understand how the words in the sentence relate to each other. This parser is -built around the *head-modifier* construction: for each word, we choose a -*syntactic head* that it modifies according to some grammatical role. - -An example for the above sentence is as follows: - -![Figure](sawman.png) - -Below each word in the sentence we see both a fine-grained part-of-speech -(*PRP*, *VBD*, *DT*, *NN* etc.), and a coarse-grained part-of-speech (*PRON*, -*VERB*, *DET*, *NOUN*, etc.). Coarse-grained POS tags encode basic grammatical -categories, while the fine-grained POS tags make further distinctions: for -example *NN* is a singular noun (as opposed, for example, to *NNS*, which is a -plural noun), and *VBD* is a past-tense verb. For more discussion see [Petrov et -al. (2012)](http://www.lrec-conf.org/proceedings/lrec2012/pdf/274_Paper.pdf). - -Crucially, we also see directed arcs signifying grammatical relationships -between different words in the sentence. For example *I* is the subject of -*saw*, as signified by the directed arc labeled *nsubj* between these words; -*man* is the direct object (dobj) of *saw*; the preposition *with* modifies -*man* with a prep relation, signifiying modification by a prepositional phrase; -and so on. In addition the verb *saw* is identified as the *root* of the entire -sentence. 
- -Whenever we have a directed arc between two words, we refer to the word at the -start of the arc as the *head*, and the word at the end of the arc as the -*modifier*. For example we have one arc where the head is *saw* and the modifier -is *I*, another where the head is *saw* and the modifier is *man*, and so on. - -The grammatical relationships encoded in dependency structures are directly -related to the underlying meaning of the sentence in question. They allow us to -easily recover the answers to various questions, for example *whom did I see?*, -*who saw the man with glasses?*, and so on. - -SyntaxNet is a **transition-based** dependency parser [Nivre (2007)] -(http://www.mitpressjournals.org/doi/pdfplus/10.1162/coli.07-056-R1-07-027) that -constructs a parse incrementally. Like the tagger, it processes words -left-to-right. The words all start as unprocessed input, called the *buffer*. As -words are encountered they are put onto a *stack*. At each step, the parser can -do one of three things: - -1. **SHIFT:** Push another word onto the top of the stack, i.e. shifting one - token from the buffer to the stack. -1. **LEFT_ARC:** Pop the top two words from the stack. Attach the second to the - first, creating an arc pointing to the **left**. Push the **first** word - back on the stack. -1. **RIGHT_ARC:** Pop the top two words from the stack. Attach the second to - the first, creating an arc point to the **right**. Push the **second** word - back on the stack. - -At each step, we call the combination of the stack and the buffer the -*configuration* of the parser. For the left and right actions, we also assign a -dependency relation label to that arc. This process is visualized in the -following animation for a short sentence: - -![Animation](looping-parser.gif "Parsing in Action") - -Note that this parser is following a sequence of actions, called a -**derivation**, to produce a "gold" tree labeled by a linguist. 
We can use this -sequence of decisions to learn a classifier that takes a configuration and -predicts the next action to take. - -### Training a Parser Step 1: Local Pretraining - -As described in our [paper](http://arxiv.org/abs/1603.06042), the first -step in training the model is to *pre-train* using *local* decisions. In this -phase, we use the gold dependency to guide the parser, and train a softmax layer -to predict the correct action given these gold dependencies. This can be -performed very efficiently, since the parser's decisions are all independent in -this setting. - -Once the tagged datasets are available, a locally normalized dependency parsing -model can be trained with the following command: - -```shell -bazel-bin/syntaxnet/parser_trainer \ - --arg_prefix=brain_parser \ - --batch_size=32 \ - --projectivize_training_set \ - --decay_steps=4400 \ - --graph_builder=greedy \ - --hidden_layer_sizes=200,200 \ - --learning_rate=0.08 \ - --momentum=0.85 \ - --output_path=models \ - --task_context=models/brain_pos/greedy/$PARAMS/context \ - --seed=4 \ - --training_corpus=tagged-training-corpus \ - --tuning_corpus=tagged-tuning-corpus \ - --params=200x200-0.08-4400-0.85-4 -``` - -Note that we point the trainer to the context corresponding to the POS tagger -that we picked previously. This allows the parser to reuse the lexicons and the -tagged datasets that were created in the previous steps. Processing data can be -done similarly to how tagging was done above. 
For example if in this case we -picked parameters `200x200-0.08-4400-0.85-4`, the training, tuning and dev sets -can be parsed with the following command: - -```shell -PARAMS=200x200-0.08-4400-0.85-4 -for SET in training tuning dev; do - bazel-bin/syntaxnet/parser_eval \ - --task_context=models/brain_parser/greedy/$PARAMS/context \ - --hidden_layer_sizes=200,200 \ - --input=tagged-$SET-corpus \ - --output=parsed-$SET-corpus \ - --arg_prefix=brain_parser \ - --graph_builder=greedy \ - --model_path=models/brain_parser/greedy/$PARAMS/model -done -``` - -### Training a Parser Step 2: Global Training - -As we describe in the paper, there are several problems with the locally -normalized models we just trained. The most important is the *label-bias* -problem: the model doesn't learn what a good parse looks like, only what action -to take given a history of gold decisions. This is because the scores are -normalized *locally* using a softmax for each decision. - -In the paper, we show how we can achieve much better results using a *globally* -normalized model: in this model, the softmax scores are summed in log space, and -the scores are not normalized until we reach a final decision. When the parser -stops, the scores of each hypothesis are normalized against a small set of -possible parses (in the case of this model, a beam size of 8). When training, we -force the parser to stop during parsing when the gold derivation falls off the -beam (a strategy known as early-updates). - -We give a simplified view of how this training works for a [garden path -sentence](https://en.wikipedia.org/wiki/Garden_path_sentence), where it is -important to maintain multiple hypotheses. A single mistake early on in parsing -leads to a completely incorrect parse; after training, the model learns to -prefer the second (correct) parse. - -![Beam search training](beam_search_training.png) - -Parsey McParseface correctly parses this sentence. 
Even though the correct parse -is initially ranked 4th out of multiple hypotheses, when the end of the garden -path is reached, Parsey McParseface can recover due to the beam; using a larger -beam will get a more accurate model, but it will be slower (we used beam 32 for -the models in the paper). - -Once you have the pre-trained locally normalized model, a globally normalized -parsing model can now be trained with the following command: - -```shell -bazel-bin/syntaxnet/parser_trainer \ - --arg_prefix=brain_parser \ - --batch_size=8 \ - --decay_steps=100 \ - --graph_builder=structured \ - --hidden_layer_sizes=200,200 \ - --learning_rate=0.02 \ - --momentum=0.9 \ - --output_path=models \ - --task_context=models/brain_parser/greedy/$PARAMS/context \ - --seed=0 \ - --training_corpus=projectivized-training-corpus \ - --tuning_corpus=tagged-tuning-corpus \ - --params=200x200-0.02-100-0.9-0 \ - --pretrained_params=models/brain_parser/greedy/$PARAMS/model \ - --pretrained_params_names=\ -embedding_matrix_0,embedding_matrix_1,embedding_matrix_2,\ -bias_0,weights_0,bias_1,weights_1 -``` - -Training a beam model with the structured builder will take a lot longer than -the greedy training runs above, perhaps 3 or 4 times longer. Note once again -that multiple restarts of training will yield the most reliable results. -Evaluation can again be done with `parser_eval.py`. In this case we use -parameters `200x200-0.02-100-0.9-0` to evaluate on the training, tuning and dev -sets with the following command: - -```shell -PARAMS=200x200-0.02-100-0.9-0 -for SET in training tuning dev; do - bazel-bin/syntaxnet/parser_eval \ - --task_context=models/brain_parser/structured/$PARAMS/context \ - --hidden_layer_sizes=200,200 \ - --input=tagged-$SET-corpus \ - --output=beam-parsed-$SET-corpus \ - --arg_prefix=brain_parser \ - --graph_builder=structured \ - --model_path=models/brain_parser/structured/$PARAMS/model -done -``` - -Hooray! 
You now have your very own cousin of Parsey McParseface, ready to go out -and parse text in the wild. - ## Contact To ask questions or report issues please post on Stack Overflow with the tag -[syntaxnet](http://stackoverflow.com/questions/tagged/syntaxnet) -or open an issue on the tensorflow/models -[issues tracker](https://github.com/tensorflow/models/issues). -Please assign SyntaxNet issues to @calberti or @andorardo. +[syntaxnet](http://stackoverflow.com/questions/tagged/syntaxnet) or open an +issue on the tensorflow/models [issues +tracker](https://github.com/tensorflow/models/issues). Please assign SyntaxNet +issues to @calberti or @andorardo. ## Credits @@ -633,6 +295,7 @@ Original authors of the code in this package include (in alphabetical order): * Keith Hall * Kuzman Ganchev * Livio Baldini Soares +* Mark Omernick * Michael Collins * Michael Ringgaard * Ryan McDonald @@ -640,3 +303,4 @@ Original authors of the code in this package include (in alphabetical order): * Stefan Istrate * Terry Koo * Tim Credo +* Zora Tung diff --git a/syntaxnet/WORKSPACE b/syntaxnet/WORKSPACE index 8b5b22442e78e4c21116f6c84c07a4aeb6f1f871..f9b2ffd6238d48851b686500066aa354bcbb4c9f 100644 --- a/syntaxnet/WORKSPACE +++ b/syntaxnet/WORKSPACE @@ -3,10 +3,23 @@ local_repository( path = "tensorflow", ) +# We need to pull in @io_bazel_rules_closure for TensorFlow. Bazel design +# documentation states that this verbosity is intentional, to prevent +# TensorFlow/SyntaxNet from depending on different versions of +# @io_bazel_rules_closure. 
+http_archive( + name = "io_bazel_rules_closure", + sha256 = "60fc6977908f999b23ca65698c2bb70213403824a84f7904310b6000d78be9ce", + strip_prefix = "rules_closure-5ca1dab6df9ad02050f7ba4e816407f88690cf7d", + urls = [ + "http://bazel-mirror.storage.googleapis.com/github.com/bazelbuild/rules_closure/archive/5ca1dab6df9ad02050f7ba4e816407f88690cf7d.tar.gz", # 2017-02-03 + "https://github.com/bazelbuild/rules_closure/archive/5ca1dab6df9ad02050f7ba4e816407f88690cf7d.tar.gz", + ], +) + load("@org_tensorflow//tensorflow:workspace.bzl", "tf_workspace") tf_workspace(path_prefix="", tf_repo_name="org_tensorflow") # Test that Bazel is up-to-date. load("@org_tensorflow//tensorflow:workspace.bzl", "check_version") -check_version("0.4.3") - +check_version("0.4.2") diff --git a/syntaxnet/docker-devel/build_devel.sh b/syntaxnet/docker-devel/build_devel.sh new file mode 100755 index 0000000000000000000000000000000000000000..98425467fd70cb059b0c0c5afeda402050e6676a --- /dev/null +++ b/syntaxnet/docker-devel/build_devel.sh @@ -0,0 +1,27 @@ +#!/bin/bash +# +# This file puts you in a Docker sub-shell where you can build SyntaxNet +# targets. It is intended for development, as the Dockerfile (build file) does +# not actually build any of SyntaxNet, but instead mounts it in a volume. + +script_path="$(readlink -f "$0")" +root_path="$(dirname "$(dirname "${script_path}")")" +set -e + +if [[ -z "$(docker images -q dragnn-oss)" ]]; then + docker build -t dragnn-oss . +else + echo "NOTE: dragnn-oss image already exists, not re-building." >&2 + echo "Please run \`docker build -t dragnn-oss .\` if you need." >&2 +fi + +echo -e "\n\nRun bazel commands like \`bazel test syntaxnet/...\`" + +# NOTE: Unfortunately, we need to mount /tensorflow over /syntaxnet/tensorflow +# (which happens via devel_entrypoint.sh). This requires privileged mode. 
+syntaxnet_base="/opt/tensorflow/syntaxnet" +docker run --rm -ti \ + -v "${root_path}"/syntaxnet:"${syntaxnet_base}"/syntaxnet \ + -v "${root_path}"/dragnn:"${syntaxnet_base}"/dragnn \ + -p 127.0.0.1:8888:8888 \ + dragnn-oss "$@" diff --git a/syntaxnet/docker-devel/build_wheels.sh b/syntaxnet/docker-devel/build_wheels.sh new file mode 100644 index 0000000000000000000000000000000000000000..a79063abed4d0df8c6efbcffb31eed88073c5fdb --- /dev/null +++ b/syntaxnet/docker-devel/build_wheels.sh @@ -0,0 +1,35 @@ +#!/bin/bash +# +# Convenience script to build wheel files in Docker, and copy them out of the +# container. +# +# Usage: docker-devel/build_wheels.sh (takes no arguments; run it from the base +# directory). +set -e +docker build -t dragnn-oss . + +# Start building the wheels. +script="bazel run //dragnn/tools:build_pip_package \ + -- --output-dir=/opt/tensorflow/syntaxnet; \ + bazel run //dragnn/tools:build_pip_package \ + -- --output-dir=/opt/tensorflow/syntaxnet --include-tensorflow" +container_id="$(docker run -d dragnn-oss /bin/bash -c "${script}")" + +echo "Waiting for container ${container_id} to finish building the wheel ..." +if [[ "$(docker wait "${container_id}")" != 0 ]]; then + echo "Container failed! Please run \`docker logs \` to see errors." >&2 + exit 1 +fi + +# The build_pip_package.py script prints lines like "Wrote x.whl". The wheel +# names are prefixed by architecture and such, so don't guess them. +wheels=( + $(docker logs "${container_id}" 2>/dev/null | grep Wrote | awk '{print $2;}')) +for wheel in "${wheels[@]}"; do + output=./"$(basename "${wheel}")" + docker cp "${container_id}:${wheel}" "${output}" + echo "Wrote ${output} ($(du -h "${output}" | awk '{print $1;}'))" +done + +echo "Removing ${container_id} ..." 
+docker rm "${container_id}" >/dev/null diff --git a/syntaxnet/dragnn/BUILD b/syntaxnet/dragnn/BUILD new file mode 100644 index 0000000000000000000000000000000000000000..3ac01eacdceb557c72f939a45c01127251b20f06 --- /dev/null +++ b/syntaxnet/dragnn/BUILD @@ -0,0 +1,5 @@ +package_group( + name = "dragnn_visibility", + packages = [ + ], +) diff --git a/syntaxnet/dragnn/components/syntaxnet/BUILD b/syntaxnet/dragnn/components/syntaxnet/BUILD new file mode 100644 index 0000000000000000000000000000000000000000..abf415f20557dbda91c9842476c79e12670dd1cd --- /dev/null +++ b/syntaxnet/dragnn/components/syntaxnet/BUILD @@ -0,0 +1,116 @@ +package(default_visibility = ["//visibility:public"]) + +cc_library( + name = "syntaxnet_component", + srcs = ["syntaxnet_component.cc"], + hdrs = ["syntaxnet_component.h"], + deps = [ + ":syntaxnet_link_feature_extractor", + ":syntaxnet_transition_state", + "//dragnn/components/util:bulk_feature_extractor", + "//dragnn/core:beam", + "//dragnn/core:component_registry", + "//dragnn/core:input_batch_cache", + "//dragnn/core/interfaces:component", + "//dragnn/core/interfaces:transition_state", + "//dragnn/io:sentence_input_batch", + "//dragnn/io:syntaxnet_sentence", + "//dragnn/protos:data_proto", + "//dragnn/protos:spec_proto", + "//dragnn/protos:trace_proto", + "//syntaxnet:base", + "//syntaxnet:parser_transitions", + "//syntaxnet:registry", + "//syntaxnet:sparse_proto", + "//syntaxnet:task_context", + "//syntaxnet:task_spec_proto", + "//syntaxnet:utils", + "@org_tensorflow//tensorflow/core:lib", # For tf/core/platform/logging.h + ], + alwayslink = 1, +) + +cc_library( + name = "syntaxnet_link_feature_extractor", + srcs = ["syntaxnet_link_feature_extractor.cc"], + hdrs = ["syntaxnet_link_feature_extractor.h"], + deps = [ + "//dragnn/protos:spec_proto", + "//syntaxnet:embedding_feature_extractor", + "//syntaxnet:parser_transitions", + "//syntaxnet:task_context", + "@org_tensorflow//tensorflow/core:lib", # For tf/core/platform/logging.h + ], +) 
+ +cc_library( + name = "syntaxnet_transition_state", + srcs = ["syntaxnet_transition_state.cc"], + hdrs = ["syntaxnet_transition_state.h"], + deps = [ + "//dragnn/core/interfaces:cloneable_transition_state", + "//dragnn/core/interfaces:transition_state", + "//dragnn/io:syntaxnet_sentence", + "//dragnn/protos:trace_proto", + "//syntaxnet:base", + "//syntaxnet:parser_transitions", + "@org_tensorflow//tensorflow/core:lib", # For tf/core/platform/logging.h + ], +) + +# Test data. +filegroup( + name = "testdata", + data = glob(["testdata/**"]), +) + +# Tests. +cc_test( + name = "syntaxnet_component_test", + srcs = ["syntaxnet_component_test.cc"], + data = [":testdata"], + deps = [ + ":syntaxnet_component", + "//dragnn/core:input_batch_cache", + "//dragnn/core/test:generic", + "//dragnn/core/test:mock_transition_state", + "//dragnn/io:sentence_input_batch", + "//syntaxnet:sentence_proto", + "@org_tensorflow//tensorflow/core:lib", + "@org_tensorflow//tensorflow/core:test", + ], +) + +cc_test( + name = "syntaxnet_link_feature_extractor_test", + srcs = ["syntaxnet_link_feature_extractor_test.cc"], + deps = [ + ":syntaxnet_link_feature_extractor", + "//dragnn/core/test:generic", + "//dragnn/protos:spec_proto", + "//syntaxnet:task_context", + "//syntaxnet:test_main", + "@org_tensorflow//tensorflow/core:test", + "@org_tensorflow//tensorflow/core:testlib", + ], +) + +cc_test( + name = "syntaxnet_transition_state_test", + srcs = ["syntaxnet_transition_state_test.cc"], + data = [":testdata"], + deps = [ + ":syntaxnet_component", + ":syntaxnet_transition_state", + "//dragnn/core:input_batch_cache", + "//dragnn/core/test:generic", + "//dragnn/core/test:mock_transition_state", + "//dragnn/io:sentence_input_batch", + "//dragnn/protos:spec_proto", + "//syntaxnet:sentence_proto", + "//syntaxnet:test_main", + "@org_tensorflow//tensorflow/core:lib", + "@org_tensorflow//tensorflow/core:test", + "@org_tensorflow//tensorflow/core:testlib", + ], +) diff --git 
a/syntaxnet/dragnn/components/syntaxnet/syntaxnet_component.cc b/syntaxnet/dragnn/components/syntaxnet/syntaxnet_component.cc new file mode 100644 index 0000000000000000000000000000000000000000..0f2c1c4387c6fa6782bbb2159285afb892a9391a --- /dev/null +++ b/syntaxnet/dragnn/components/syntaxnet/syntaxnet_component.cc @@ -0,0 +1,779 @@ +#include "dragnn/components/syntaxnet/syntaxnet_component.h" + +#include + +#include "dragnn/components/util/bulk_feature_extractor.h" +#include "dragnn/core/component_registry.h" +#include "dragnn/core/input_batch_cache.h" +#include "dragnn/core/interfaces/component.h" +#include "dragnn/core/interfaces/transition_state.h" +#include "dragnn/io/sentence_input_batch.h" +#include "dragnn/io/syntaxnet_sentence.h" +#include "syntaxnet/parser_state.h" +#include "syntaxnet/sparse.pb.h" +#include "syntaxnet/task_spec.pb.h" +#include "syntaxnet/utils.h" +#include "tensorflow/core/platform/logging.h" + +namespace syntaxnet { +namespace dragnn { + +using tensorflow::strings::StrCat; + +namespace { + +// Returns a new step in a trace based on a ComponentSpec. +ComponentStepTrace GetNewStepTrace(const ComponentSpec &spec, + const TransitionState &state) { + ComponentStepTrace step; + for (auto &linked_spec : spec.linked_feature()) { + auto &channel_trace = *step.add_linked_feature_trace(); + channel_trace.set_name(linked_spec.name()); + channel_trace.set_source_component(linked_spec.source_component()); + channel_trace.set_source_translator(linked_spec.source_translator()); + channel_trace.set_source_layer(linked_spec.source_layer()); + } + for (auto &fixed_spec : spec.fixed_feature()) { + step.add_fixed_feature_trace()->set_name(fixed_spec.name()); + } + step.set_html_representation(state.HTMLRepresentation()); + return step; +} + +// Returns the last step in the trace. 
+ComponentStepTrace *GetLastStepInTrace(ComponentTrace *trace) { + CHECK_GT(trace->step_trace_size(), 0) << "Trace has no steps added yet"; + return trace->mutable_step_trace(trace->step_trace_size() - 1); +} + +} // anonymous namespace + +SyntaxNetComponent::SyntaxNetComponent() + : feature_extractor_("brain_parser"), + rewrite_root_labels_(false), + max_beam_size_(1), + input_data_(nullptr) {} + +void SyntaxNetComponent::InitializeComponent(const ComponentSpec &spec) { + // Save off the passed spec for future reference. + spec_ = spec; + + // Create and populate a TaskContext for the underlying parser. + TaskContext context; + + // Add the specified resources. + for (const Resource &resource : spec_.resource()) { + auto *input = context.GetInput(resource.name()); + for (const Part &part : resource.part()) { + auto *input_part = input->add_part(); + input_part->set_file_pattern(part.file_pattern()); + input_part->set_file_format(part.file_format()); + input_part->set_record_format(part.record_format()); + } + } + + // Add the specified task args to the transition system. + for (const auto ¶m : spec_.transition_system().parameters()) { + context.SetParameter(param.first, param.second); + } + + // Set the arguments for the feature extractor. 
+ std::vector names; + std::vector dims; + std::vector fml; + std::vector predicate_maps; + + for (const FixedFeatureChannel &channel : spec.fixed_feature()) { + names.push_back(channel.name()); + fml.push_back(channel.fml()); + predicate_maps.push_back(channel.predicate_map()); + dims.push_back(StrCat(channel.embedding_dim())); + } + + context.SetParameter("neurosis_feature_syntax_version", "2"); + context.SetParameter("brain_parser_embedding_dims", utils::Join(dims, ";")); + context.SetParameter("brain_parser_predicate_maps", + utils::Join(predicate_maps, ";")); + context.SetParameter("brain_parser_features", utils::Join(fml, ";")); + context.SetParameter("brain_parser_embedding_names", utils::Join(names, ";")); + + names.clear(); + dims.clear(); + fml.clear(); + predicate_maps.clear(); + + std::vector source_components; + std::vector source_layers; + std::vector source_translators; + + for (const LinkedFeatureChannel &channel : spec.linked_feature()) { + names.push_back(channel.name()); + fml.push_back(channel.fml()); + dims.push_back(StrCat(channel.embedding_dim())); + source_components.push_back(channel.source_component()); + source_layers.push_back(channel.source_layer()); + source_translators.push_back(channel.source_translator()); + predicate_maps.push_back("none"); + } + + context.SetParameter("link_embedding_dims", utils::Join(dims, ";")); + context.SetParameter("link_predicate_maps", utils::Join(predicate_maps, ";")); + context.SetParameter("link_features", utils::Join(fml, ";")); + context.SetParameter("link_embedding_names", utils::Join(names, ";")); + context.SetParameter("link_source_layers", utils::Join(source_layers, ";")); + context.SetParameter("link_source_translators", + utils::Join(source_translators, ";")); + context.SetParameter("link_source_components", + utils::Join(source_components, ";")); + + context.SetParameter("parser_transition_system", + spec.transition_system().registered_name()); + + // Set up the fixed feature extractor. 
+ feature_extractor_.Setup(&context); + feature_extractor_.Init(&context); + feature_extractor_.RequestWorkspaces(&workspace_registry_); + + // Set up the underlying transition system. + transition_system_.reset(ParserTransitionSystem::Create( + context.Get("parser_transition_system", "arc-standard"))); + transition_system_->Setup(&context); + transition_system_->Init(&context); + + // Create label map. + string path = TaskContext::InputFile(*context.GetInput("label-map")); + label_map_ = + SharedStoreUtils::GetWithDefaultName(path, 0, 0); + + // Set up link feature extractors. + if (spec.linked_feature_size() > 0) { + link_feature_extractor_.Setup(&context); + link_feature_extractor_.Init(&context); + link_feature_extractor_.RequestWorkspaces(&workspace_registry_); + } + + // Get the legacy flag for simulating old parser processor behavior. If the + // flag is not set, default to 'false'. + rewrite_root_labels_ = context.Get("rewrite_root_labels", false); +} + +std::unique_ptr> SyntaxNetComponent::CreateBeam( + int max_size) { + std::unique_ptr> beam( + new Beam(max_size)); + auto permission_function = [this](SyntaxNetTransitionState *state, + int action) { + VLOG(3) << "permission_function action:" << action + << " is_allowed:" << this->IsAllowed(state, action); + return this->IsAllowed(state, action); + }; + auto finality_function = [this](SyntaxNetTransitionState *state) { + VLOG(2) << "finality_function is_final:" << this->IsFinal(state); + return this->IsFinal(state); + }; + auto oracle_function = [this](SyntaxNetTransitionState *state) { + VLOG(2) << "oracle_function action:" << this->GetOracleLabel(state); + return this->GetOracleLabel(state); + }; + auto beam_ptr = beam.get(); + auto advance_function = [this, beam_ptr](SyntaxNetTransitionState *state, + int action) { + VLOG(2) << "advance_function beam ptr:" << beam_ptr << " action:" << action; + this->Advance(state, action, beam_ptr); + }; + beam->SetFunctions(permission_function, finality_function, 
advance_function, + oracle_function); + + return beam; +} + +void SyntaxNetComponent::InitializeData( + const std::vector> &parent_states, + int max_beam_size, InputBatchCache *input_data) { + // Save off the input data object. + input_data_ = input_data; + + // If beam size has changed, change all beam sizes for existing beams. + if (max_beam_size_ != max_beam_size) { + CHECK_GT(max_beam_size, 0) + << "Requested max beam size must be greater than 0."; + VLOG(2) << "Adjusting max beam size from " << max_beam_size_ << " to " + << max_beam_size; + max_beam_size_ = max_beam_size; + for (auto &beam : batch_) { + beam->SetMaxSize(max_beam_size_); + } + } + + SentenceInputBatch *sentences = input_data->GetAs(); + + // Expect that the sentence data is the same size as the input states batch. + if (!parent_states.empty()) { + CHECK_EQ(parent_states.size(), sentences->data()->size()); + } + + // Adjust the beam vector so that it is the correct size for this batch. + if (batch_.size() < sentences->data()->size()) { + VLOG(1) << "Batch size is increased to " << sentences->data()->size() + << " from " << batch_.size(); + for (int i = batch_.size(); i < sentences->data()->size(); ++i) { + batch_.push_back(CreateBeam(max_beam_size)); + } + } else if (batch_.size() > sentences->data()->size()) { + VLOG(1) << "Batch size is decreased to " << sentences->data()->size() + << " from " << batch_.size(); + batch_.erase(batch_.begin() + sentences->data()->size(), batch_.end()); + + } else { + VLOG(1) << "Batch size is constant at " << sentences->data()->size(); + } + CHECK_EQ(batch_.size(), sentences->data()->size()); + + // Fill the beams with the relevant data for that batch. + for (int batch_index = 0; batch_index < sentences->data()->size(); + ++batch_index) { + // Create a vector of states for this component's beam. + std::vector> initial_states; + if (parent_states.empty()) { + // If no states have been passed in, create a single state to seed the + // beam. 
+ initial_states.push_back( + CreateState(&(sentences->data()->at(batch_index)))); + } else { + // If states have been passed in, seed the beam with them up to the max + // beam size. + int num_states = + std::min(batch_.at(batch_index)->max_size(), + static_cast(parent_states.at(batch_index).size())); + VLOG(2) << "Creating a beam using " << num_states << " initial states"; + for (int i = 0; i < num_states; ++i) { + std::unique_ptr state( + CreateState(&(sentences->data()->at(batch_index)))); + state->Init(*parent_states.at(batch_index).at(i)); + initial_states.push_back(std::move(state)); + } + } + batch_.at(batch_index)->Init(std::move(initial_states)); + } +} + +bool SyntaxNetComponent::IsReady() const { return input_data_ != nullptr; } + +string SyntaxNetComponent::Name() const { + return "SyntaxNet-backed beam parser"; +} + +int SyntaxNetComponent::BatchSize() const { return batch_.size(); } + +int SyntaxNetComponent::BeamSize() const { return max_beam_size_; } + +int SyntaxNetComponent::StepsTaken(int batch_index) const { + return batch_.at(batch_index)->num_steps(); +} + +int SyntaxNetComponent::GetBeamIndexAtStep(int step, int current_index, + int batch) const { + return batch_.at(batch)->FindPreviousIndex(current_index, step); +} + +int SyntaxNetComponent::GetSourceBeamIndex(int current_index, int batch) const { + return batch_.at(batch)->FindPreviousIndex(current_index, 0); +} + +std::function SyntaxNetComponent::GetStepLookupFunction( + const string &method) { + if (method == "shift-reduce-step") { + // TODO(googleuser): Describe this function. + return [this](int batch_index, int beam_index, int value) { + SyntaxNetTransitionState *state = + batch_.at(batch_index)->beam_state(beam_index); + return state->step_for_token(value); + }; + } else if (method == "reduce-step") { + // TODO(googleuser): Describe this function. 
+ return [this](int batch_index, int beam_index, int value) { + SyntaxNetTransitionState *state = + batch_.at(batch_index)->beam_state(beam_index); + return state->parent_step_for_token(value); + }; + } else if (method == "parent-shift-reduce-step") { + // TODO(googleuser): Describe this function. + return [this](int batch_index, int beam_index, int value) { + SyntaxNetTransitionState *state = + batch_.at(batch_index)->beam_state(beam_index); + return state->step_for_token(state->parent_step_for_token(value)); + }; + } else if (method == "reverse-token") { + // TODO(googleuser): Describe this function. + return [this](int batch_index, int beam_index, int value) { + SyntaxNetTransitionState *state = + batch_.at(batch_index)->beam_state(beam_index); + int result = state->sentence()->sentence()->token_size() - value - 1; + if (result >= 0 && result < state->sentence()->sentence()->token_size()) { + return result; + } else { + return -1; + } + }; + } else { + LOG(FATAL) << "Unable to find step lookup function " << method; + } +} + +void SyntaxNetComponent::AdvanceFromPrediction(const float transition_matrix[], + int transition_matrix_length) { + VLOG(2) << "Advancing from prediction."; + int matrix_index = 0; + int num_labels = transition_system_->NumActions(label_map_->Size()); + for (int i = 0; i < batch_.size(); ++i) { + int max_beam_size = batch_.at(i)->max_size(); + int matrix_size = num_labels * max_beam_size; + CHECK_LE(matrix_index + matrix_size, transition_matrix_length); + if (!batch_.at(i)->IsTerminal()) { + batch_.at(i)->AdvanceFromPrediction(&transition_matrix[matrix_index], + matrix_size, num_labels); + } + matrix_index += num_labels * max_beam_size; + } +} + +void SyntaxNetComponent::AdvanceFromOracle() { + VLOG(2) << "Advancing from oracle."; + for (auto &beam : batch_) { + beam->AdvanceFromOracle(); + } +} + +bool SyntaxNetComponent::IsTerminal() const { + VLOG(2) << "Checking terminal status."; + for (const auto &beam : batch_) { + if 
(!beam->IsTerminal()) { + return false; + } + } + return true; +} + +std::vector> +SyntaxNetComponent::GetBeam() { + std::vector> state_beam; + for (auto &beam : batch_) { + // Because this component only finalizes the data of the highest ranked + // component in each beam, the next component should only be initialized + // from the highest ranked component in that beam. + state_beam.push_back({beam->beam().at(0)}); + } + return state_beam; +} + +int SyntaxNetComponent::GetFixedFeatures( + std::function allocate_indices, + std::function allocate_ids, + std::function allocate_weights, int channel_id) const { + std::vector features; + + const int channel_size = spec_.fixed_feature(channel_id).size(); + + // For every beam in the batch... + for (const auto &beam : batch_) { + // For every element in the beam... + for (int beam_idx = 0; beam_idx < beam->size(); ++beam_idx) { + // Get the SparseFeatures from the feature extractor. + auto state = beam->beam_state(beam_idx); + const std::vector> sparse_features = + feature_extractor_.ExtractSparseFeatures( + *(state->sentence()->workspace()), *(state->parser_state())); + + // Hold the SparseFeatures for later processing. 
+ for (const SparseFeatures &f : sparse_features[channel_id]) { + features.emplace_back(f); + if (do_tracing_) { + FixedFeatures fixed_features; + for (const string &name : f.description()) { + fixed_features.add_value_name(name); + } + fixed_features.set_feature_name(""); + auto *trace = GetLastStepInTrace(state->mutable_trace()); + auto *fixed_trace = trace->mutable_fixed_feature_trace(channel_id); + *fixed_trace->add_value_trace() = fixed_features; + } + } + } + const int pad_amount = max_beam_size_ - beam->size(); + features.resize(features.size() + pad_amount * channel_size); + } + + int feature_count = 0; + for (const auto &feature : features) { + feature_count += feature.id_size(); + } + + VLOG(2) << "Feature count is " << feature_count; + int32 *indices_tensor = allocate_indices(feature_count); + int64 *ids_tensor = allocate_ids(feature_count); + float *weights_tensor = allocate_weights(feature_count); + + int array_index = 0; + for (int feature_index = 0; feature_index < features.size(); + ++feature_index) { + VLOG(2) << "Extracting for feature_index " << feature_index; + const auto feature = features[feature_index]; + for (int sub_idx = 0; sub_idx < feature.id_size(); ++sub_idx) { + indices_tensor[array_index] = feature_index; + ids_tensor[array_index] = feature.id(sub_idx); + if (sub_idx < feature.weight_size()) { + weights_tensor[array_index] = feature.weight(sub_idx); + } else { + weights_tensor[array_index] = 1.0; + } + VLOG(2) << "Feature index: " << indices_tensor[array_index] + << " id: " << ids_tensor[array_index] + << " weight: " << weights_tensor[array_index]; + + ++array_index; + } + } + return feature_count; +} + +int SyntaxNetComponent::BulkGetFixedFeatures( + const BulkFeatureExtractor &extractor) { + // Allocate a vector of SparseFeatures per channel. 
+ const int num_channels = spec_.fixed_feature_size(); + std::vector channel_size(num_channels); + for (int i = 0; i < num_channels; ++i) { + channel_size[i] = spec_.fixed_feature(i).size(); + } + std::vector> features(num_channels); + std::vector> feature_indices(num_channels); + std::vector> step_indices(num_channels); + std::vector> element_indices(num_channels); + std::vector feature_counts(num_channels); + int step_count = 0; + + while (!IsTerminal()) { + int current_element = 0; + + // For every beam in the batch... + for (const auto &beam : batch_) { + // For every element in the beam... + for (int beam_idx = 0; beam_idx < beam->size(); ++beam_idx) { + // Get the SparseFeatures from the parser. + auto state = beam->beam_state(beam_idx); + const std::vector> sparse_features = + feature_extractor_.ExtractSparseFeatures( + *(state->sentence()->workspace()), *(state->parser_state())); + + for (int channel_id = 0; channel_id < num_channels; ++channel_id) { + int feature_count = 0; + for (const SparseFeatures &f : sparse_features[channel_id]) { + // Trace, if requested. + if (do_tracing_) { + FixedFeatures fixed_features; + for (const string &name : f.description()) { + fixed_features.add_value_name(name); + } + fixed_features.set_feature_name(""); + auto *trace = GetLastStepInTrace(state->mutable_trace()); + auto *fixed_trace = + trace->mutable_fixed_feature_trace(channel_id); + *fixed_trace->add_value_trace() = fixed_features; + } + + // Hold the SparseFeatures for later processing. + features[channel_id].emplace_back(f); + element_indices[channel_id].emplace_back(current_element); + step_indices[channel_id].emplace_back(step_count); + feature_indices[channel_id].emplace_back(feature_count); + feature_counts[channel_id] += f.id_size(); + ++feature_count; + } + } + ++current_element; + } + + // Advance the current element to skip unused beam slots. + // Pad the beam out to max_beam_size. 
+ int pad_amount = max_beam_size_ - beam->size(); + current_element += pad_amount; + } + AdvanceFromOracle(); + ++step_count; + } + + const int total_steps = step_count; + const int num_elements = batch_.size() * max_beam_size_; + + // This would be a good place to add threading. + for (int channel_id = 0; channel_id < num_channels; ++channel_id) { + int feature_count = feature_counts[channel_id]; + LOG(INFO) << "Feature count is " << feature_count << " for channel " + << channel_id; + int32 *indices_tensor = + extractor.AllocateIndexMemory(channel_id, feature_count); + int64 *ids_tensor = extractor.AllocateIdMemory(channel_id, feature_count); + float *weights_tensor = + extractor.AllocateWeightMemory(channel_id, feature_count); + int array_index = 0; + for (int feat_idx = 0; feat_idx < features[channel_id].size(); ++feat_idx) { + const auto &feature = features[channel_id][feat_idx]; + int element_index = element_indices[channel_id][feat_idx]; + int step_index = step_indices[channel_id][feat_idx]; + int feature_index = feature_indices[channel_id][feat_idx]; + for (int sub_idx = 0; sub_idx < feature.id_size(); ++sub_idx) { + indices_tensor[array_index] = + extractor.GetIndex(total_steps, num_elements, feature_index, + element_index, step_index); + ids_tensor[array_index] = feature.id(sub_idx); + if (sub_idx < feature.weight_size()) { + weights_tensor[array_index] = feature.weight(sub_idx); + } else { + weights_tensor[array_index] = 1.0; + } + ++array_index; + } + } + } + return step_count; +} + +std::vector SyntaxNetComponent::GetRawLinkFeatures( + int channel_id) const { + std::vector features; + const int channel_size = spec_.linked_feature(channel_id).size(); + std::unique_ptr> feature_names; + if (do_tracing_) { + feature_names.reset(new std::vector); + *feature_names = utils::Split(spec_.linked_feature(channel_id).fml(), ' '); + } + + // For every beam in the batch... 
+ for (int batch_idx = 0; batch_idx < batch_.size(); ++batch_idx) { + // For every element in the beam... + const auto &beam = batch_[batch_idx]; + for (int beam_idx = 0; beam_idx < beam->size(); ++beam_idx) { + // Get the raw link features from the linked feature extractor. + auto state = beam->beam_state(beam_idx); + std::vector raw_features( + link_feature_extractor_.NumEmbeddings()); + link_feature_extractor_.ExtractFeatures(*(state->sentence()->workspace()), + *(state->parser_state()), + &raw_features); + + // Add the raw feature values to the LinkFeatures proto. + CHECK_LT(channel_id, raw_features.size()); + for (int i = 0; i < raw_features[channel_id].size(); ++i) { + features.emplace_back(); + features.back().set_feature_value(raw_features[channel_id].value(i)); + features.back().set_batch_idx(batch_idx); + features.back().set_beam_idx(beam_idx); + if (do_tracing_) { + features.back().set_feature_name(feature_names->at(i)); + } + } + } + + // Pad the beam out to max_beam_size. + int pad_amount = max_beam_size_ - beam->size(); + features.resize(features.size() + pad_amount * channel_size); + } + + return features; +} + +std::vector> SyntaxNetComponent::GetOracleLabels() const { + std::vector> oracle_labels; + for (const auto &beam : batch_) { + oracle_labels.emplace_back(); + for (int beam_idx = 0; beam_idx < beam->size(); ++beam_idx) { + // Get the raw link features from the linked feature extractor. + auto state = beam->beam_state(beam_idx); + oracle_labels.back().push_back(GetOracleLabel(state)); + } + } + return oracle_labels; +} + +void SyntaxNetComponent::FinalizeData() { + // This chooses the top-scoring member of the beam to annotate the underlying + // document. 
+ VLOG(2) << "Finalizing data."; + for (auto &beam : batch_) { + if (beam->size() != 0) { + auto top_state = beam->beam_state(0); + VLOG(3) << "Finalizing for sentence: " + << top_state->sentence()->sentence()->ShortDebugString(); + top_state->parser_state()->AddParseToDocument( + top_state->sentence()->sentence(), rewrite_root_labels_); + VLOG(3) << "Sentence is now: " + << top_state->sentence()->sentence()->ShortDebugString(); + } else { + LOG(WARNING) << "Attempting to finalize an empty beam for component " + << spec_.name(); + } + } +} + +void SyntaxNetComponent::ResetComponent() { + for (auto &beam : batch_) { + beam->Reset(); + } + input_data_ = nullptr; + max_beam_size_ = 0; +} + +std::unique_ptr SyntaxNetComponent::CreateState( + SyntaxNetSentence *sentence) { + VLOG(3) << "Creating state for sentence " + << sentence->sentence()->DebugString(); + std::unique_ptr parser_state(new ParserState( + sentence->sentence(), transition_system_->NewTransitionState(false), + label_map_)); + sentence->workspace()->Reset(workspace_registry_); + feature_extractor_.Preprocess(sentence->workspace(), parser_state.get()); + link_feature_extractor_.Preprocess(sentence->workspace(), parser_state.get()); + std::unique_ptr transition_state( + new SyntaxNetTransitionState(std::move(parser_state), sentence)); + return transition_state; +} + +bool SyntaxNetComponent::IsAllowed(SyntaxNetTransitionState *state, + int action) const { + return transition_system_->IsAllowedAction(action, *(state->parser_state())); +} + +bool SyntaxNetComponent::IsFinal(SyntaxNetTransitionState *state) const { + return transition_system_->IsFinalState(*(state->parser_state())); +} + +int SyntaxNetComponent::GetOracleLabel(SyntaxNetTransitionState *state) const { + if (IsFinal(state)) { + // It is not permitted to request an oracle label from a sentence that is + // in a final state. 
+ return -1; + } else { + return transition_system_->GetNextGoldAction(*(state->parser_state())); + } +} + +void SyntaxNetComponent::Advance(SyntaxNetTransitionState *state, int action, + Beam *beam) { + auto parser_state = state->parser_state(); + auto sentence_size = state->sentence()->sentence()->token_size(); + const int num_steps = beam->num_steps(); + + if (transition_system_->SupportsActionMetaData()) { + const int parent_idx = + transition_system_->ParentIndex(*parser_state, action); + constexpr int kShiftAction = -1; + if (parent_idx == kShiftAction) { + if (parser_state->Next() < sentence_size && parser_state->Next() >= 0) { + // if we have already consumed all the input then it is not a shift + // action. We just skip it. + state->set_step_for_token(parser_state->Next(), num_steps); + } + } else if (parent_idx >= 0) { + VLOG(2) << spec_.name() << ": Updating pointer: " << parent_idx << " -> " + << num_steps; + state->set_step_for_token(parent_idx, num_steps); + const int child_idx = + transition_system_->ChildIndex(*parser_state, action); + assert(child_idx >= 0 && child_idx < sentence_size); + state->set_parent_for_token(child_idx, parent_idx); + + VLOG(2) << spec_.name() << ": Updating parent for child: " << parent_idx + << " -> " << child_idx; + state->set_parent_step_for_token(child_idx, num_steps); + } else { + VLOG(2) << spec_.name() << ": Invalid parent index: " << parent_idx; + } + } + if (do_tracing_) { + auto *trace = state->mutable_trace(); + auto *last_step = GetLastStepInTrace(trace); + + // Add action to the prior step. + last_step->set_caption( + transition_system_->ActionAsString(action, *parser_state)); + last_step->set_step_finished(true); + } + + transition_system_->PerformAction(action, parser_state); + + if (do_tracing_) { + // Add info for the next step. 
+ *state->mutable_trace()->add_step_trace() = GetNewStepTrace(spec_, *state); + } +} + +void SyntaxNetComponent::InitializeTracing() { + do_tracing_ = true; + CHECK(IsReady()) << "Cannot initialize trace before InitializeData()."; + + // Initialize each element of the beam with a new trace. + for (auto &beam : batch_) { + for (int beam_idx = 0; beam_idx < beam->size(); ++beam_idx) { + SyntaxNetTransitionState *state = beam->beam_state(beam_idx); + std::unique_ptr trace(new ComponentTrace()); + trace->set_name(spec_.name()); + *trace->add_step_trace() = GetNewStepTrace(spec_, *state); + state->set_trace(std::move(trace)); + } + } + + feature_extractor_.set_add_strings(true); +} + +void SyntaxNetComponent::DisableTracing() { + do_tracing_ = false; + feature_extractor_.set_add_strings(false); +} + +void SyntaxNetComponent::AddTranslatedLinkFeaturesToTrace( + const std::vector &features, int channel_id) { + CHECK(do_tracing_) << "Tracing is not enabled."; + int linear_idx = 0; + const int channel_size = spec_.linked_feature(channel_id).size(); + + // For every beam in the batch... + for (const auto &beam : batch_) { + // For every element in the beam... + for (int beam_idx = 0; beam_idx < max_beam_size_; ++beam_idx) { + for (int feature_idx = 0; feature_idx < channel_size; ++feature_idx) { + if (beam_idx < beam->size()) { + auto state = beam->beam_state(beam_idx); + auto *trace = GetLastStepInTrace(state->mutable_trace()); + auto *link_trace = trace->mutable_linked_feature_trace(channel_id); + if (features[linear_idx].feature_value() >= 0 && + features[linear_idx].step_idx() >= 0) { + *link_trace->add_value_trace() = features[linear_idx]; + } + } + ++linear_idx; + } + } + } +} + +std::vector> SyntaxNetComponent::GetTraceProtos() + const { + std::vector> traces; + + // For every beam in the batch... + for (const auto &beam : batch_) { + std::vector beam_trace; + + // For every element in the beam... 
+ for (int beam_idx = 0; beam_idx < beam->size(); ++beam_idx) { + auto state = beam->beam_state(beam_idx); + beam_trace.push_back(*state->mutable_trace()); + } + traces.push_back(beam_trace); + } + return traces; +}; + +REGISTER_DRAGNN_COMPONENT(SyntaxNetComponent); + +} // namespace dragnn +} // namespace syntaxnet diff --git a/syntaxnet/dragnn/components/syntaxnet/syntaxnet_component.h b/syntaxnet/dragnn/components/syntaxnet/syntaxnet_component.h new file mode 100644 index 0000000000000000000000000000000000000000..d404e9825de4abae92fbdbada5d1dd4a1f78698c --- /dev/null +++ b/syntaxnet/dragnn/components/syntaxnet/syntaxnet_component.h @@ -0,0 +1,183 @@ +#ifndef NLP_SAFT_OPENSOURCE_DRAGNN_COMPONENTS_SYNTAXNET_SYNTAXNET_COMPONENT_H_ +#define NLP_SAFT_OPENSOURCE_DRAGNN_COMPONENTS_SYNTAXNET_SYNTAXNET_COMPONENT_H_ + +#include + +#include "dragnn/components/syntaxnet/syntaxnet_link_feature_extractor.h" +#include "dragnn/components/syntaxnet/syntaxnet_transition_state.h" +#include "dragnn/components/util/bulk_feature_extractor.h" +#include "dragnn/core/beam.h" +#include "dragnn/core/input_batch_cache.h" +#include "dragnn/core/interfaces/component.h" +#include "dragnn/core/interfaces/transition_state.h" +#include "dragnn/protos/data.pb.h" +#include "dragnn/protos/spec.pb.h" +#include "dragnn/protos/trace.pb.h" +#include "syntaxnet/base.h" +#include "syntaxnet/parser_transitions.h" +#include "syntaxnet/registry.h" +#include "syntaxnet/task_context.h" + +namespace syntaxnet { +namespace dragnn { + +class SyntaxNetComponent : public Component { + public: + // Create a SyntaxNet-backed DRAGNN component. + SyntaxNetComponent(); + + // Initializes this component from the spec. + void InitializeComponent(const ComponentSpec &spec) override; + + // Provides the previous beam to the component. 
+ void InitializeData( + const std::vector> &states, + int max_beam_size, InputBatchCache *input_data) override; + + // Returns true if the component has had InitializeData called on it since + // the last time it was reset. + bool IsReady() const override; + + // Returns the string name of this component. + string Name() const override; + + // Returns the number of steps taken by the given batch in this component. + int StepsTaken(int batch_index) const override; + + // Returns the current batch size of the component's underlying data. + int BatchSize() const override; + + // Returns the maximum beam size of this component. + int BeamSize() const override; + + // Return the beam index of the item which is currently at index + // 'index', when the beam was at step 'step', for batch element 'batch'. + int GetBeamIndexAtStep(int step, int current_index, int batch) const override; + + // Return the source index of the item which is currently at index 'index' + // for batch element 'batch'. This index is into the final beam of the + // Component that this Component was initialized from. + int GetSourceBeamIndex(int current_index, int batch) const override; + + // Request a translation function based on the given method string. + // The translation function will be called with arguments (batch, beam, value) + // and should return the step index corresponding to the given value, for the + // data in the given beam and batch. + std::function GetStepLookupFunction( + const string &method) override; + + // Advances this component from the given transition matrix. + void AdvanceFromPrediction(const float transition_matrix[], + int transition_matrix_length) override; + + // Advances this component from the state oracles. + void AdvanceFromOracle() override; + + // Returns true if all states within this component are terminal. + bool IsTerminal() const override; + + // Returns the current batch of beams for this component. 
+ std::vector> GetBeam() override; + + // Extracts and populates the vector of FixedFeatures for the specified + // channel. + int GetFixedFeatures(std::function allocate_indices, + std::function allocate_ids, + std::function allocate_weights, + int channel_id) const override; + + // Extracts and populates all FixedFeatures for all channels, advancing this + // component via the oracle until it is terminal. + int BulkGetFixedFeatures(const BulkFeatureExtractor &extractor) override; + + // Extracts and returns the vector of LinkFeatures for the specified + // channel. Note: these are NOT translated. + std::vector GetRawLinkFeatures(int channel_id) const override; + + // Returns a vector of oracle labels for each element in the beam and + // batch. + std::vector> GetOracleLabels() const override; + + // Annotate the underlying data object with the results of this Component's + // calculation. + void FinalizeData() override; + + // Reset this component. + void ResetComponent() override; + + // Initializes the component for tracing execution. This will typically have + // the side effect of slowing down all subsequent Component calculations + // and storing a trace in memory that can be returned by GetTraceProtos(). + void InitializeTracing() override; + + // Disables tracing, freeing any additional memory and avoiding triggering + // additional computation in the future. + void DisableTracing() override; + + std::vector> GetTraceProtos() const override; + + void AddTranslatedLinkFeaturesToTrace( + const std::vector &features, int channel_id) override; + + private: + friend class SyntaxNetComponentTest; + friend class SyntaxNetTransitionStateTest; + + // Permission function for this component. + bool IsAllowed(SyntaxNetTransitionState *state, int action) const; + + // Returns true if this state is final + bool IsFinal(SyntaxNetTransitionState *state) const; + + // Oracle function for this component. 
+ int GetOracleLabel(SyntaxNetTransitionState *state) const; + + // State advance function for this component. + void Advance(SyntaxNetTransitionState *state, int action, + Beam *beam); + + // Creates a new state for the given nlp_saft::SentenceExample. + std::unique_ptr CreateState( + SyntaxNetSentence *example); + + // Creates a newly initialized Beam. + std::unique_ptr> CreateBeam(int max_size); + + // Transition system. + std::unique_ptr transition_system_; + + // Label map for transition system. + const TermFrequencyMap *label_map_; + + // Extractor for fixed features + ParserEmbeddingFeatureExtractor feature_extractor_; + + // Extractor for linked features. + SyntaxNetLinkFeatureExtractor link_feature_extractor_; + + // Internal workspace registry for use in feature extraction. + WorkspaceRegistry workspace_registry_; + + // Switch for simulating legacy parser behaviour. + bool rewrite_root_labels_; + + // The ComponentSpec used to initialize this component. + ComponentSpec spec_; + + // State search beams + std::vector>> batch_; + + // Current max beam size. + int max_beam_size_; + + // Underlying input data. + InputBatchCache *input_data_; + + // Whether or not to trace for each batch and beam element. 
+ bool do_tracing_ = false; +}; + +} // namespace dragnn +} // namespace syntaxnet + +#endif // NLP_SAFT_OPENSOURCE_DRAGNN_COMPONENTS_SYNTAXNET_SYNTAXNET_COMPONENT_H_ diff --git a/syntaxnet/dragnn/components/syntaxnet/syntaxnet_component_test.cc b/syntaxnet/dragnn/components/syntaxnet/syntaxnet_component_test.cc new file mode 100644 index 0000000000000000000000000000000000000000..8f3da521ef8ecb529729541a9fc45fe87a675bac --- /dev/null +++ b/syntaxnet/dragnn/components/syntaxnet/syntaxnet_component_test.cc @@ -0,0 +1,1174 @@ +#include "dragnn/components/syntaxnet/syntaxnet_component.h" + +#include "dragnn/core/input_batch_cache.h" +#include "dragnn/core/test/generic.h" +#include "dragnn/core/test/mock_transition_state.h" +#include "dragnn/io/sentence_input_batch.h" +#include "syntaxnet/sentence.pb.h" +#include "tensorflow/core/lib/core/errors.h" +#include "tensorflow/core/lib/core/status.h" +#include "tensorflow/core/lib/io/path.h" +#include "tensorflow/core/platform/env.h" +#include "tensorflow/core/platform/protobuf.h" +#include "tensorflow/core/platform/test.h" + +// This test suite is intended to validate the contracts that the DRAGNN +// system expects from all transition state subclasses. Developers creating +// new TransitionStates should copy this test and modify it as necessary, +// using it to ensure their state conforms to DRAGNN expectations. + +namespace syntaxnet { +namespace dragnn { + +namespace { + +const char kSentence0[] = R"( +token { + word: "Sentence" start: 0 end: 7 tag: "NN" category: "NOUN" label: "ROOT" + break_level: NO_BREAK +} +token { + word: "0" start: 9 end: 9 head: 0 tag: "CD" category: "NUM" label: "num" + break_level: SPACE_BREAK +} +token { + word: "." start: 10 end: 10 head: 0 tag: "." category: "." 
label: "punct" + break_level: NO_BREAK +} +)"; + +const char kSentence1[] = R"( +token { + word: "Sentence" start: 0 end: 7 tag: "NN" category: "NOUN" label: "ROOT" + break_level: NO_BREAK +} +token { + word: "1" start: 9 end: 9 head: 0 tag: "CD" category: "NUM" label: "num" + break_level: SPACE_BREAK +} +token { + word: "." start: 10 end: 10 head: 0 tag: "." category: "." label: "punct" + break_level: NO_BREAK +} +)"; + +const char kLongSentence[] = R"( +token { + word: "Sentence" start: 0 end: 7 tag: "NN" category: "NOUN" label: "ROOT" + break_level: NO_BREAK +} +token { + word: "1" start: 9 end: 9 head: 0 tag: "CD" category: "NUM" label: "num" + break_level: SPACE_BREAK +} +token { + word: "2" start: 10 end: 10 head: 0 tag: "CD" category: "NUM" label: "num" + break_level: SPACE_BREAK +} +token { + word: "3" start: 11 end: 11 head: 0 tag: "CD" category: "NUM" label: "num" + break_level: SPACE_BREAK +} +token { + word: "." start: 12 end: 12 head: 0 tag: "." category: "." label: "punct" + break_level: NO_BREAK +} +)"; + +} // namespace + +using testing::Return; + +class SyntaxNetComponentTest : public ::testing::Test { + public: + std::unique_ptr CreateParser( + const std::vector> &states, + const std::vector &data) { + constexpr int kBeamSize = 2; + return CreateParserWithBeamSize(kBeamSize, states, data); + } + std::unique_ptr CreateParserWithBeamSize( + int beam_size, + const std::vector> &states, + const std::vector &data) { + // Get the master spec proto from the test data directory. + MasterSpec master_spec; + string file_name = tensorflow::io::JoinPath( + test::GetTestDataPrefix(), "dragnn/components/syntaxnet/testdata", + "master_spec.textproto"); + TF_CHECK_OK(tensorflow::ReadTextProto(tensorflow::Env::Default(), file_name, + &master_spec)); + + // Get all the resource protos from the test data directory. 
+ for (Resource &resource : + *(master_spec.mutable_component(0)->mutable_resource())) { + resource.mutable_part(0)->set_file_pattern(tensorflow::io::JoinPath( + test::GetTestDataPrefix(), "dragnn/components/syntaxnet/testdata", + resource.part(0).file_pattern())); + } + + data_.reset(new InputBatchCache(data)); + + // Create a parser component with the specified beam size. + std::unique_ptr parser_component( + new SyntaxNetComponent()); + parser_component->InitializeComponent(*(master_spec.mutable_component(0))); + parser_component->InitializeData(states, beam_size, data_.get()); + return parser_component; + } + + const std::vector *> GetBeams( + SyntaxNetComponent *component) const { + std::vector *> return_vector; + for (const auto &beam : component->batch_) { + return_vector.push_back(beam.get()); + } + return return_vector; + } + + std::unique_ptr data_; +}; + +TEST_F(SyntaxNetComponentTest, AdvancesFromOracleAndTerminates) { + // Create an empty input batch and beam vector to initialize the parser. + Sentence sentence_0; + TextFormat::ParseFromString(kSentence0, &sentence_0); + string sentence_0_str; + sentence_0.SerializeToString(&sentence_0_str); + + auto test_parser = CreateParser({}, {sentence_0_str}); + constexpr int kNumTokensInSentence = 3; + + // The master spec will initialize a parser, so expect 2*N transitions. + constexpr int kExpectedNumTransitions = kNumTokensInSentence * 2; + for (int i = 0; i < kExpectedNumTransitions; ++i) { + EXPECT_FALSE(test_parser->IsTerminal()); + test_parser->AdvanceFromOracle(); + } + + // At this point, the test parser should be terminal. + EXPECT_TRUE(test_parser->IsTerminal()); + + // Check that the component is reporting 2N steps taken. + EXPECT_EQ(test_parser->StepsTaken(0), kExpectedNumTransitions); + + // Make sure the parser doesn't segfault. 
+ test_parser->FinalizeData(); +} + +TEST_F(SyntaxNetComponentTest, AdvancesFromPredictionAndTerminates) { + // Create an empty input batch and beam vector to initialize the parser. + Sentence sentence_0; + TextFormat::ParseFromString(kSentence0, &sentence_0); + string sentence_0_str; + sentence_0.SerializeToString(&sentence_0_str); + + auto test_parser = CreateParser({}, {sentence_0_str}); + constexpr int kNumTokensInSentence = 3; + + // The master spec will initialize a parser, so expect 2*N transitions. + constexpr int kExpectedNumTransitions = kNumTokensInSentence * 2; + + // There are 93 possible transitions for any given state. Create a transition + // array with a score of 10.0 for each transition. + constexpr int kBeamSize = 2; + constexpr int kNumPossibleTransitions = 93; + constexpr float kTransitionValue = 10.0; + float transition_matrix[kNumPossibleTransitions * kBeamSize]; + for (int i = 0; i < kNumPossibleTransitions * kBeamSize; ++i) { + transition_matrix[i] = kTransitionValue; + } + + // Transition the expected number of times. + for (int i = 0; i < kExpectedNumTransitions; ++i) { + EXPECT_FALSE(test_parser->IsTerminal()); + test_parser->AdvanceFromPrediction(transition_matrix, + kNumPossibleTransitions * kBeamSize); + } + + // At this point, the test parser should be terminal. + EXPECT_TRUE(test_parser->IsTerminal()); + + // Check that the component is reporting 2N steps taken. + EXPECT_EQ(test_parser->StepsTaken(0), kExpectedNumTransitions); + + // Prepare to validate the batched beams. + auto beam = test_parser->GetBeam(); + + // All beams should only have one element. + for (const auto &per_beam : beam) { + EXPECT_EQ(per_beam.size(), 1); + } + + // The final states should have kExpectedNumTransitions * kTransitionValue. + EXPECT_EQ(beam.at(0).at(0)->GetScore(), + kTransitionValue * kExpectedNumTransitions); + + // Make sure the parser doesn't segfault. 
+ test_parser->FinalizeData(); + + // TODO(googleuser): What should the finalized data look like? +} + +TEST_F(SyntaxNetComponentTest, RetainsPassedTransitionStateData) { + // Create and initialize the state-> + MockTransitionState mock_state_one; + constexpr int kParentBeamIndexOne = 1138; + constexpr float kParentScoreOne = 7.2; + EXPECT_CALL(mock_state_one, GetBeamIndex()) + .WillRepeatedly(Return(kParentBeamIndexOne)); + EXPECT_CALL(mock_state_one, GetScore()) + .WillRepeatedly(Return(kParentScoreOne)); + + MockTransitionState mock_state_two; + constexpr int kParentBeamIndexTwo = 1123; + constexpr float kParentScoreTwo = 42.03; + EXPECT_CALL(mock_state_two, GetBeamIndex()) + .WillRepeatedly(Return(kParentBeamIndexTwo)); + EXPECT_CALL(mock_state_two, GetScore()) + .WillRepeatedly(Return(kParentScoreTwo)); + + // Create an empty input batch and beam vector to initialize the parser. + Sentence sentence_0; + TextFormat::ParseFromString(kSentence0, &sentence_0); + string sentence_0_str; + sentence_0.SerializeToString(&sentence_0_str); + + auto test_parser = + CreateParser({{&mock_state_one, &mock_state_two}}, {sentence_0_str}); + constexpr int kNumTokensInSentence = 3; + + // The master spec will initialize a parser, so expect 2*N transitions. + constexpr int kExpectedNumTransitions = kNumTokensInSentence * 2; + + // There are 93 possible transitions for any given state. Create a transition + // array with a score of 10.0 for each transition. 
+ constexpr int kBeamSize = 2; + constexpr int kNumPossibleTransitions = 93; + constexpr float kTransitionValue = 10.0; + float transition_matrix[kNumPossibleTransitions * kBeamSize]; + for (int i = 0; i < kNumPossibleTransitions * kBeamSize; ++i) { + transition_matrix[i] = kTransitionValue; + } + + // Transition the expected number of times + for (int i = 0; i < kExpectedNumTransitions; ++i) { + EXPECT_FALSE(test_parser->IsTerminal()); + test_parser->AdvanceFromPrediction(transition_matrix, + kNumPossibleTransitions * kBeamSize); + } + + // At this point, the test parser should be terminal. + EXPECT_TRUE(test_parser->IsTerminal()); + + // Check that the component is reporting 2N steps taken. + EXPECT_EQ(test_parser->StepsTaken(0), kExpectedNumTransitions); + + // The final states should have kExpectedNumTransitions * kTransitionValue, + // plus the higher parent state score (from state two). + auto beam = test_parser->GetBeam(); + EXPECT_EQ(beam.at(0).at(0)->GetScore(), + kTransitionValue * kExpectedNumTransitions + kParentScoreTwo); + + // Make sure that the parent state is reported correctly. + EXPECT_EQ(test_parser->GetSourceBeamIndex(0, 0), kParentBeamIndexTwo); + + // Make sure the parser doesn't segfault. + test_parser->FinalizeData(); + + // TODO(googleuser): What should the finalized data look like? +} + +TEST_F(SyntaxNetComponentTest, AdvancesFromPredictionForMultiSentenceBatches) { + // Create an empty input batch and beam vector to initialize the parser. + Sentence sentence_0; + TextFormat::ParseFromString(kSentence0, &sentence_0); + string sentence_0_str; + sentence_0.SerializeToString(&sentence_0_str); + + Sentence sentence_1; + TextFormat::ParseFromString(kSentence1, &sentence_1); + string sentence_1_str; + sentence_1.SerializeToString(&sentence_1_str); + + auto test_parser = CreateParser({}, {sentence_0_str, sentence_1_str}); + constexpr int kNumTokensInSentence = 3; + + // The master spec will initialize a parser, so expect 2*N transitions. 
+ constexpr int kExpectedNumTransitions = kNumTokensInSentence * 2; + + // There are 93 possible transitions for any given state. Create a transition + // array with a score of 10.0 for each transition. + constexpr int kBatchSize = 2; + constexpr int kBeamSize = 2; + constexpr int kNumPossibleTransitions = 93; + constexpr float kTransitionValue = 10.0; + float transition_matrix[kNumPossibleTransitions * kBeamSize * kBatchSize]; + for (int i = 0; i < kNumPossibleTransitions * kBeamSize * kBatchSize; ++i) { + transition_matrix[i] = kTransitionValue; + } + + // Transition the expected number of times. + for (int i = 0; i < kExpectedNumTransitions; ++i) { + EXPECT_FALSE(test_parser->IsTerminal()); + test_parser->AdvanceFromPrediction( + transition_matrix, kNumPossibleTransitions * kBeamSize * kBatchSize); + } + + // At this point, the test parser should be terminal. + EXPECT_TRUE(test_parser->IsTerminal()); + + // Check that the component is reporting 2N steps taken. + EXPECT_EQ(test_parser->StepsTaken(0), kExpectedNumTransitions); + EXPECT_EQ(test_parser->StepsTaken(1), kExpectedNumTransitions); + + // The final states should have kExpectedNumTransitions * kTransitionValue. + auto beam = test_parser->GetBeam(); + EXPECT_EQ(beam.at(0).at(0)->GetScore(), + kTransitionValue * kExpectedNumTransitions); + EXPECT_EQ(beam.at(1).at(0)->GetScore(), + kTransitionValue * kExpectedNumTransitions); + + // Make sure the parser doesn't segfault. + test_parser->FinalizeData(); + + // TODO(googleuser): What should the finalized data look like? +} + +TEST_F(SyntaxNetComponentTest, + AdvancesFromPredictionForVaryingLengthSentences) { + // Create an empty input batch and beam vector to initialize the parser. 
+ Sentence sentence_0; + TextFormat::ParseFromString(kSentence0, &sentence_0); + string sentence_0_str; + sentence_0.SerializeToString(&sentence_0_str); + + Sentence long_sentence; + TextFormat::ParseFromString(kLongSentence, &long_sentence); + string long_sentence_str; + long_sentence.SerializeToString(&long_sentence_str); + + auto test_parser = CreateParser({}, {sentence_0_str, long_sentence_str}); + constexpr int kNumTokensInSentence = 3; + constexpr int kNumTokensInLongSentence = 5; + + // There are 93 possible transitions for any given state. Create a transition + // array with a score of 10.0 for each transition. + constexpr int kBatchSize = 2; + constexpr int kBeamSize = 2; + constexpr int kNumPossibleTransitions = 93; + constexpr float kTransitionValue = 10.0; + float transition_matrix[kNumPossibleTransitions * kBeamSize * kBatchSize]; + for (int i = 0; i < kNumPossibleTransitions * kBeamSize * kBatchSize; ++i) { + transition_matrix[i] = kTransitionValue; + } + + // Transition the expected number of times. + constexpr int kExpectedNumTransitions = kNumTokensInLongSentence * 2; + for (int i = 0; i < kExpectedNumTransitions; ++i) { + EXPECT_FALSE(test_parser->IsTerminal()); + test_parser->AdvanceFromPrediction( + transition_matrix, kNumPossibleTransitions * kBeamSize * kBatchSize); + } + + // At this point, the test parser should be terminal. + EXPECT_TRUE(test_parser->IsTerminal()); + + // Check that the component is reporting 2N steps taken. + EXPECT_EQ(test_parser->StepsTaken(0), kNumTokensInSentence * 2); + EXPECT_EQ(test_parser->StepsTaken(1), kNumTokensInLongSentence * 2); + + // The final states should have kExpectedNumTransitions * kTransitionValue. + auto beam = test_parser->GetBeam(); + + // The first sentence is shorter, so it should have a lower final score. 
+ EXPECT_EQ(beam.at(0).at(0)->GetScore(), + kTransitionValue * kNumTokensInSentence * 2); + EXPECT_EQ(beam.at(1).at(0)->GetScore(), + kTransitionValue * kNumTokensInLongSentence * 2); + + // Make sure the parser doesn't segfault. + test_parser->FinalizeData(); + + // TODO(googleuser): What should the finalized data look like? +} + +TEST_F(SyntaxNetComponentTest, ResetAllowsReductionInBatchSize) { + // Create an empty input batch and beam vector to initialize the parser. + Sentence sentence_0; + TextFormat::ParseFromString(kSentence0, &sentence_0); + string sentence_0_str; + sentence_0.SerializeToString(&sentence_0_str); + + Sentence long_sentence; + TextFormat::ParseFromString(kLongSentence, &long_sentence); + string long_sentence_str; + long_sentence.SerializeToString(&long_sentence_str); + + // Get the master spec proto from the test data directory. + MasterSpec master_spec; + string file_name = tensorflow::io::JoinPath( + test::GetTestDataPrefix(), "dragnn/components/syntaxnet/testdata", + "master_spec.textproto"); + TF_CHECK_OK(tensorflow::ReadTextProto(tensorflow::Env::Default(), file_name, + &master_spec)); + + // Get all the resource protos from the test data directory. + for (Resource &resource : + *(master_spec.mutable_component(0)->mutable_resource())) { + resource.mutable_part(0)->set_file_pattern(tensorflow::io::JoinPath( + test::GetTestDataPrefix(), "dragnn/components/syntaxnet/testdata", + resource.part(0).file_pattern())); + } + + // Create an input batch cache with a large batch size. + constexpr int kBeamSize = 2; + std::unique_ptr large_batch_data(new InputBatchCache( + {sentence_0_str, sentence_0_str, sentence_0_str, sentence_0_str})); + std::unique_ptr parser_component( + new SyntaxNetComponent()); + parser_component->InitializeComponent(*(master_spec.mutable_component(0))); + parser_component->InitializeData({}, kBeamSize, large_batch_data.get()); + + // Reset the component and pass in a new input batch that is smaller. 
+ parser_component->ResetComponent(); + std::unique_ptr small_batch_data(new InputBatchCache( + {long_sentence_str, long_sentence_str, long_sentence_str})); + parser_component->InitializeData({}, kBeamSize, small_batch_data.get()); + + // There are 93 possible transitions for any given state. Create a transition + // array with a score of 10.0 for each transition. + constexpr int kBatchSize = 3; + constexpr int kNumPossibleTransitions = 93; + constexpr float kTransitionValue = 10.0; + float transition_matrix[kNumPossibleTransitions * kBeamSize * kBatchSize]; + for (int i = 0; i < kNumPossibleTransitions * kBeamSize * kBatchSize; ++i) { + transition_matrix[i] = kTransitionValue; + } + + // Transition the expected number of times. + constexpr int kNumTokensInSentence = 5; + constexpr int kExpectedNumTransitions = kNumTokensInSentence * 2; + for (int i = 0; i < kExpectedNumTransitions; ++i) { + EXPECT_FALSE(parser_component->IsTerminal()); + parser_component->AdvanceFromPrediction( + transition_matrix, kNumPossibleTransitions * kBeamSize * kBatchSize); + } + + // At this point, the test parser should be terminal. + EXPECT_TRUE(parser_component->IsTerminal()); + + // Check that the component is reporting 2N steps taken. + EXPECT_EQ(parser_component->StepsTaken(0), kExpectedNumTransitions); + EXPECT_EQ(parser_component->StepsTaken(1), kExpectedNumTransitions); + EXPECT_EQ(parser_component->StepsTaken(2), kExpectedNumTransitions); + + // The final states should have kExpectedNumTransitions * kTransitionValue. + auto beam = parser_component->GetBeam(); + + // The beam should be of batch size 3. + EXPECT_EQ(beam.size(), 3); + EXPECT_EQ(beam.at(0).at(0)->GetScore(), + kTransitionValue * kExpectedNumTransitions); + EXPECT_EQ(beam.at(1).at(0)->GetScore(), + kTransitionValue * kExpectedNumTransitions); + EXPECT_EQ(beam.at(2).at(0)->GetScore(), + kTransitionValue * kExpectedNumTransitions); + + // Make sure the parser doesn't segfault. 
+ parser_component->FinalizeData(); +} + +TEST_F(SyntaxNetComponentTest, ResetAllowsIncreaseInBatchSize) { + // Create an empty input batch and beam vector to initialize the parser. + Sentence sentence_0; + TextFormat::ParseFromString(kSentence0, &sentence_0); + string sentence_0_str; + sentence_0.SerializeToString(&sentence_0_str); + + Sentence long_sentence; + TextFormat::ParseFromString(kLongSentence, &long_sentence); + string long_sentence_str; + long_sentence.SerializeToString(&long_sentence_str); + + // Get the master spec proto from the test data directory. + MasterSpec master_spec; + string file_name = tensorflow::io::JoinPath( + test::GetTestDataPrefix(), "dragnn/components/syntaxnet/testdata", + "master_spec.textproto"); + TF_CHECK_OK(tensorflow::ReadTextProto(tensorflow::Env::Default(), file_name, + &master_spec)); + + // Get all the resource protos from the test data directory. + for (Resource &resource : + *(master_spec.mutable_component(0)->mutable_resource())) { + resource.mutable_part(0)->set_file_pattern(tensorflow::io::JoinPath( + test::GetTestDataPrefix(), "dragnn/components/syntaxnet/testdata", + resource.part(0).file_pattern())); + } + + // Create an input batch cache with a small batch size. + constexpr int kBeamSize = 2; + std::unique_ptr small_batch_data( + new InputBatchCache(sentence_0_str)); + std::unique_ptr parser_component( + new SyntaxNetComponent()); + parser_component->InitializeComponent(*(master_spec.mutable_component(0))); + parser_component->InitializeData({}, kBeamSize, small_batch_data.get()); + + // Reset the component and pass in a new input batch that is larger. + parser_component->ResetComponent(); + std::unique_ptr large_batch_data(new InputBatchCache( + {long_sentence_str, long_sentence_str, long_sentence_str})); + parser_component->InitializeData({}, kBeamSize, large_batch_data.get()); + + // There are 93 possible transitions for any given state. Create a transition + // array with a score of 10.0 for each transition. 
+ constexpr int kBatchSize = 3; + constexpr int kNumPossibleTransitions = 93; + constexpr float kTransitionValue = 10.0; + float transition_matrix[kNumPossibleTransitions * kBeamSize * kBatchSize]; + for (int i = 0; i < kNumPossibleTransitions * kBeamSize * kBatchSize; ++i) { + transition_matrix[i] = kTransitionValue; + } + + // Transition the expected number of times. + constexpr int kNumTokensInSentence = 5; + constexpr int kExpectedNumTransitions = kNumTokensInSentence * 2; + for (int i = 0; i < kExpectedNumTransitions; ++i) { + EXPECT_FALSE(parser_component->IsTerminal()); + parser_component->AdvanceFromPrediction( + transition_matrix, kNumPossibleTransitions * kBeamSize * kBatchSize); + } + + // At this point, the test parser should be terminal. + EXPECT_TRUE(parser_component->IsTerminal()); + + // Check that the component is reporting 2N steps taken. + EXPECT_EQ(parser_component->StepsTaken(0), kExpectedNumTransitions); + EXPECT_EQ(parser_component->StepsTaken(1), kExpectedNumTransitions); + EXPECT_EQ(parser_component->StepsTaken(2), kExpectedNumTransitions); + + // The final states should have kExpectedNumTransitions * kTransitionValue. + auto beam = parser_component->GetBeam(); + + // The beam should be of batch size 3. + EXPECT_EQ(beam.size(), 3); + EXPECT_EQ(beam.at(0).at(0)->GetScore(), + kTransitionValue * kExpectedNumTransitions); + EXPECT_EQ(beam.at(1).at(0)->GetScore(), + kTransitionValue * kExpectedNumTransitions); + EXPECT_EQ(beam.at(2).at(0)->GetScore(), + kTransitionValue * kExpectedNumTransitions); + + // Make sure the parser doesn't segfault. + parser_component->FinalizeData(); +} + +TEST_F(SyntaxNetComponentTest, ResetCausesBeamToReset) { + // Create an empty input batch and beam vector to initialize the parser. 
+ Sentence sentence_0; + TextFormat::ParseFromString(kSentence0, &sentence_0); + string sentence_0_str; + sentence_0.SerializeToString(&sentence_0_str); + + Sentence long_sentence; + TextFormat::ParseFromString(kLongSentence, &long_sentence); + string long_sentence_str; + long_sentence.SerializeToString(&long_sentence_str); + + auto test_parser = CreateParser({}, {sentence_0_str}); + constexpr int kNumTokensInSentence = 3; + + // The master spec will initialize a parser, so expect 2*N transitions. + constexpr int kExpectedNumTransitions = kNumTokensInSentence * 2; + + // There are 93 possible transitions for any given state. Create a transition + // array with a score of 10.0 for each transition. + constexpr int kBeamSize = 2; + constexpr int kNumPossibleTransitions = 93; + constexpr float kTransitionValue = 10.0; + float transition_matrix[kNumPossibleTransitions * kBeamSize]; + for (int i = 0; i < kNumPossibleTransitions * kBeamSize; ++i) { + transition_matrix[i] = kTransitionValue; + } + + // Transition the expected number of times. + for (int i = 0; i < kExpectedNumTransitions; ++i) { + EXPECT_FALSE(test_parser->IsTerminal()); + test_parser->AdvanceFromPrediction(transition_matrix, + kNumPossibleTransitions * kBeamSize); + } + + // At this point, the test parser should be terminal. + EXPECT_TRUE(test_parser->IsTerminal()); + + // Check that the component is reporting 2N steps taken. + EXPECT_EQ(test_parser->StepsTaken(0), kExpectedNumTransitions); + + // The final states should have kExpectedNumTransitions * kTransitionValue. + auto beam = test_parser->GetBeam(); + EXPECT_EQ(beam.at(0).at(0)->GetScore(), + kTransitionValue * kExpectedNumTransitions); + + // Reset the test parser and give it new data. + test_parser->ResetComponent(); + std::unique_ptr new_data( + new InputBatchCache(long_sentence_str)); + test_parser->InitializeData({}, kBeamSize, new_data.get()); + + // Check that the component is not terminal. 
+ EXPECT_FALSE(test_parser->IsTerminal()); + + // Check that the component is reporting 0 steps taken. + EXPECT_EQ(test_parser->StepsTaken(0), 0); + + // The states should have 0 as their score. + auto new_beam = test_parser->GetBeam(); + EXPECT_EQ(new_beam.at(0).at(0)->GetScore(), 0); +} + +TEST_F(SyntaxNetComponentTest, AdjustingMaxBeamSizeAdjustsSizeForAllBeams) { + // Create an empty input batch and beam vector to initialize the parser. + Sentence sentence_0; + TextFormat::ParseFromString(kSentence0, &sentence_0); + string sentence_0_str; + sentence_0.SerializeToString(&sentence_0_str); + + Sentence long_sentence; + TextFormat::ParseFromString(kLongSentence, &long_sentence); + string long_sentence_str; + long_sentence.SerializeToString(&long_sentence_str); + + // Get the master spec proto from the test data directory. + MasterSpec master_spec; + string file_name = tensorflow::io::JoinPath( + test::GetTestDataPrefix(), "dragnn/components/syntaxnet/testdata", + "master_spec.textproto"); + TF_CHECK_OK(tensorflow::ReadTextProto(tensorflow::Env::Default(), file_name, + &master_spec)); + + // Get all the resource protos from the test data directory. + for (Resource &resource : + *(master_spec.mutable_component(0)->mutable_resource())) { + resource.mutable_part(0)->set_file_pattern(tensorflow::io::JoinPath( + test::GetTestDataPrefix(), "dragnn/components/syntaxnet/testdata", + resource.part(0).file_pattern())); + } + + // Create an input batch cache with a small batch size. + constexpr int kBeamSize = 2; + std::unique_ptr small_batch_data( + new InputBatchCache(sentence_0_str)); + std::unique_ptr parser_component( + new SyntaxNetComponent()); + parser_component->InitializeComponent(*(master_spec.mutable_component(0))); + parser_component->InitializeData({}, kBeamSize, small_batch_data.get()); + + // Make sure all the beams in the batch have max size 2. 
+ for (const auto &beam : GetBeams(parser_component.get())) { + EXPECT_EQ(beam->max_size(), kBeamSize); + } + + // Reset the component and pass in a new input batch that is larger, with + // a higher beam size. + constexpr int kNewBeamSize = 5; + parser_component->ResetComponent(); + std::unique_ptr large_batch_data(new InputBatchCache( + {long_sentence_str, long_sentence_str, long_sentence_str})); + parser_component->InitializeData({}, kNewBeamSize, large_batch_data.get()); + + // Make sure all the beams in the batch now have max size 5. + for (const auto &beam : GetBeams(parser_component.get())) { + EXPECT_EQ(beam->max_size(), kNewBeamSize); + } +} + +TEST_F(SyntaxNetComponentTest, SettingBeamSizeZeroFails) { + // Create an empty input batch and beam vector to initialize the parser. + Sentence sentence_0; + TextFormat::ParseFromString(kSentence0, &sentence_0); + string sentence_0_str; + sentence_0.SerializeToString(&sentence_0_str); + + Sentence long_sentence; + TextFormat::ParseFromString(kLongSentence, &long_sentence); + string long_sentence_str; + long_sentence.SerializeToString(&long_sentence_str); + + // Get the master spec proto from the test data directory. + MasterSpec master_spec; + string file_name = tensorflow::io::JoinPath( + test::GetTestDataPrefix(), "dragnn/components/syntaxnet/testdata", + "master_spec.textproto"); + TF_CHECK_OK(tensorflow::ReadTextProto(tensorflow::Env::Default(), file_name, + &master_spec)); + + // Get all the resource protos from the test data directory. + for (Resource &resource : + *(master_spec.mutable_component(0)->mutable_resource())) { + resource.mutable_part(0)->set_file_pattern(tensorflow::io::JoinPath( + test::GetTestDataPrefix(), "dragnn/components/syntaxnet/testdata", + resource.part(0).file_pattern())); + } + + // Create an input batch cache with a small batch size. 
+ constexpr int kBeamSize = 0; + std::unique_ptr small_batch_data( + new InputBatchCache(sentence_0_str)); + std::unique_ptr parser_component( + new SyntaxNetComponent()); + parser_component->InitializeComponent(*(master_spec.mutable_component(0))); + EXPECT_DEATH( + parser_component->InitializeData({}, kBeamSize, small_batch_data.get()), + "must be greater than 0"); +} + +TEST_F(SyntaxNetComponentTest, ExportsFixedFeaturesWithPadding) { + // Create an empty input batch and beam vector to initialize the parser. + Sentence sentence_0; + TextFormat::ParseFromString(kSentence0, &sentence_0); + string sentence_0_str; + sentence_0.SerializeToString(&sentence_0_str); + + Sentence sentence_1; + TextFormat::ParseFromString(kSentence1, &sentence_1); + string sentence_1_str; + sentence_1.SerializeToString(&sentence_1_str); + + constexpr int kBeamSize = 3; + + auto test_parser = + CreateParserWithBeamSize(kBeamSize, {}, {sentence_0_str, sentence_1_str}); + + // Get and check the raw link features. + vector indices; + auto indices_fn = [&indices](int size) { + indices.resize(size); + return indices.data(); + }; + vector ids; + auto ids_fn = [&ids](int size) { + ids.resize(size); + return ids.data(); + }; + vector weights; + auto weights_fn = [&weights](int size) { + weights.resize(size); + return weights.data(); + }; + constexpr int kChannelId = 0; + const int num_features = + test_parser->GetFixedFeatures(indices_fn, ids_fn, weights_fn, kChannelId); + + // The raw features for each beam object should be [single, single]. + // There is also padding expected in this beam - there is only one + // element in each beam (so two elements total; batch is two). Thus, we expect + // 0,1 and 6,7 to be filled with one element each. 
+ constexpr int kExpectedOutputSize = 4; + const vector expected_indices({0, 1, 6, 7}); + const vector expected_ids({0, 12, 0, 12}); + const vector expected_weights({1.0, 1.0, 1.0, 1.0}); + + EXPECT_EQ(expected_indices.size(), kExpectedOutputSize); + EXPECT_EQ(expected_ids.size(), kExpectedOutputSize); + EXPECT_EQ(expected_weights.size(), kExpectedOutputSize); + EXPECT_EQ(num_features, kExpectedOutputSize); + + EXPECT_EQ(expected_indices, indices); + EXPECT_EQ(expected_ids, ids); + EXPECT_EQ(expected_weights, weights); +} + +TEST_F(SyntaxNetComponentTest, ExportsFixedFeatures) { + // Create an empty input batch and beam vector to initialize the parser. + Sentence sentence_0; + TextFormat::ParseFromString(kSentence0, &sentence_0); + string sentence_0_str; + sentence_0.SerializeToString(&sentence_0_str); + + Sentence sentence_1; + TextFormat::ParseFromString(kSentence1, &sentence_1); + string sentence_1_str; + sentence_1.SerializeToString(&sentence_1_str); + + constexpr int kBeamSize = 3; + + auto test_parser = + CreateParserWithBeamSize(kBeamSize, {}, {sentence_0_str, sentence_1_str}); + + // There are 93 possible transitions for any given state. Create a transition + // array with a score of 10.0 for each transition. + constexpr int kBatchSize = 2; + constexpr int kNumPossibleTransitions = 93; + constexpr float kTransitionValue = 10.0; + float transition_matrix[kNumPossibleTransitions * kBeamSize * kBatchSize]; + for (int i = 0; i < kNumPossibleTransitions * kBeamSize * kBatchSize; ++i) { + transition_matrix[i] = kTransitionValue; + } + + // Advance twice, so that the underlying parser fills the beam. + test_parser->AdvanceFromPrediction( + transition_matrix, kNumPossibleTransitions * kBeamSize * kBatchSize); + test_parser->AdvanceFromPrediction( + transition_matrix, kNumPossibleTransitions * kBeamSize * kBatchSize); + + // Get and check the raw link features. 
+ vector indices; + auto indices_fn = [&indices](int size) { + indices.resize(size); + return indices.data(); + }; + vector ids; + auto ids_fn = [&ids](int size) { + ids.resize(size); + return ids.data(); + }; + vector weights; + auto weights_fn = [&weights](int size) { + weights.resize(size); + return weights.data(); + }; + constexpr int kChannelId = 0; + const int num_features = + test_parser->GetFixedFeatures(indices_fn, ids_fn, weights_fn, kChannelId); + + // In this case, all even features and all odd features are identical. + constexpr int kExpectedOutputSize = 12; + const vector expected_indices({0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}); + const vector expected_ids({12, 7, 12, 7, 12, 7, 12, 7, 12, 7, 12, 7}); + const vector expected_weights( + {1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0}); + + EXPECT_EQ(expected_indices.size(), kExpectedOutputSize); + EXPECT_EQ(expected_ids.size(), kExpectedOutputSize); + EXPECT_EQ(expected_weights.size(), kExpectedOutputSize); + EXPECT_EQ(num_features, kExpectedOutputSize); + + EXPECT_EQ(expected_indices, indices); + EXPECT_EQ(expected_ids, ids); + EXPECT_EQ(expected_weights, weights); +} + +TEST_F(SyntaxNetComponentTest, ExportsBulkFixedFeatures) { + // Create an empty input batch and beam vector to initialize the parser. + Sentence sentence_0; + TextFormat::ParseFromString(kSentence0, &sentence_0); + string sentence_0_str; + sentence_0.SerializeToString(&sentence_0_str); + + Sentence sentence_1; + TextFormat::ParseFromString(kSentence1, &sentence_1); + string sentence_1_str; + sentence_1.SerializeToString(&sentence_1_str); + + constexpr int kBeamSize = 3; + auto test_parser = + CreateParserWithBeamSize(kBeamSize, {}, {sentence_0_str, sentence_1_str}); + + // Get and check the raw link features. 
+ vector> indices; + auto indices_fn = [&indices](int channel, int size) { + indices.resize(channel + 1); + indices[channel].resize(size); + return indices[channel].data(); + }; + vector> ids; + auto ids_fn = [&ids](int channel, int size) { + ids.resize(channel + 1); + ids[channel].resize(size); + return ids[channel].data(); + }; + vector> weights; + auto weights_fn = [&weights](int channel, int size) { + weights.resize(channel + 1); + weights[channel].resize(size); + return weights[channel].data(); + }; + + BulkFeatureExtractor extractor(indices_fn, ids_fn, weights_fn); + const int num_steps = test_parser->BulkGetFixedFeatures(extractor); + + // There should be 6 steps (2N, where N is the longest number of tokens). + EXPECT_EQ(num_steps, 6); + + // These are empirically derived. + const vector expected_ch0_indices({0, 36, 18, 54, 1, 37, 19, 55, + 2, 38, 20, 56, 3, 39, 21, 57, + 4, 40, 22, 58, 5, 41, 23, 59}); + const vector expected_ch0_ids({0, 12, 0, 12, 12, 7, 12, 7, + 7, 50, 7, 50, 7, 50, 7, 50, + 50, 50, 50, 50, 50, 50, 50, 50}); + const vector expected_ch0_weights( + {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}); + const vector expected_ch1_indices( + {0, 36, 72, 18, 54, 90, 1, 37, 73, 19, 55, 91, 2, 38, 74, 20, 56, 92, + 3, 39, 75, 21, 57, 93, 4, 40, 76, 22, 58, 94, 5, 41, 77, 23, 59, 95}); + const vector expected_ch1_ids( + {51, 0, 12, 51, 0, 12, 0, 12, 7, 0, 12, 7, 12, 7, 50, 12, 7, 50, + 12, 7, 50, 12, 7, 50, 7, 50, 50, 7, 50, 50, 7, 50, 50, 7, 50, 50}); + const vector expected_ch1_weights( + {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}); + + EXPECT_EQ(indices[0], expected_ch0_indices); + EXPECT_EQ(ids[0], expected_ch0_ids); + EXPECT_EQ(weights[0], expected_ch0_weights); + EXPECT_EQ(indices[1], expected_ch1_indices); + EXPECT_EQ(ids[1], expected_ch1_ids); + EXPECT_EQ(weights[1], expected_ch1_weights); +} + +TEST_F(SyntaxNetComponentTest, 
ExportsRawLinkFeaturesWithPadding) { + // Create an empty input batch and beam vector to initialize the parser. + Sentence sentence_0; + TextFormat::ParseFromString(kSentence0, &sentence_0); + string sentence_0_str; + sentence_0.SerializeToString(&sentence_0_str); + + Sentence sentence_1; + TextFormat::ParseFromString(kSentence1, &sentence_1); + string sentence_1_str; + sentence_1.SerializeToString(&sentence_1_str); + + constexpr int kBeamSize = 3; + constexpr int kBatchSize = 2; + auto test_parser = + CreateParserWithBeamSize(kBeamSize, {}, {sentence_0_str, sentence_1_str}); + + // Get and check the raw link features. + constexpr int kNumLinkFeatures = 2; + auto link_features = test_parser->GetRawLinkFeatures(0); + EXPECT_EQ(link_features.size(), kBeamSize * kBatchSize * kNumLinkFeatures); + + EXPECT_EQ(link_features.at(0).feature_value(), -1); + EXPECT_EQ(link_features.at(0).batch_idx(), 0); + EXPECT_EQ(link_features.at(0).beam_idx(), 0); + + EXPECT_EQ(link_features.at(1).feature_value(), -2); + EXPECT_EQ(link_features.at(1).batch_idx(), 0); + EXPECT_EQ(link_features.at(1).beam_idx(), 0); + + // These are padding, so we do not expect them to have a feature value. 
+ EXPECT_FALSE(link_features.at(2).has_feature_value()); + EXPECT_FALSE(link_features.at(2).has_batch_idx()); + EXPECT_FALSE(link_features.at(2).has_beam_idx()); + EXPECT_FALSE(link_features.at(3).has_feature_value()); + EXPECT_FALSE(link_features.at(3).has_batch_idx()); + EXPECT_FALSE(link_features.at(3).has_beam_idx()); + EXPECT_FALSE(link_features.at(4).has_feature_value()); + EXPECT_FALSE(link_features.at(4).has_batch_idx()); + EXPECT_FALSE(link_features.at(4).has_beam_idx()); + EXPECT_FALSE(link_features.at(5).has_feature_value()); + EXPECT_FALSE(link_features.at(5).has_batch_idx()); + EXPECT_FALSE(link_features.at(5).has_beam_idx()); + + EXPECT_EQ(link_features.at(6).feature_value(), -1); + EXPECT_EQ(link_features.at(6).batch_idx(), 1); + EXPECT_EQ(link_features.at(6).beam_idx(), 0); + + EXPECT_EQ(link_features.at(7).feature_value(), -2); + EXPECT_EQ(link_features.at(7).batch_idx(), 1); + EXPECT_EQ(link_features.at(7).beam_idx(), 0); + + // These are padding, so we do not expect them to have a feature value. + EXPECT_FALSE(link_features.at(8).has_feature_value()); + EXPECT_FALSE(link_features.at(8).has_batch_idx()); + EXPECT_FALSE(link_features.at(8).has_beam_idx()); + EXPECT_FALSE(link_features.at(9).has_feature_value()); + EXPECT_FALSE(link_features.at(9).has_batch_idx()); + EXPECT_FALSE(link_features.at(9).has_beam_idx()); + EXPECT_FALSE(link_features.at(10).has_feature_value()); + EXPECT_FALSE(link_features.at(10).has_batch_idx()); + EXPECT_FALSE(link_features.at(10).has_beam_idx()); + EXPECT_FALSE(link_features.at(11).has_feature_value()); + EXPECT_FALSE(link_features.at(11).has_batch_idx()); + EXPECT_FALSE(link_features.at(11).has_beam_idx()); +} + +TEST_F(SyntaxNetComponentTest, ExportsRawLinkFeatures) { + // Create an empty input batch and beam vector to initialize the parser. 
+ Sentence sentence_0; + TextFormat::ParseFromString(kSentence0, &sentence_0); + string sentence_0_str; + sentence_0.SerializeToString(&sentence_0_str); + + Sentence sentence_1; + TextFormat::ParseFromString(kSentence1, &sentence_1); + string sentence_1_str; + sentence_1.SerializeToString(&sentence_1_str); + + constexpr int kBeamSize = 3; + auto test_parser = + CreateParserWithBeamSize(kBeamSize, {}, {sentence_0_str, sentence_1_str}); + + // There are 93 possible transitions for any given state. Create a transition + // array with a score of 10.0 for each transition. + constexpr int kBatchSize = 2; + constexpr int kNumPossibleTransitions = 93; + constexpr float kTransitionValue = 10.0; + float transition_matrix[kNumPossibleTransitions * kBeamSize * kBatchSize]; + for (int i = 0; i < kNumPossibleTransitions * kBeamSize * kBatchSize; ++i) { + transition_matrix[i] = kTransitionValue; + } + + // Advance twice, so that the underlying parser fills the beam. + test_parser->AdvanceFromPrediction( + transition_matrix, kNumPossibleTransitions * kBeamSize * kBatchSize); + test_parser->AdvanceFromPrediction( + transition_matrix, kNumPossibleTransitions * kBeamSize * kBatchSize); + + // Get and check the raw link features. + constexpr int kNumLinkFeatures = 2; + auto link_features = test_parser->GetRawLinkFeatures(0); + EXPECT_EQ(link_features.size(), kBeamSize * kBatchSize * kNumLinkFeatures); + + // These should index into batch 0. 
+ EXPECT_EQ(link_features.at(0).feature_value(), -1); + EXPECT_EQ(link_features.at(0).batch_idx(), 0); + EXPECT_EQ(link_features.at(0).beam_idx(), 0); + + EXPECT_EQ(link_features.at(1).feature_value(), -2); + EXPECT_EQ(link_features.at(1).batch_idx(), 0); + EXPECT_EQ(link_features.at(1).beam_idx(), 0); + + EXPECT_EQ(link_features.at(2).feature_value(), -1); + EXPECT_EQ(link_features.at(2).batch_idx(), 0); + EXPECT_EQ(link_features.at(2).beam_idx(), 1); + + EXPECT_EQ(link_features.at(3).feature_value(), -2); + EXPECT_EQ(link_features.at(3).batch_idx(), 0); + EXPECT_EQ(link_features.at(3).beam_idx(), 1); + + EXPECT_EQ(link_features.at(4).feature_value(), -1); + EXPECT_EQ(link_features.at(4).batch_idx(), 0); + EXPECT_EQ(link_features.at(4).beam_idx(), 2); + + EXPECT_EQ(link_features.at(5).feature_value(), -2); + EXPECT_EQ(link_features.at(5).batch_idx(), 0); + EXPECT_EQ(link_features.at(5).beam_idx(), 2); + + // These should index into batch 1. + EXPECT_EQ(link_features.at(6).feature_value(), -1); + EXPECT_EQ(link_features.at(6).batch_idx(), 1); + EXPECT_EQ(link_features.at(6).beam_idx(), 0); + + EXPECT_EQ(link_features.at(7).feature_value(), -2); + EXPECT_EQ(link_features.at(7).batch_idx(), 1); + EXPECT_EQ(link_features.at(7).beam_idx(), 0); + + EXPECT_EQ(link_features.at(8).feature_value(), -1); + EXPECT_EQ(link_features.at(8).batch_idx(), 1); + EXPECT_EQ(link_features.at(8).beam_idx(), 1); + + EXPECT_EQ(link_features.at(9).feature_value(), -2); + EXPECT_EQ(link_features.at(9).batch_idx(), 1); + EXPECT_EQ(link_features.at(9).beam_idx(), 1); + + EXPECT_EQ(link_features.at(10).feature_value(), -1); + EXPECT_EQ(link_features.at(10).batch_idx(), 1); + EXPECT_EQ(link_features.at(10).beam_idx(), 2); + + EXPECT_EQ(link_features.at(11).feature_value(), -2); + EXPECT_EQ(link_features.at(11).batch_idx(), 1); + EXPECT_EQ(link_features.at(11).beam_idx(), 2); +} + +TEST_F(SyntaxNetComponentTest, AdvancesFromOracleWithTracing) { + // Create an empty input batch and beam vector to 
initialize the parser. + Sentence sentence_0; + TextFormat::ParseFromString(kSentence0, &sentence_0); + string sentence_0_str; + sentence_0.SerializeToString(&sentence_0_str); + + constexpr int kBeamSize = 1; + auto test_parser = CreateParserWithBeamSize(kBeamSize, {}, {sentence_0_str}); + test_parser->InitializeTracing(); + + constexpr int kNumTokensInSentence = 3; + + // The master spec will initialize a parser, so expect 2*N transitions. + constexpr int kExpectedNumTransitions = kNumTokensInSentence * 2; + constexpr int kFixedFeatureChannels = 1; + for (int i = 0; i < kExpectedNumTransitions; ++i) { + EXPECT_FALSE(test_parser->IsTerminal()); + vector indices; + auto indices_fn = [&indices](int size) { + indices.resize(size); + return indices.data(); + }; + vector ids; + auto ids_fn = [&ids](int size) { + ids.resize(size); + return ids.data(); + }; + vector weights; + auto weights_fn = [&weights](int size) { + weights.resize(size); + return weights.data(); + }; + for (int j = 0; j < kFixedFeatureChannels; ++j) { + test_parser->GetFixedFeatures(indices_fn, ids_fn, weights_fn, j); + } + auto features = test_parser->GetRawLinkFeatures(0); + + // Make some fake translations to test visualization. + for (int j = 0; j < features.size(); ++j) { + features[j].set_step_idx(j < i ? j : -1); + } + test_parser->AddTranslatedLinkFeaturesToTrace(features, 0); + test_parser->AdvanceFromOracle(); + } + + // At this point, the test parser should be terminal. + EXPECT_TRUE(test_parser->IsTerminal()); + + // TODO(googleuser): Add EXPECT_EQ here instead of printing. + std::vector> traces = + test_parser->GetTraceProtos(); + for (auto &batch_trace : traces) { + for (auto &trace : batch_trace) { + LOG(INFO) << "trace:" << std::endl << trace.DebugString(); + } + } +} + +TEST_F(SyntaxNetComponentTest, NoTracingDropsFeatureNames) { + // Create an empty input batch and beam vector to initialize the parser. 
+ Sentence sentence_0; + TextFormat::ParseFromString(kSentence0, &sentence_0); + string sentence_0_str; + sentence_0.SerializeToString(&sentence_0_str); + + constexpr int kBeamSize = 1; + const auto test_parser = + CreateParserWithBeamSize(kBeamSize, {}, {sentence_0_str}); + const auto link_features = test_parser->GetRawLinkFeatures(0); + + // The fml associated with the channel is "stack.focus stack(1).focus". + // Both features should lack the feature_name field. + EXPECT_EQ(link_features.size(), 2); + EXPECT_FALSE(link_features.at(0).has_feature_name()); + EXPECT_FALSE(link_features.at(1).has_feature_name()); +} + +TEST_F(SyntaxNetComponentTest, TracingOutputsFeatureNames) { + // Create an empty input batch and beam vector to initialize the parser. + Sentence sentence_0; + TextFormat::ParseFromString(kSentence0, &sentence_0); + string sentence_0_str; + sentence_0.SerializeToString(&sentence_0_str); + + constexpr int kBeamSize = 1; + auto test_parser = CreateParserWithBeamSize(kBeamSize, {}, {sentence_0_str}); + test_parser->InitializeTracing(); + const auto link_features = test_parser->GetRawLinkFeatures(0); + + // The fml associated with the channel is "stack.focus stack(1).focus". 
+ EXPECT_EQ(link_features.size(), 2); + EXPECT_EQ(link_features.at(0).feature_name(), "stack.focus"); + EXPECT_EQ(link_features.at(1).feature_name(), "stack(1).focus"); +} + +} // namespace dragnn +} // namespace syntaxnet diff --git a/syntaxnet/dragnn/components/syntaxnet/syntaxnet_link_feature_extractor.cc b/syntaxnet/dragnn/components/syntaxnet/syntaxnet_link_feature_extractor.cc new file mode 100644 index 0000000000000000000000000000000000000000..fa9612c739c43d9b1b1778705d719f3f48187a08 --- /dev/null +++ b/syntaxnet/dragnn/components/syntaxnet/syntaxnet_link_feature_extractor.cc @@ -0,0 +1,49 @@ +#include "dragnn/components/syntaxnet/syntaxnet_link_feature_extractor.h" + +#include "tensorflow/core/platform/logging.h" + +namespace syntaxnet { +namespace dragnn { + +void SyntaxNetLinkFeatureExtractor::Setup(TaskContext *context) { + ParserEmbeddingFeatureExtractor::Setup(context); + + if (NumEmbeddings() > 0) { + channel_sources_ = utils::Split( + context->Get( + tensorflow::strings::StrCat(ArgPrefix(), "_", "source_components"), + ""), + ';'); + channel_layers_ = utils::Split( + context->Get( + tensorflow::strings::StrCat(ArgPrefix(), "_", "source_layers"), ""), + ';'); + channel_translators_ = utils::Split( + context->Get( + tensorflow::strings::StrCat(ArgPrefix(), "_", "source_translators"), + ""), + ';'); + } + + CHECK_EQ(channel_sources_.size(), NumEmbeddings()); + CHECK_EQ(channel_layers_.size(), NumEmbeddings()); + CHECK_EQ(channel_translators_.size(), NumEmbeddings()); +} + +void SyntaxNetLinkFeatureExtractor::AddLinkedFeatureChannelProtos( + ComponentSpec *spec) const { + for (int embedding_idx = 0; embedding_idx < NumEmbeddings(); + ++embedding_idx) { + LinkedFeatureChannel *channel = spec->add_linked_feature(); + channel->set_name(embedding_name(embedding_idx)); + channel->set_fml(embedding_fml()[embedding_idx]); + channel->set_embedding_dim(EmbeddingDims(embedding_idx)); + channel->set_size(FeatureSize(embedding_idx)); + 
channel->set_source_layer(channel_layers_[embedding_idx]); + channel->set_source_component(channel_sources_[embedding_idx]); + channel->set_source_translator(channel_translators_[embedding_idx]); + } +} + +} // namespace dragnn +} // namespace syntaxnet diff --git a/syntaxnet/dragnn/components/syntaxnet/syntaxnet_link_feature_extractor.h b/syntaxnet/dragnn/components/syntaxnet/syntaxnet_link_feature_extractor.h new file mode 100644 index 0000000000000000000000000000000000000000..e74d77d8045ba64dc3a385fc644e7ac33c3b4505 --- /dev/null +++ b/syntaxnet/dragnn/components/syntaxnet/syntaxnet_link_feature_extractor.h @@ -0,0 +1,55 @@ +#ifndef NLP_SAFT_OPENSOURCE_DRAGNN_COMPONENTS_SYNTAXNET_SYNTAXNET_LINK_FEATURE_EXTRACTOR_H_ +#define NLP_SAFT_OPENSOURCE_DRAGNN_COMPONENTS_SYNTAXNET_SYNTAXNET_LINK_FEATURE_EXTRACTOR_H_ + +#include +#include + +#include "dragnn/protos/spec.pb.h" +#include "syntaxnet/embedding_feature_extractor.h" +#include "syntaxnet/parser_state.h" +#include "syntaxnet/parser_transitions.h" +#include "syntaxnet/task_context.h" + +namespace syntaxnet { +namespace dragnn { + +// Provides feature extraction for linked features in the +// WrapperParserComponent. This re-ues the EmbeddingFeatureExtractor +// architecture to get another set of feature extractors. Note that we should +// ignore predicate maps here, and we don't care about the vocabulary size +// because all the feature values will be used for translation, but this means +// we can configure the extractor from the GCL using the standard +// neurosis-lib.wf syntax. +// +// Because it uses a different prefix, it can be executed in the same wf.stage +// as the regular fixed extractor. 
+class SyntaxNetLinkFeatureExtractor : public ParserEmbeddingFeatureExtractor { + public: + SyntaxNetLinkFeatureExtractor() : ParserEmbeddingFeatureExtractor("link") {} + ~SyntaxNetLinkFeatureExtractor() override {} + + const string ArgPrefix() const override { return "link"; } + + // Parses the TaskContext to get additional information like target layers, + // etc. + void Setup(TaskContext *context) override; + + // Called during InitComponentProtoTask to add the specification from the + // wrapped feature extractor as LinkedFeatureChannel protos. + void AddLinkedFeatureChannelProtos(ComponentSpec *spec) const; + + private: + // Source component names for each channel. + std::vector channel_sources_; + + // Source layer names for each channel. + std::vector channel_layers_; + + // Source translator name for each channel. + std::vector channel_translators_; +}; + +} // namespace dragnn +} // namespace syntaxnet + +#endif // NLP_SAFT_OPENSOURCE_DRAGNN_COMPONENTS_SYNTAXNET_SYNTAXNET_LINK_FEATURE_EXTRACTOR_H_ diff --git a/syntaxnet/dragnn/components/syntaxnet/syntaxnet_link_feature_extractor_test.cc b/syntaxnet/dragnn/components/syntaxnet/syntaxnet_link_feature_extractor_test.cc new file mode 100644 index 0000000000000000000000000000000000000000..8c9ad0888fdf4deda59e65aa8b1deed1f5fcc5fc --- /dev/null +++ b/syntaxnet/dragnn/components/syntaxnet/syntaxnet_link_feature_extractor_test.cc @@ -0,0 +1,63 @@ +#include "dragnn/components/syntaxnet/syntaxnet_link_feature_extractor.h" + +#include + +#include "dragnn/core/test/generic.h" +#include "dragnn/protos/spec.pb.h" +#include "syntaxnet/task_context.h" +#include "tensorflow/core/platform/test.h" + +using syntaxnet::test::EqualsProto; + +namespace syntaxnet { +namespace dragnn { + +class ExportSpecTest : public ::testing::Test { + public: +}; + +TEST_F(ExportSpecTest, WritesChannelSpec) { + TaskContext context; + + context.SetParameter("neurosis_feature_syntax_version", "2"); + context.SetParameter("link_features", 
"input.focus;stack.focus"); + context.SetParameter("link_embedding_names", "tagger;parser"); + context.SetParameter("link_predicate_maps", "none;none"); + context.SetParameter("link_embedding_dims", "16;16"); + context.SetParameter("link_source_components", "tagger;parser"); + context.SetParameter("link_source_layers", "hidden0;lstm"); + context.SetParameter("link_source_translators", "token;last_action"); + + SyntaxNetLinkFeatureExtractor link_features; + link_features.Setup(&context); + link_features.Init(&context); + + ComponentSpec spec; + link_features.AddLinkedFeatureChannelProtos(&spec); + const string expected_spec_str = R"( + linked_feature { + name: "tagger" + fml: "input.focus" + embedding_dim: 16 + size: 1 + source_component: "tagger" + source_translator: "token" + source_layer: "hidden0" + } + linked_feature { + name: "parser" + fml: "stack.focus" + embedding_dim: 16 + size: 1 + source_component: "parser" + source_translator: "last_action" + source_layer: "lstm" + } + )"; + ComponentSpec expected_spec; + TextFormat::ParseFromString(expected_spec_str, &expected_spec); + EXPECT_THAT(spec, EqualsProto(expected_spec)); +} + +} // namespace dragnn +} // namespace syntaxnet diff --git a/syntaxnet/dragnn/components/syntaxnet/syntaxnet_transition_state.cc b/syntaxnet/dragnn/components/syntaxnet/syntaxnet_transition_state.cc new file mode 100644 index 0000000000000000000000000000000000000000..93b4ff19dd1a7a9da94d32cc6b4f70b379d10d9b --- /dev/null +++ b/syntaxnet/dragnn/components/syntaxnet/syntaxnet_transition_state.cc @@ -0,0 +1,85 @@ +#include "dragnn/components/syntaxnet/syntaxnet_transition_state.h" + +#include "tensorflow/core/lib/strings/strcat.h" +#include "tensorflow/core/platform/logging.h" + +namespace syntaxnet { +namespace dragnn { + +SyntaxNetTransitionState::SyntaxNetTransitionState( + std::unique_ptr parser_state, SyntaxNetSentence *sentence) + : parser_state_(std::move(parser_state)), sentence_(sentence) { + score_ = 0; + current_beam_index_ = 
-1; + parent_beam_index_ = 0; + step_for_token_.resize(sentence->sentence()->token_size(), -1); + parent_for_token_.resize(sentence->sentence()->token_size(), -1); + parent_step_for_token_.resize(sentence->sentence()->token_size(), -1); +} + +void SyntaxNetTransitionState::Init(const TransitionState &parent) { + score_ = parent.GetScore(); + parent_beam_index_ = parent.GetBeamIndex(); +} + +std::unique_ptr SyntaxNetTransitionState::Clone() + const { + // Create a new state from a clone of the underlying parser state. + std::unique_ptr cloned_state(parser_state_->Clone()); + std::unique_ptr new_state( + new SyntaxNetTransitionState(std::move(cloned_state), sentence_)); + + // Copy relevant data members and set non-copied ones to flag values. + new_state->score_ = score_; + new_state->current_beam_index_ = current_beam_index_; + new_state->parent_beam_index_ = parent_beam_index_; + new_state->step_for_token_ = step_for_token_; + new_state->parent_step_for_token_ = parent_step_for_token_; + new_state->parent_for_token_ = parent_for_token_; + + // Copy trace if it exists. + if (trace_) { + new_state->trace_.reset(new ComponentTrace(*trace_)); + } + + return new_state; +} + +const int SyntaxNetTransitionState::ParentBeamIndex() const { + return parent_beam_index_; +} + +const int SyntaxNetTransitionState::GetBeamIndex() const { + return current_beam_index_; +} + +void SyntaxNetTransitionState::SetBeamIndex(const int index) { + current_beam_index_ = index; +} + +const float SyntaxNetTransitionState::GetScore() const { return score_; } + +void SyntaxNetTransitionState::SetScore(const float score) { score_ = score; } + +string SyntaxNetTransitionState::HTMLRepresentation() const { + // Crude HTML string showing the stack and the word on the input. 
+ string html = "Stack: "; + for (int i = parser_state_->StackSize() - 1; i >= 0; --i) { + const int word_idx = parser_state_->Stack(i); + if (word_idx >= 0) { + tensorflow::strings::StrAppend( + &html, parser_state_->GetToken(word_idx).word(), " "); + } + } + tensorflow::strings::StrAppend(&html, "| Input: "); + const int word_idx = parser_state_->Input(0); + if (word_idx >= 0) { + tensorflow::strings::StrAppend( + &html, parser_state_->GetToken(word_idx).word(), " "); + } + + return html; +} + +} // namespace dragnn +} // namespace syntaxnet diff --git a/syntaxnet/dragnn/components/syntaxnet/syntaxnet_transition_state.h b/syntaxnet/dragnn/components/syntaxnet/syntaxnet_transition_state.h new file mode 100644 index 0000000000000000000000000000000000000000..590880433b1d6f117d13e09f9f92bc9f3de3e294 --- /dev/null +++ b/syntaxnet/dragnn/components/syntaxnet/syntaxnet_transition_state.h @@ -0,0 +1,144 @@ +#ifndef NLP_SAFT_OPENSOURCE_DRAGNN_COMPONENTS_SYNTAXNET_SYNTAXNET_TRANSITION_STATE_H_ +#define NLP_SAFT_OPENSOURCE_DRAGNN_COMPONENTS_SYNTAXNET_SYNTAXNET_TRANSITION_STATE_H_ + +#include + +#include "dragnn/core/interfaces/cloneable_transition_state.h" +#include "dragnn/core/interfaces/transition_state.h" +#include "dragnn/io/syntaxnet_sentence.h" +#include "dragnn/protos/trace.pb.h" +#include "syntaxnet/base.h" +#include "syntaxnet/parser_state.h" + +namespace syntaxnet { +namespace dragnn { + +class SyntaxNetTransitionState + : public CloneableTransitionState { + public: + // Create a SyntaxNetTransitionState to wrap this nlp_saft::ParserState. + SyntaxNetTransitionState(std::unique_ptr parser_state, + SyntaxNetSentence *sentence); + + // Initialize this TransitionState from a previous TransitionState. The + // ParentBeamIndex is the location of that previous TransitionState in the + // provided beam. + void Init(const TransitionState &parent) override; + + // Produces a new state with the same backing data as this state. 
+ std::unique_ptr Clone() const override; + + // Return the beam index of the state passed into the initializer of this + // TransitionState. + const int ParentBeamIndex() const override; + + // Get the current beam index for this state. + const int GetBeamIndex() const override; + + // Set the current beam index for this state. + void SetBeamIndex(const int index) override; + + // Get the score associated with this transition state. + const float GetScore() const override; + + // Set the score associated with this transition state. + void SetScore(const float score) override; + + // Depicts this state as an HTML-language string. + string HTMLRepresentation() const override; + + // **** END INHERITED INTERFACE **** + + // TODO(googleuser): Make these comments actually mean something. + // Data accessor. + int step_for_token(int token) { + if (token < 0 || token >= step_for_token_.size()) { + return -1; + } else { + return step_for_token_.at(token); + } + } + + // Data setter. + void set_step_for_token(int token, int step) { + step_for_token_.insert(step_for_token_.begin() + token, step); + } + + // Data accessor. + int parent_step_for_token(int token) { + if (token < 0 || token >= step_for_token_.size()) { + return -1; + } else { + return parent_step_for_token_.at(token); + } + } + + // Data setter. + void set_parent_step_for_token(int token, int parent_step) { + parent_step_for_token_.insert(parent_step_for_token_.begin() + token, + parent_step); + } + + // Data accessor. + int parent_for_token(int token) { + if (token < 0 || token >= step_for_token_.size()) { + return -1; + } else { + return parent_for_token_.at(token); + } + } + + // Data setter. + void set_parent_for_token(int token, int parent) { + parent_for_token_.insert(parent_for_token_.begin() + token, parent); + } + + // Accessor for the underlying nlp_saft::ParserState. + ParserState *parser_state() { return parser_state_.get(); } + + // Accessor for the underlying sentence object. 
+ SyntaxNetSentence *sentence() { return sentence_; } + + ComponentTrace *mutable_trace() { + CHECK(trace_) << "Trace is not initialized"; + return trace_.get(); + } + void set_trace(std::unique_ptr trace) { + trace_ = std::move(trace); + } + + private: + // Underlying ParserState object that is being wrapped. + std::unique_ptr parser_state_; + + // Sentence object that is being examined with this state. + SyntaxNetSentence *sentence_; + + // The current score of this state. + float score_; + + // The current beam index of this state. + int current_beam_index_; + + // The parent beam index for this state. + int parent_beam_index_; + + // Maintains a list of which steps in the history correspond to + // representations for each of the tokens on the stack. + std::vector step_for_token_; + + // Maintains a list of which steps in the history correspond to the actions + // that assigned a parent for tokens when reduced. + std::vector parent_step_for_token_; + + // Maintain the parent index of a token in the system. + std::vector parent_for_token_; + + // Trace of the history to produce this state. 
+ std::unique_ptr trace_; +}; + +} // namespace dragnn +} // namespace syntaxnet + +#endif // NLP_SAFT_OPENSOURCE_DRAGNN_COMPONENTS_SYNTAXNET_SYNTAXNET_TRANSITION_STATE_H_ diff --git a/syntaxnet/dragnn/components/syntaxnet/syntaxnet_transition_state_test.cc b/syntaxnet/dragnn/components/syntaxnet/syntaxnet_transition_state_test.cc new file mode 100644 index 0000000000000000000000000000000000000000..094da9b5c56c859cc086f5f8b7a1e85497e1ae6d --- /dev/null +++ b/syntaxnet/dragnn/components/syntaxnet/syntaxnet_transition_state_test.cc @@ -0,0 +1,276 @@ +#include "dragnn/components/syntaxnet/syntaxnet_transition_state.h" + +#include "dragnn/components/syntaxnet/syntaxnet_component.h" +#include "dragnn/core/input_batch_cache.h" +#include "dragnn/core/test/generic.h" +#include "dragnn/core/test/mock_transition_state.h" +#include "dragnn/io/sentence_input_batch.h" +#include "dragnn/protos/spec.pb.h" +#include "syntaxnet/sentence.pb.h" +#include "tensorflow/core/lib/core/errors.h" +#include "tensorflow/core/lib/core/status.h" +#include "tensorflow/core/lib/io/path.h" +#include "tensorflow/core/platform/env.h" +#include "tensorflow/core/platform/protobuf.h" +#include "tensorflow/core/platform/test.h" + +// This test suite is intended to validate the contracts that the DRAGNN +// system expects from all transition state subclasses. Developers creating +// new TransitionStates should copy this test and modify it as necessary, +// using it to ensure their state conforms to DRAGNN expectations. + +namespace syntaxnet { +namespace dragnn { + +namespace { + +const char kSentence0[] = R"( +token { + word: "Sentence" start: 0 end: 7 tag: "NN" category: "NOUN" label: "ROOT" + break_level: NO_BREAK +} +token { + word: "0" start: 9 end: 9 head: 0 tag: "CD" category: "NUM" label: "num" + break_level: SPACE_BREAK +} +token { + word: "." start: 10 end: 10 head: 0 tag: "." category: "." 
label: "punct" + break_level: NO_BREAK +} +)"; + +} // namespace + +using testing::Return; + +class SyntaxNetTransitionStateTest : public ::testing::Test { + public: + std::unique_ptr CreateState() { + // Get the master spec proto from the test data directory. + MasterSpec master_spec; + string file_name = tensorflow::io::JoinPath( + test::GetTestDataPrefix(), "dragnn/components/syntaxnet/testdata", + "master_spec.textproto"); + TF_CHECK_OK(tensorflow::ReadTextProto(tensorflow::Env::Default(), file_name, + &master_spec)); + + // Get all the resource protos from the test data directory. + for (Resource &resource : + *(master_spec.mutable_component(0)->mutable_resource())) { + resource.mutable_part(0)->set_file_pattern(tensorflow::io::JoinPath( + test::GetTestDataPrefix(), "dragnn/components/syntaxnet/testdata", + resource.part(0).file_pattern())); + } + + // Create an empty input batch and beam vector to initialize the parser. + Sentence sentence_0; + TextFormat::ParseFromString(kSentence0, &sentence_0); + string sentence_0_str; + sentence_0.SerializeToString(&sentence_0_str); + data_.reset(new InputBatchCache(sentence_0_str)); + SentenceInputBatch *sentences = data_->GetAs(); + + // Create a parser comoponent that will generate a parser state for this + // test. + SyntaxNetComponent component; + component.InitializeComponent(*(master_spec.mutable_component(0))); + std::vector> states; + constexpr int kBeamSize = 1; + component.InitializeData(states, kBeamSize, data_.get()); + + // Get a transition state from the component. + std::unique_ptr test_state = + component.CreateState(&(sentences->data()->at(0))); + return test_state; + } + + std::unique_ptr data_; +}; + +// Validates the consistency of the beam index setter and getter. +TEST_F(SyntaxNetTransitionStateTest, CanSetAndGetBeamIndex) { + // Create and initialize a test state. 
+ MockTransitionState mock_state; + auto test_state = CreateState(); + test_state->Init(mock_state); + + constexpr int kOldBeamIndex = 12; + test_state->SetBeamIndex(kOldBeamIndex); + EXPECT_EQ(test_state->GetBeamIndex(), kOldBeamIndex); + + constexpr int kNewBeamIndex = 7; + test_state->SetBeamIndex(kNewBeamIndex); + EXPECT_EQ(test_state->GetBeamIndex(), kNewBeamIndex); +} + +// Validates the consistency of the score setter and getter. +TEST_F(SyntaxNetTransitionStateTest, CanSetAndGetScore) { + // Create and initialize a test state. + MockTransitionState mock_state; + auto test_state = CreateState(); + test_state->Init(mock_state); + + constexpr float kOldScore = 12.1; + test_state->SetScore(kOldScore); + EXPECT_EQ(test_state->GetScore(), kOldScore); + + constexpr float kNewScore = 7.2; + test_state->SetScore(kNewScore); + EXPECT_EQ(test_state->GetScore(), kNewScore); +} + +// This test ensures that the initializing state's current index is saved +// as the parent beam index of the state being initialized. +TEST_F(SyntaxNetTransitionStateTest, ReportsParentBeamIndex) { + // Create a mock transition state that wil report a specific current index. + // This index should become the parent state index for the test state. + MockTransitionState mock_state; + constexpr int kParentBeamIndex = 1138; + EXPECT_CALL(mock_state, GetBeamIndex()) + .WillRepeatedly(Return(kParentBeamIndex)); + + auto test_state = CreateState(); + test_state->Init(mock_state); + EXPECT_EQ(test_state->ParentBeamIndex(), kParentBeamIndex); +} + +// This test ensures that the initializing state's current score is saved +// as the current score of the state being initialized. +TEST_F(SyntaxNetTransitionStateTest, InitializationCopiesParentScore) { + // Create a mock transition state that wil report a specific current index. + // This index should become the parent state index for the test state. 
+ MockTransitionState mock_state; + constexpr float kParentScore = 24.12; + EXPECT_CALL(mock_state, GetScore()).WillRepeatedly(Return(kParentScore)); + + auto test_state = CreateState(); + test_state->Init(mock_state); + EXPECT_EQ(test_state->GetScore(), kParentScore); +} + +// This test ensures that calling Clone maintains the state data (parent beam +// index, beam index, score, etc.) of the state that was cloned. +TEST_F(SyntaxNetTransitionStateTest, CloningMaintainsState) { + // Create and initialize the state-> + MockTransitionState mock_state; + constexpr int kParentBeamIndex = 1138; + EXPECT_CALL(mock_state, GetBeamIndex()) + .WillRepeatedly(Return(kParentBeamIndex)); + auto test_state = CreateState(); + test_state->Init(mock_state); + + // Validate the internal state of the test state. + constexpr float kOldScore = 20.0; + test_state->SetScore(kOldScore); + EXPECT_EQ(test_state->GetScore(), kOldScore); + constexpr int kOldBeamIndex = 12; + test_state->SetBeamIndex(kOldBeamIndex); + EXPECT_EQ(test_state->GetBeamIndex(), kOldBeamIndex); + + auto clone = test_state->Clone(); + + // The clone should have identical state to the old state. + EXPECT_EQ(clone->ParentBeamIndex(), kParentBeamIndex); + EXPECT_EQ(clone->GetScore(), kOldScore); + EXPECT_EQ(clone->GetBeamIndex(), kOldBeamIndex); +} + +// Validates the consistency of the step_for_token setter and getter. +TEST_F(SyntaxNetTransitionStateTest, CanSetAndGetStepForToken) { + // Create and initialize a test state. + MockTransitionState mock_state; + auto test_state = CreateState(); + test_state->Init(mock_state); + + constexpr int kStepForTokenZero = 12; + constexpr int kStepForTokenTwo = 34; + test_state->set_step_for_token(0, kStepForTokenZero); + test_state->set_step_for_token(2, kStepForTokenTwo); + + // Expect that the set tokens return values and the unset steps return the + // default. 
+ constexpr int kDefaultValue = -1; + EXPECT_EQ(kStepForTokenZero, test_state->step_for_token(0)); + EXPECT_EQ(kDefaultValue, test_state->step_for_token(1)); + EXPECT_EQ(kStepForTokenTwo, test_state->step_for_token(2)); + + // Expect that out of bound accesses will return the default. (There are only + // 3 tokens in the backing sentence, so token 3 and greater are out of bound.) + EXPECT_EQ(kDefaultValue, test_state->step_for_token(-1)); + EXPECT_EQ(kDefaultValue, test_state->step_for_token(3)); +} + +// Validates the consistency of the parent_step_for_token setter and getter. +TEST_F(SyntaxNetTransitionStateTest, CanSetAndGetParentStepForToken) { + // Create and initialize a test state. + MockTransitionState mock_state; + auto test_state = CreateState(); + test_state->Init(mock_state); + + constexpr int kStepForTokenZero = 12; + constexpr int kStepForTokenTwo = 34; + test_state->set_parent_step_for_token(0, kStepForTokenZero); + test_state->set_parent_step_for_token(2, kStepForTokenTwo); + + // Expect that the set tokens return values and the unset steps return the + // default. + constexpr int kDefaultValue = -1; + EXPECT_EQ(kStepForTokenZero, test_state->parent_step_for_token(0)); + EXPECT_EQ(kDefaultValue, test_state->parent_step_for_token(1)); + EXPECT_EQ(kStepForTokenTwo, test_state->parent_step_for_token(2)); + + // Expect that out of bound accesses will return the default. (There are only + // 3 tokens in the backing sentence, so token 3 and greater are out of bound.) + EXPECT_EQ(kDefaultValue, test_state->parent_step_for_token(-1)); + EXPECT_EQ(kDefaultValue, test_state->parent_step_for_token(3)); +} + +// Validates the consistency of the parent_for_token setter and getter. +TEST_F(SyntaxNetTransitionStateTest, CanSetAndGetParentForToken) { + // Create and initialize a test state. 
+ MockTransitionState mock_state; + auto test_state = CreateState(); + test_state->Init(mock_state); + + constexpr int kParentForTokenZero = 12; + constexpr int kParentForTokenTwo = 34; + test_state->set_parent_for_token(0, kParentForTokenZero); + test_state->set_parent_for_token(2, kParentForTokenTwo); + + // Expect that the set tokens return values and the unset steps return the + // default. + constexpr int kDefaultValue = -1; + EXPECT_EQ(kParentForTokenZero, test_state->parent_for_token(0)); + EXPECT_EQ(kDefaultValue, test_state->parent_for_token(1)); + EXPECT_EQ(kParentForTokenTwo, test_state->parent_for_token(2)); + + // Expect that out of bound accesses will return the default. (There are only + // 3 tokens in the backing sentence, so token 3 and greater are out of bound.) + EXPECT_EQ(kDefaultValue, test_state->parent_for_token(-1)); + EXPECT_EQ(kDefaultValue, test_state->parent_for_token(3)); +} + +// Validates the consistency of trace proto setter/getter. +TEST_F(SyntaxNetTransitionStateTest, CanSetAndGetTrace) { + // Create and initialize a test state. + MockTransitionState mock_state; + auto test_state = CreateState(); + test_state->Init(mock_state); + + const string kTestComponentName = "test"; + std::unique_ptr trace; + trace.reset(new ComponentTrace()); + trace->set_name(kTestComponentName); + test_state->set_trace(std::move(trace)); + + EXPECT_EQ(trace.get(), nullptr); + EXPECT_EQ(test_state->mutable_trace()->name(), kTestComponentName); + + // Should be preserved when cloing. 
+ auto cloned_state = test_state->Clone(); + EXPECT_EQ(cloned_state->mutable_trace()->name(), kTestComponentName); + EXPECT_EQ(test_state->mutable_trace()->name(), kTestComponentName); +} + +} // namespace dragnn +} // namespace syntaxnet diff --git a/syntaxnet/dragnn/components/syntaxnet/testdata/master_spec.textproto b/syntaxnet/dragnn/components/syntaxnet/testdata/master_spec.textproto new file mode 100644 index 0000000000000000000000000000000000000000..ffd87c44d96ac93782c5c40e8688993c96309040 --- /dev/null +++ b/syntaxnet/dragnn/components/syntaxnet/testdata/master_spec.textproto @@ -0,0 +1,48 @@ +component { + name: "parser" + transition_system { + registered_name: "arc-standard" + } + resource { + name: 'label-map' + part { + file_pattern: 'syntaxnet-tagger.label-map' + file_format: 'text' + } + } + resource { + name: 'tag-map' + part { + file_pattern: 'syntaxnet-tagger.tag-map' + file_format: 'text' + } + } + fixed_feature { + name: "tags" + fml: "input.tag input(1).tag" + embedding_dim: 32 + vocabulary_size: 46 + size: 2 + predicate_map: "hashed" + } + fixed_feature { + name: "tags" + fml: "input(-1).tag input.tag input(1).tag" + embedding_dim: 32 + vocabulary_size: 46 + size: 3 + predicate_map: "hashed" + } + linked_feature { + name: "recurrent_stack" + fml: "stack.focus stack(1).focus" + embedding_dim: 32 + size: 2 + source_component: "parser" + source_translator: "identity" + source_layer: "hidden_0" + } + backend { + registered_name: "SyntaxNetComponent" + } +} diff --git a/syntaxnet/dragnn/components/syntaxnet/testdata/syntaxnet-tagger.label-map b/syntaxnet/dragnn/components/syntaxnet/testdata/syntaxnet-tagger.label-map new file mode 100644 index 0000000000000000000000000000000000000000..8fdd1fc86d9f33e2e639d794bb2b719a0767bc75 --- /dev/null +++ b/syntaxnet/dragnn/components/syntaxnet/testdata/syntaxnet-tagger.label-map @@ -0,0 +1,47 @@ +46 +punct 243160 +prep 194627 +pobj 186958 +det 170592 +nsubj 144821 +nn 144800 +amod 117242 +ROOT 90592 +dobj 88551 
+aux 76523 +advmod 72893 +conj 59384 +cc 57532 +num 36350 +poss 35117 +dep 34986 +ccomp 29470 +cop 25991 +mark 25141 +xcomp 25111 +rcmod 16234 +auxpass 15740 +advcl 14996 +possessive 14866 +nsubjpass 14133 +pcomp 12488 +appos 11112 +partmod 11106 +neg 11090 +number 10658 +prt 7123 +quantmod 6653 +tmod 5418 +infmod 5134 +npadvmod 3213 +parataxis 3012 +mwe 2793 +expl 2712 +iobj 1642 +acomp 1632 +discourse 1381 +csubj 1225 +predet 1160 +preconj 749 +goeswith 146 +csubjpass 41 diff --git a/syntaxnet/dragnn/components/syntaxnet/testdata/syntaxnet-tagger.master-spec b/syntaxnet/dragnn/components/syntaxnet/testdata/syntaxnet-tagger.master-spec new file mode 100644 index 0000000000000000000000000000000000000000..03305f17d1e3995869773e4e3bec9b60368f195a --- /dev/null +++ b/syntaxnet/dragnn/components/syntaxnet/testdata/syntaxnet-tagger.master-spec @@ -0,0 +1,65 @@ +component { + name: "tagger" + num_actions : 49 + transition_system { + registered_name: "tagger" + parameters { + key: "join_category_to_pos" + value: "true" + } + } + resource { + name: "tag-map" + part { + file_pattern: "TESTDATA/syntaxnet-tagger.tag-map" + file_format: "text" + } + } + resource { + name: "word-map" + part { + file_pattern: "TESTDATA/syntaxnet-tagger.word-map" + file_format: "text" + } + } + resource { + name: "label-map" + part { + file_pattern: "TESTDATA/syntaxnet-tagger.label-map" + file_format: "text" + } + } + fixed_feature { + name: "words" + fml: "input(-1).word input(-2).word input(-3).word input.word input(1).word input(2).word input(3).word" + embedding_dim: 64 + vocabulary_size: 39397 + size: 7 + } + fixed_feature { + name: "words" + fml: "input(-3).word input.word input(1).word input(2).word input(3).word" + embedding_dim: 64 + vocabulary_size: 39397 + size: 5 + } + linked_feature { + name: "rnn" + fml: "stack.focus" + embedding_dim: 32 + size: 1 + source_component: "tagger" + source_translator: "shift-reduce-step" + source_layer: "layer_0" + } + backend { + registered_name: 
"SyntaxNetComponent" + } + network_unit { + registered_name: 'feed-forward' + parameters { + key: 'hidden_layer_sizes' + value: '64' + } + } +} diff --git a/syntaxnet/dragnn/components/syntaxnet/testdata/syntaxnet-tagger.tag-map b/syntaxnet/dragnn/components/syntaxnet/testdata/syntaxnet-tagger.tag-map new file mode 100644 index 0000000000000000000000000000000000000000..2cad1a73b010ace29854dc80296c79728e9b3c52 --- /dev/null +++ b/syntaxnet/dragnn/components/syntaxnet/testdata/syntaxnet-tagger.tag-map @@ -0,0 +1,50 @@ +49 +NN 285194 +IN 228165 +DT 179147 +NNP 175147 +JJ 125667 +NNS 115732 +, 97481 +. 85938 +RB 78513 +VB 63952 +CC 57554 +VBD 56635 +CD 55674 +PRP 55244 +VBZ 48126 +VBN 44458 +VBG 34524 +VBP 33669 +TO 28772 +MD 22364 +PRP$ 20706 +HYPH 18526 +POS 14905 +`` 12193 +'' 12154 +WDT 10267 +: 8713 +$ 7993 +WP 7336 +RP 7335 +WRB 6634 +JJR 6295 +NNPS 5917 +-RRB- 3904 +-LRB- 3840 +JJS 3596 +RBR 3186 +EX 2733 +UH 1521 +RBS 1467 +PDT 1271 +FW 928 +NFP 844 +SYM 652 +ADD 476 +LS 392 +WP$ 332 +GW 184 +AFX 42 diff --git a/syntaxnet/dragnn/components/syntaxnet/testdata/syntaxnet-tagger.word-map b/syntaxnet/dragnn/components/syntaxnet/testdata/syntaxnet-tagger.word-map new file mode 100644 index 0000000000000000000000000000000000000000..86cc301ae201004b586d87dd28c71d7df6e9788a --- /dev/null +++ b/syntaxnet/dragnn/components/syntaxnet/testdata/syntaxnet-tagger.word-map @@ -0,0 +1,4 @@ +3 +Sentence 4 +. 
3 +0 2 diff --git a/syntaxnet/dragnn/components/util/BUILD b/syntaxnet/dragnn/components/util/BUILD new file mode 100644 index 0000000000000000000000000000000000000000..e6400fa7c37b480fa20eff7258f92ef2c6510843 --- /dev/null +++ b/syntaxnet/dragnn/components/util/BUILD @@ -0,0 +1,9 @@ +package(default_visibility = ["//visibility:public"]) + +cc_library( + name = "bulk_feature_extractor", + hdrs = ["bulk_feature_extractor.h"], + deps = [ + "@org_tensorflow//tensorflow/core:lib", + ], +) diff --git a/syntaxnet/dragnn/components/util/bulk_feature_extractor.h b/syntaxnet/dragnn/components/util/bulk_feature_extractor.h new file mode 100644 index 0000000000000000000000000000000000000000..cce897d73be10317a14fd087c30ae77726c684ee --- /dev/null +++ b/syntaxnet/dragnn/components/util/bulk_feature_extractor.h @@ -0,0 +1,95 @@ +#ifndef NLP_SAFT_OPENSOURCE_DRAGNN_COMPONENTS_UTIL_BULK_FEATURE_EXTRACTOR_H_ +#define NLP_SAFT_OPENSOURCE_DRAGNN_COMPONENTS_UTIL_BULK_FEATURE_EXTRACTOR_H_ + +#include +#include +#include "tensorflow/core/platform/types.h" + +namespace syntaxnet { +namespace dragnn { + +// Provides a wrapper for allocator functions and padding data for the Bulk +// ExtractFixedFeatures operation. +class BulkFeatureExtractor { + public: + // Create a BulkFeatureExtractor with the given allocator functions and + // padding. The allocator functions should take a channel and an element + // count and return a contigous block of memory that is associated with that + // channel (the caller can decide what that means). If use_padding is true, + // the provided pad_to_step and pad_to_element will be used to calculate + // the ID size. 
+ BulkFeatureExtractor( + std::function + allocate_indices_by_channel, + std::function + allocate_ids_by_channel, + std::function + allocate_weights_by_channel, + bool use_padding, int pad_to_step, int pad_to_element) + : use_padding_(use_padding), + pad_to_step_(pad_to_step), + pad_to_element_(pad_to_element), + allocate_indices_by_channel_(std::move(allocate_indices_by_channel)), + allocate_ids_by_channel_(std::move(allocate_ids_by_channel)), + allocate_weights_by_channel_(std::move(allocate_weights_by_channel)) {} + + // Create a BulkFeatureExtractor with allocator functions as above, but with + // use_padding set to False. Useful when you know your caller will never + // need to pad. + BulkFeatureExtractor( + std::function + allocate_indices_by_channel, + std::function + allocate_ids_by_channel, + std::function + allocate_weights_by_channel) + : use_padding_(false), + pad_to_step_(-1), + pad_to_element_(-1), + allocate_indices_by_channel_(std::move(allocate_indices_by_channel)), + allocate_ids_by_channel_(std::move(allocate_ids_by_channel)), + allocate_weights_by_channel_(std::move(allocate_weights_by_channel)) {} + + // Invoke the index memory allocator. + tensorflow::int32 *AllocateIndexMemory(int channel, int num_elements) const { + return allocate_indices_by_channel_(channel, num_elements); + } + + // Invoke the ID memory allocator. + tensorflow::int64 *AllocateIdMemory(int channel, int num_elements) const { + return allocate_ids_by_channel_(channel, num_elements); + } + + // Invoke the weight memory allocator. + float *AllocateWeightMemory(int channel, int num_elements) const { + return allocate_weights_by_channel_(channel, num_elements); + } + + // Given the total number of steps and total number of elements for a given + // feature, calculate the index (not ID) of that feature. Based on how the + // BulkFeatureExtractor was constructed, it may use the given number of steps + // and number of elements, or it may use the passed padded number. 
+ int GetIndex(int total_steps, int num_elements, int feature_idx, + int element_idx, int step_idx) const { + const int steps = (use_padding_) ? pad_to_step_ : total_steps; + const int elements = (use_padding_) ? pad_to_element_ : num_elements; + const int feature_offset = elements * steps; + const int element_offset = steps; + return (feature_idx * feature_offset) + (element_idx * element_offset) + + step_idx; + } + + private: + const bool use_padding_; + const int pad_to_step_; + const int pad_to_element_; + const std::function + allocate_indices_by_channel_; + const std::function allocate_ids_by_channel_; + const std::function allocate_weights_by_channel_; +}; + +} // namespace dragnn +} // namespace syntaxnet + +#endif // NLP_SAFT_OPENSOURCE_DRAGNN_COMPONENTS_UTIL_BULK_FEATURE_EXTRACTOR_H_ diff --git a/syntaxnet/dragnn/conll2017/BUILD b/syntaxnet/dragnn/conll2017/BUILD new file mode 100644 index 0000000000000000000000000000000000000000..ead13b1696fa140ffb97a46041971639ab59196a --- /dev/null +++ b/syntaxnet/dragnn/conll2017/BUILD @@ -0,0 +1,9 @@ +py_binary( + name = "make_parser_spec", + srcs = ["make_parser_spec.py"], + deps = [ + "//dragnn/protos:spec_py_pb2", + "//dragnn/python:spec_builder", + "@org_tensorflow//tensorflow:tensorflow_py", + ], +) diff --git a/syntaxnet/dragnn/conll2017/conll_parser_trainer.sh b/syntaxnet/dragnn/conll2017/conll_parser_trainer.sh new file mode 100755 index 0000000000000000000000000000000000000000..d08d035f811ca28df47c7390effb919f28e8b332 --- /dev/null +++ b/syntaxnet/dragnn/conll2017/conll_parser_trainer.sh @@ -0,0 +1,40 @@ +#!/bin/sh +# Copyright 2016 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +# A script to train the CONLL2017 baseline. +set -e + +language=English +output_dir=./trained-"$language" + +training_corpus=$1 +dev_corpus=$2 + +bazel build -c opt //dragnn/tools:trainer //dragnn/conll2017:make_parser_spec + +mkdir -p $output_dir +bazel-bin/dragnn/conll2017/make_parser_spec \ + --spec_file="$output_dir/parser_spec.textproto" + +bazel-bin/dragnn/tools/trainer \ + --logtostderr \ + --compute_lexicon \ + --dragnn_spec="$output_dir/parser_spec.textproto" \ + --resource_path="$output_dir/resources" \ + --training_corpus_path="$training_corpus" \ + --tune_corpus_path="$dev_corpus" \ + --tensorboard_dir="$output_dir/tensorboard" \ + --checkpoint_filename="$output_dir/checkpoint.model" diff --git a/syntaxnet/dragnn/conll2017/make_parser_spec.py b/syntaxnet/dragnn/conll2017/make_parser_spec.py new file mode 100644 index 0000000000000000000000000000000000000000..3dc69d1e39fafa180327cd149bc98e0fc7ef5e69 --- /dev/null +++ b/syntaxnet/dragnn/conll2017/make_parser_spec.py @@ -0,0 +1,105 @@ +# Copyright 2016 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Construct the spec for the CONLL2017 Parser baseline.""" + +import tensorflow as tf + +from tensorflow.python.platform import gfile + +from dragnn.protos import spec_pb2 +from dragnn.python import spec_builder + +flags = tf.app.flags +FLAGS = flags.FLAGS + +flags.DEFINE_string('spec_file', 'parser_spec.textproto', + 'Filename to save the spec to.') + + +def main(unused_argv): + # Left-to-right, character-based LSTM. + char2word = spec_builder.ComponentSpecBuilder('char_lstm') + char2word.set_network_unit( + name='wrapped_units.LayerNormBasicLSTMNetwork', + hidden_layer_sizes='256') + char2word.set_transition_system(name='char-shift-only', left_to_right='true') + char2word.add_fixed_feature(name='chars', fml='char-input.text-char', + embedding_dim=16) + + # Lookahead LSTM reads right-to-left to represent the rightmost context of the + # words. It gets word embeddings from the char model. + lookahead = spec_builder.ComponentSpecBuilder('lookahead') + lookahead.set_network_unit( + name='wrapped_units.LayerNormBasicLSTMNetwork', + hidden_layer_sizes='256') + lookahead.set_transition_system(name='shift-only', left_to_right='false') + lookahead.add_link(source=char2word, fml='input.last-char-focus', + embedding_dim=64) + + # Construct the tagger. This is a simple left-to-right LSTM sequence tagger. 
+ tagger = spec_builder.ComponentSpecBuilder('tagger') + tagger.set_network_unit( + name='wrapped_units.LayerNormBasicLSTMNetwork', + hidden_layer_sizes='256') + tagger.set_transition_system(name='tagger') + tagger.add_token_link(source=lookahead, fml='input.focus', embedding_dim=64) + + # Construct the parser. + parser = spec_builder.ComponentSpecBuilder('parser') + parser.set_network_unit(name='FeedForwardNetwork', hidden_layer_sizes='256', + layer_norm_hidden='true') + parser.set_transition_system(name='arc-standard') + parser.add_token_link(source=lookahead, fml='input.focus', embedding_dim=64) + parser.add_token_link( + source=tagger, fml='input.focus stack.focus stack(1).focus', + embedding_dim=64) + + # Add discrete features of the predicted parse tree so far, like in Parsey + # McParseface. + parser.add_fixed_feature(name='labels', embedding_dim=16, + fml=' '.join([ + 'stack.child(1).label', + 'stack.child(1).sibling(-1).label', + 'stack.child(-1).label', + 'stack.child(-1).sibling(1).label', + 'stack(1).child(1).label', + 'stack(1).child(1).sibling(-1).label', + 'stack(1).child(-1).label', + 'stack(1).child(-1).sibling(1).label', + 'stack.child(2).label', + 'stack.child(-2).label', + 'stack(1).child(2).label', + 'stack(1).child(-2).label'])) + + # Recurrent connection for the arc-standard parser. For both tokens on the + # stack, we connect to the last time step to either SHIFT or REDUCE that + # token. This allows the parser to build up compositional representations of + # phrases. 
+ parser.add_link( + source=parser, # recurrent connection + name='rnn-stack', # unique identifier + fml='stack.focus stack(1).focus', # look for both stack tokens + source_translator='shift-reduce-step', # maps token indices -> step + embedding_dim=64) # project down to 64 dims + + master_spec = spec_pb2.MasterSpec() + master_spec.component.extend( + [char2word.spec, lookahead.spec, tagger.spec, parser.spec]) + + with gfile.FastGFile(FLAGS.spec_file, 'w') as f: + f.write(str(master_spec).encode('utf-8')) + +if __name__ == '__main__': + tf.app.run() diff --git a/syntaxnet/dragnn/conll2017/sample/zh-segmenter-resource/category-map b/syntaxnet/dragnn/conll2017/sample/zh-segmenter-resource/category-map new file mode 100644 index 0000000000000000000000000000000000000000..4a23ecdbaf16beec0851882f533ab666dd882e02 --- /dev/null +++ b/syntaxnet/dragnn/conll2017/sample/zh-segmenter-resource/category-map @@ -0,0 +1,16 @@ +15 +NOUN 25758 +VERB 14242 +PUNCT 12945 +PART 9977 +PROPN 8280 +NUM 5082 +ADV 4323 +ADP 4165 +ADJ 2318 +AUX 2024 +PRON 1343 +CCONJ 1329 +DET 994 +X 948 +SYM 25 diff --git a/syntaxnet/dragnn/conll2017/sample/zh-segmenter-resource/char-map b/syntaxnet/dragnn/conll2017/sample/zh-segmenter-resource/char-map new file mode 100644 index 0000000000000000000000000000000000000000..d86506634dcc85078405f3373bbe34e4a6d31391 --- /dev/null +++ b/syntaxnet/dragnn/conll2017/sample/zh-segmenter-resource/char-map @@ -0,0 +1,3518 @@ +3517 +9 8220 +, 5926 +的 4320 +. 
3945 +年 1387 +在 1372 +一 1331 +是 1254 +為 1153 +國 1092 +中 1087 +人 1010 +、 954 +有 890 +於 849 +大 774 +和 741 +了 672 +他 604 +以 563 +時 563 +不 554 +日 550 +個 538 +學 529 +上 516 +地 513 +後 501 +成 498 +會 495 +月 487 +出 443 +( 431 +) 431 +部 424 +生 410 +公 408 +到 407 +與 405 +行 404 +這 401 +發 389 +之 388 +作 378 +方 378 +家 375 +用 371 +其 361 +主 346 +e 344 +斯 341 +來 337 +由 335 +也 331 +多 331 +而 326 +西 324 +」 321 +分 321 +「 320 +物 319 +被 319 +位 318 +名 318 +a 316 +區 316 +同 311 +對 310 +法 305 +第 303 +下 300 +最 300 +並 298 +軍 298 +及 297 +可 291 +本 286 +此 286 +民 285 +長 284 +自 282 +要 280 +子 271 +爾 270 +開 269 +現 268 +文 267 +海 266 +過 265 +因 263 +i 260 +動 259 +市 259 +政 251 +新 251 +高 251 +n 250 +當 250 +美 250 +o 248 +代 248 +戰 247 +特 247 +前 246 +立 245 +r 242 +世 242 +能 241 +小 240 +道 240 +間 240 +事 239 +建 239 +理 236 +亞 234 +任 234 +體 234 +所 231 +教 230 +等 230 +面 230 +工 228 +得 228 +種 228 +加 227 +里 225 +東 224 +業 224 +經 224 +進 223 +但 222 +內 222 +利 221 +平 220 +度 219 +電 217 +機 216 +南 215 +使 214 +《 213 +》 213 +次 213 +該 210 +期 209 +三 207 +兩 206 +重 206 +北 204 +外 204 +者 203 +路 203 +車 203 +入 200 +化 198 +通 197 +們 196 +員 196 +天 196 +德 196 +場 194 +定 192 +球 191 +表 187 +克 186 +l 183 +t 183 +· 183 +總 183 +都 183 +合 180 +常 180 +口 177 +稱 177 +隊 177 +全 176 +州 176 +性 176 +至 176 +將 175 +二 173 +相 172 +力 168 +山 167 +馬 166 +或 165 +城 164 +華 164 +數 163 +星 163 +羅 162 +院 162 +科 161 +比 160 +目 160 +水 159 +共 158 +式 158 +原 156 +s 155 +些 155 +始 155 +如 154 +明 154 +格 154 +聯 154 +台 153 +士 153 +達 153 +王 152 +米 151 +著 150 +設 150 +就 149 +起 148 +然 147 +統 146 +關 146 +約 145 +英 145 +樂 144 +治 143 +說 142 +則 141 +線 141 +賽 141 +從 140 +交 139 +巴 139 +女 138 +已 138 +拉 138 +產 138 +- 137 +十 137 +布 137 +站 137 +色 136 +手 135 +省 135 +安 134 +心 133 +選 133 +元 132 +正 132 +司 131 +港 131 +議 130 +金 130 +受 129 +接 129 +無 129 +府 128 +影 128 +情 127 +運 127 +量 126 +意 125 +蘭 125 +保 123 +直 123 +她 122 +形 122 +基 121 +實 121 +尼 120 +界 119 +認 119 +務 118 +資 118 +類 118 +指 117 +提 117 +鎮 117 +帝 116 +書 116 +組 116 +義 116 +身 116 +制 115 +管 113 +型 112 +帶 112 +朝 112 +維 112 +號 112 +計 112 +傳 111 +各 
111 +展 111 +很 111 +曾 111 +結 111 +u 110 +四 110 +林 110 +品 109 +沒 108 +流 107 +系 107 +鐵 107 +門 107 +古 106 +屬 106 +校 105 +視 105 +五 104 +更 104 +h 103 +廣 103 +河 102 +包 101 +改 101 +社 101 +近 101 +音 101 +領 101 +列 100 +島 100 +信 99 +演 99 +向 98 +它 98 +據 98 +語 98 +獲 97 +香 97 +C 96 +神 96 +紀 96 +還 96 +首 96 +太 95 +級 95 +縣 95 +造 95 +字 94 +空 94 +製 94 +擊 93 +權 93 +親 93 +A 92 +導 92 +決 92 +節 92 +局 91 +條 91 +每 91 +活 91 +程 91 +術 91 +變 91 +村 90 +命 89 +族 89 +集 89 +: 88 +普 88 +積 88 +點 88 +反 87 +取 87 +江 87 +研 87 +團 86 +往 86 +爭 86 +參 85 +洲 85 +遊 85 +阿 85 +c 84 +去 84 +持 84 +清 84 +論 84 +S 83 +只 83 +單 83 +師 83 +故 83 +波 83 +萬 83 +究 82 +納 82 +; 81 +d 81 +兒 81 +史 81 +園 81 +圖 81 +少 81 +應 81 +灣 81 +非 81 +別 80 +回 80 +己 80 +果 80 +死 80 +示 80 +先 79 +收 79 +航 79 +京 78 +專 78 +強 78 +想 78 +李 78 +版 78 +給 78 +解 78 +m 77 +器 77 +母 77 +處 77 +角 77 +頭 77 +黨 77 +求 76 +片 76 +龍 76 +卡 75 +支 75 +畫 75 +白 75 +知 75 +光 74 +服 74 +武 74 +歷 74 +皇 74 +蘇 74 +言 74 +記 74 +許 74 +超 74 +再 73 +放 73 +風 73 +份 72 +伊 72 +初 72 +夫 72 +離 72 +威 71 +引 71 +張 71 +技 71 +氣 71 +父 71 +病 71 +石 71 +終 71 +較 71 +供 70 +創 70 +好 70 +括 70 +湖 70 +亦 69 +即 69 +標 69 +樣 69 +漢 69 +育 69 +食 69 +功 68 +助 68 +又 68 +奧 68 +層 68 +根 68 +舉 68 +戲 67 +劇 66 +商 66 +官 66 +易 66 +班 66 +眾 66 +見 66 +轉 66 +隨 66 +題 66 +另 65 +攻 65 +職 65 +調 65 +B 64 +件 64 +域 64 +居 64 +希 64 +M 63 +具 63 +存 63 +愛 63 +我 63 +歐 63 +興 63 +速 63 +那 63 +八 62 +容 62 +觀 62 +問 61 +委 61 +播 61 +段 61 +老 61 +查 60 +構 60 +派 60 +裡 60 +陸 60 +際 60 +% 59 +p 59 +y 59 +印 59 +報 59 +環 59 +辦 59 +且 58 +便 58 +客 58 +密 58 +街 58 +陽 58 +P 57 +g 57 +堂 57 +料 57 +洛 57 +火 57 +男 57 +薩 57 +足 57 +博 56 +境 56 +態 56 +打 56 +曼 56 +營 56 +連 56 +醫 56 +低 55 +劃 55 +均 55 +感 55 +築 55 +仍 54 +佛 54 +像 54 +協 54 +哥 54 +座 54 +擔 54 +未 54 +歌 54 +牙 54 +真 54 +精 54 +聲 54 +致 54 +花 54 +裝 54 +責 54 +館 54 +律 53 +思 53 +推 53 +案 53 +深 53 +熱 53 +獨 53 +般 53 +需 53 +令 52 +修 52 +季 52 +把 52 +整 52 +核 52 +歲 52 +編 52 +蒙 52 +評 52 +話 52 +負 52 +轄 52 +黑 52 +佔 51 +千 51 +失 51 +木 51 +酒 51 +倫 50 +土 50 +室 50 +幾 50 +採 50 +早 50 +模 50 +源 50 +百 50 +織 50 +藝 50 +質 50 +錄 50 +項 50 +吉 49 +哈 49 +增 49 +完 
49 +投 49 +曲 49 +樓 49 +步 49 +溫 49 +狀 49 +留 49 +置 49 +聖 49 +船 49 +規 49 +I 48 +T 48 +升 48 +塔 48 +宣 48 +沙 48 +網 48 +護 48 +讓 48 +邊 48 +除 48 +飛 48 +伯 47 +價 47 +率 47 +福 47 +素 47 +試 47 +遠 47 +響 47 +D 46 +九 46 +半 46 +卻 46 +圍 46 +宗 46 +席 46 +底 46 +濟 46 +考 46 +越 46 +農 46 +依 45 +寫 45 +射 45 +庫 45 +復 45 +承 45 +望 45 +殺 45 +確 45 +繼 45 +魯 45 +候 44 +備 44 +兵 44 +友 44 +姆 44 +才 44 +控 44 +止 44 +盟 44 +看 44 +督 44 +續 44 +習 44 +落 44 +藏 44 +鄉 44 +頓 44 +G 43 +N 43 +R 43 +例 43 +係 43 +典 43 +恩 43 +房 43 +瓦 43 +紅 43 +衛 43 +象 43 +住 42 +做 42 +六 42 +寺 42 +敗 42 +洋 42 +銀 42 +勢 41 +抗 41 +瑞 41 +群 41 +識 41 +雖 41 +麗 41 +今 40 +俄 40 +周 40 +塞 40 +滿 40 +甚 40 +田 40 +紐 40 +絕 40 +貝 40 +億 39 +告 39 +息 39 +毛 39 +測 39 +澳 39 +獎 39 +移 39 +費 39 +限 39 +驗 39 +魚 39 +E 38 +k 38 +久 38 +唱 38 +堡 38 +守 38 +必 38 +排 38 +旗 38 +索 38 +降 38 +預 38 +丹 37 +效 37 +施 37 +極 37 +況 37 +泰 37 +細 37 +艦 37 +雙 37 +雷 37 +青 37 +七 36 +察 36 +屆 36 +庭 36 +康 36 +慶 36 +換 36 +擁 36 +氏 36 +準 36 +童 36 +邦 36 +F 35 +H 35 +v 35 +何 35 +冠 35 +勒 35 +壓 35 +念 35 +短 35 +碼 35 +突 35 +述 35 +逐 35 +附 35 +顯 35 +黎 35 +傷 34 +富 34 +批 34 +景 34 +讀 34 +值 33 +停 33 +僅 33 +破 33 +胡 33 +舊 33 +走 33 +遺 33 +鏡 33 +難 33 +韓 33 +鬥 33 +L 32 +b 32 +勞 32 +善 32 +奇 32 +拔 32 +植 32 +橋 32 +烈 32 +異 32 +策 32 +算 32 +耳 32 +艾 32 +萊 32 +貓 32 +退 32 +雲 32 +革 32 +頻 32 +養 32 +餘 32 +互 31 +嚴 31 +埃 31 +央 31 +屋 31 +春 31 +架 31 +津 31 +瑪 31 +略 31 +範 31 +腦 31 +莫 31 +葉 31 +警 31 +距 31 +遭 31 +配 31 +鮮 31 +麼 31 +黃 31 +丁 30 +乘 30 +川 30 +店 30 +役 30 +托 30 +拿 30 +斷 30 +監 30 +票 30 +靈 30 +駐 30 +麥 30 +齒 30 +寶 29 +差 29 +快 29 +映 29 +油 29 +盛 29 +藥 29 +血 29 +請 29 +證 29 +迪 29 +防 29 +須 29 +似 28 +判 28 +唐 28 +婚 28 +找 28 +消 28 +爆 28 +益 28 +禮 28 +肉 28 +菲 28 +輯 28 +亡 27 +併 27 +優 27 +冰 27 +切 27 +售 27 +唯 27 +啟 27 +執 27 +幫 27 +廷 27 +弗 27 +徵 27 +材 27 +森 27 +永 27 +登 27 +臨 27 +覺 27 +送 27 +J 26 +予 26 +介 26 +免 26 +劉 26 +宮 26 +封 26 +左 26 +延 26 +朗 26 +漸 26 +照 26 +牛 26 +畢 26 +簡 26 +署 26 +股 26 +良 26 +詞 26 +財 26 +貨 26 +趙 26 +輸 26 +隆 26 +階 26 +險 26 +雄 26 +雜 26 +企 25 +刻 25 +副 25 +右 25 +堅 25 +夠 25 +妻 25 +徒 25 +懷 25 +戶 25 +束 25 +榮 25 +歸 25 +滅 25 +烏 25 +猶 
25 +療 25 +秘 25 +租 25 +聚 25 +聞 25 +靠 25 +頂 25 +K 24 +O 24 +仁 24 +侵 24 +吃 24 +吸 24 +夏 24 +尾 24 +廳 24 +授 24 +救 24 +柏 24 +毒 24 +炸 24 +獻 24 +玩 24 +珠 24 +甲 24 +章 24 +端 24 +藍 24 +詩 24 +貴 24 +輕 24 +順 24 +願 24 +f 23 +側 23 +冷 23 +址 23 +害 23 +尋 23 +帕 23 +廟 23 +弟 23 +摩 23 +擴 23 +槍 23 +漫 23 +甘 23 +皮 23 +秀 23 +胞 23 +臘 23 +芬 23 +草 23 +訊 23 +誌 23 +諾 23 +遇 23 +避 23 +鄭 23 +w 22 +假 22 +充 22 +固 22 +坡 22 +寬 22 +岸 22 +彈 22 +微 22 +恐 22 +旅 22 +松 22 +梅 22 +泛 22 +激 22 +疾 22 +眼 22 +祖 22 +篇 22 +蓋 22 +輛 22 +迷 22 +透 22 +陳 22 +鳥 22 +' 21 +凱 21 +劍 21 +勝 21 +叫 21 +君 21 +暴 21 +樹 21 +減 21 +湯 21 +潛 21 +爵 21 +缺 21 +臺 21 +蒂 21 +虎 21 +賓 21 +適 21 +雪 21 +W 20 +乎 20 +什 20 +召 20 +含 20 +喬 20 +奪 20 +妃 20 +孫 20 +序 20 +廈 20 +揮 20 +援 20 +攝 20 +暗 20 +杜 20 +染 20 +檢 20 +毀 20 +池 20 +注 20 +牌 20 +牧 20 +練 20 +罪 20 +若 20 +譯 20 +豐 20 +買 20 +跟 20 +軌 20 +載 20 +追 20 +逝 20 +鄰 20 +銅 20 +陣 20 +隻 20 +/ 19 +V 19 +坦 19 +奏 19 +孩 19 +廠 19 +恆 19 +拒 19 +敵 19 +昌 19 +晚 19 +某 19 +梁 19 +污 19 +涉 19 +熊 19 +秦 19 +綫 19 +艇 19 +補 19 +訂 19 +討 19 +譽 19 +赫 19 +辛 19 +遷 19 +魔 19 +默 19 +你 18 +刺 18 +危 18 +否 18 +呼 18 +哲 18 +喜 18 +嘉 18 +奴 18 +妹 18 +審 18 +尚 18 +尤 18 +幣 18 +待 18 +志 18 +擇 18 +末 18 +桑 18 +汽 18 +澤 18 +競 18 +簽 18 +籍 18 +董 18 +藤 18 +襲 18 +談 18 +辭 18 +逃 18 +遣 18 +郡 18 +酸 18 +釋 18 +野 18 +銷 18 +陵 18 +顆 18 +額 18 +亂 17 +俗 17 +剛 17 +塑 17 +墨 17 +壯 17 +奉 17 +寧 17 +巨 17 +幼 17 +捕 17 +掌 17 +探 17 +損 17 +殖 17 +氧 17 +液 17 +症 17 +秋 17 +粒 17 +絲 17 +耶 17 +茲 17 +莉 17 +莎 17 +藉 17 +衝 17 +訴 17 +誤 17 +購 17 +錦 17 +隸 17 +仙 16 +儀 16 +后 16 +噸 16 +圈 16 +夢 16 +姐 16 +宇 16 +宋 16 +彩 16 +徑 16 +扎 16 +抵 16 +拍 16 +晉 16 +板 16 +柯 16 +棲 16 +楚 16 +款 16 +歡 16 +殿 16 +沿 16 +浦 16 +游 16 +牠 16 +穆 16 +穿 16 +符 16 +綱 16 +羊 16 +苦 16 +茶 16 +虛 16 +衣 16 +複 16 +課 16 +輔 16 +靜 16 +U 15 +亮 15 +休 15 +佈 15 +佐 15 +傑 15 +兼 15 +墓 15 +套 15 +幹 15 +廉 15 +戀 15 +戴 15 +拜 15 +描 15 +散 15 +敦 15 +替 15 +楊 15 +泉 15 +獄 15 +私 15 +純 15 +緊 15 +繪 15 +羽 15 +翻 15 +聽 15 +臣 15 +舍 15 +荷 15 +融 15 +訓 15 +訪 15 +貢 15 +賣 15 +輪 15 +遍 15 +郎 15 +郵 15 +x 14 +亨 14 +享 14 +仔 14 +仰 14 +俱 14 +健 14 +匈 14 +圓 14 +姓 14 +宿 14 +岩 14 +幻 14 +廢 
14 +怪 14 +慈 14 +招 14 +按 14 +擬 14 +暫 14 +桃 14 +橫 14 +檔 14 +殊 14 +沃 14 +潮 14 +燃 14 +牆 14 +獵 14 +珍 14 +疑 14 +禁 14 +糖 14 +背 14 +脈 14 +葡 14 +蓮 14 +諸 14 +謀 14 +講 14 +返 14 +錯 14 +鐘 14 +阻 14 +霍 14 +韋 14 +飲 14 +飾 14 +餐 14 +骨 14 +鳳 14 +麻 14 +乾 13 +付 13 +伸 13 +傅 13 +刊 13 +劑 13 +壁 13 +娛 13 +峰 13 +干 13 +弱 13 +惡 13 +揚 13 +撤 13 +操 13 +智 13 +朱 13 +氯 13 +泊 13 +洪 13 +混 13 +災 13 +煙 13 +玉 13 +盃 13 +礎 13 +竹 13 +紙 13 +緩 13 +繞 13 +觸 13 +診 13 +誕 13 +趣 13 +跑 13 +迫 13 +郭 13 +閣 13 +陷 13 +障 13 +雨 13 +騎 13 +齊 13 +丘 12 +佳 12 +偏 12 +儒 12 +儘 12 +匯 12 +午 12 +占 12 +坐 12 +培 12 +夜 12 +媒 12 +宏 12 +征 12 +怖 12 +急 12 +患 12 +惠 12 +拆 12 +敏 12 +杭 12 +梯 12 +棄 12 +浙 12 +溪 12 +濱 12 +犯 12 +琴 12 +申 12 +穩 12 +籃 12 +紹 12 +綜 12 +緣 12 +繁 12 +罕 12 +肯 12 +脅 12 +舞 12 +萄 12 +謝 12 +譜 12 +谷 12 +貫 12 +賀 12 +賈 12 +賞 12 +赤 12 +軟 12 +邨 12 +隱 12 +雅 12 +雕 12 +零 12 +顧 12 +鼠 12 +? 11 +乙 11 +促 11 +冬 11 +努 11 +勳 11 +呈 11 +味 11 +困 11 +填 11 +壞 11 +孔 11 +尺 11 +忠 11 +掉 11 +晶 11 +曹 11 +泥 11 +濕 11 +獸 11 +玄 11 +珊 11 +礙 11 +綠 11 +翼 11 +耕 11 +葛 11 +蔡 11 +蛇 11 +覽 11 +託 11 +迅 11 +途 11 +週 11 +鋒 11 +鋼 11 +龐 11 +z 10 +乃 10 +伍 10 +伴 10 +冊 10 +勵 10 +吳 10 +塊 10 +婦 10 +宅 10 +尊 10 +幕 10 +彼 10 +徐 10 +恢 10 +悲 10 +慕 10 +慢 10 +搜 10 +旁 10 +曉 10 +朋 10 +枚 10 +棒 10 +樞 10 +殘 10 +毫 10 +洗 10 +洞 10 +溝 10 +滑 10 +潘 10 +濃 10 +炎 10 +燒 10 +瓷 10 +礁 10 +紛 10 +績 10 +翠 10 +翰 10 +腳 10 +膠 10 +莊 10 +袖 10 +裁 10 +賴 10 +輻 10 +迎 10 +遜 10 +醒 10 +銘 10 +錫 10 +鍵 10 +隔 10 +隧 10 +震 10 +顏 10 +髮 10 +鹿 10 +井 9 +佩 9 +借 9 +傾 9 +准 9 +刑 9 +叛 9 +句 9 +呂 9 +孕 9 +孟 9 +宜 9 +寵 9 +尖 9 +岳 9 +嶼 9 +巧 9 +幅 9 +幸 9 +徹 9 +戈 9 +拓 9 +捷 9 +掛 9 +擎 9 +敘 9 +昭 9 +枝 9 +棍 9 +概 9 +欣 9 +沖 9 +灰 9 +熟 9 +狼 9 +甸 9 +疫 9 +盜 9 +盤 9 +碳 9 +礦 9 +筆 9 +籌 9 +紋 9 +緬 9 +縱 9 +繫 9 +肅 9 +脫 9 +苗 9 +茨 9 +葬 9 +裔 9 +覆 9 +讚 9 +豬 9 +貿 9 +贊 9 +贏 9 +轟 9 +迴 9 +霸 9 +驅 9 +驚 9 +鯨 9 +鰭 9 +鴻 9 +Y 8 +丈 8 +乏 8 +伏 8 +伐 8 +估 8 +倒 8 +兄 8 +兆 8 +刀 8 +勁 8 +勇 8 +嘗 8 +埔 8 +塘 8 +壽 8 +娃 8 +娜 8 +媽 8 +孤 8 +屯 8 +峽 8 +床 8 +彭 8 +御 8 +慮 8 +憲 8 +懸 8 +截 8 +振 8 +捐 8 +搶 8 +斂 8 +昆 8 +杯 8 +析 8 +柔 8 +柱 8 +柳 8 +柴 8 +棋 8 +榜 8 +氫 8 +沉 8 +浪 8 +渡 8 +滾 8 +潭 8 +瀋 8 +灘 8 +犬 8 +狂 
8 +琉 8 +町 8 +皆 8 +盡 8 +盧 8 +睡 8 +碑 8 +糧 8 +翌 8 +聘 8 +肖 8 +芝 8 +荒 8 +蓉 8 +裂 8 +註 8 +詢 8 +賢 8 +趾 8 +跡 8 +跨 8 +跳 8 +迦 8 +遼 8 +邀 8 +鄧 8 +針 8 +錢 8 +閉 8 +露 8 +頜 8 +駛 8 +~ 7 +俘 7 +倍 7 +偉 7 +偶 7 +匹 7 +卑 7 +呎 7 +喀 7 +喇 7 +壇 7 +壘 7 +奈 7 +契 7 +婆 7 +崖 7 +弓 7 +彌 7 +徽 7 +忙 7 +忽 7 +怡 7 +惱 7 +憶 7 +拖 7 +握 7 +搬 7 +撒 7 +撞 7 +擾 7 +斐 7 +旋 7 +既 7 +晨 7 +暖 7 +杉 7 +欖 7 +泳 7 +浮 7 +滬 7 +潔 7 +炮 7 +爪 7 +爬 7 +狐 7 +瑟 7 +癌 7 +硬 7 +碎 7 +磨 7 +祭 7 +稻 7 +籤 7 +粹 7 +緒 7 +胎 7 +舒 7 +艱 7 +蒸 7 +蔣 7 +薦 7 +藩 7 +蛋 7 +衡 7 +誠 7 +諷 7 +趨 7 +躲 7 +違 7 +邏 7 +邱 7 +采 7 +銜 7 +陰 7 +陶 7 +馮 7 +鬆 7 +鬼 7 +魏 7 +鼎 7 +鼓 7 +° 6 +乳 6 +仿 6 +伽 6 +侯 6 +允 6 +冕 6 +凡 6 +劫 6 +勃 6 +募 6 +卿 6 +厚 6 +咖 6 +咸 6 +啡 6 +喚 6 +噴 6 +圳 6 +坊 6 +埋 6 +奮 6 +奶 6 +妖 6 +妥 6 +妮 6 +娘 6 +嫁 6 +嬌 6 +嬴 6 +孜 6 +宙 6 +尉 6 +屍 6 +岡 6 +崇 6 +崎 6 +崗 6 +嶺 6 +巡 6 +帽 6 +弄 6 +怒 6 +惜 6 +慣 6 +扶 6 +折 6 +抽 6 +挖 6 +挪 6 +捉 6 +措 6 +搖 6 +擋 6 +擠 6 +敬 6 +斥 6 +旦 6 +栽 6 +棉 6 +汁 6 +汗 6 +汞 6 +泡 6 +涯 6 +淡 6 +淮 6 +漂 6 +煮 6 +爽 6 +猛 6 +猴 6 +玻 6 +琳 6 +璃 6 +甜 6 +痛 6 +硫 6 +祝 6 +禦 6 +稿 6 +窯 6 +箭 6 +粵 6 +絡 6 +縮 6 +翔 6 +翡 6 +蓬 6 +蕭 6 +薪 6 +蝕 6 +衷 6 +袁 6 +袋 6 +裏 6 +諮 6 +豪 6 +貞 6 +貪 6 +赴 6 +軸 6 +輿 6 +遮 6 +遲 6 +邵 6 +郊 6 +醇 6 +鍋 6 +鎖 6 +鑒 6 +閘 6 +陀 6 +雌 6 +雞 6 +霖 6 +頒 6 +頗 6 +飼 6 +鮑 6 +鯉 6 +鳴 6 +X 5 +『 5 +于 5 +亥 5 +俊 5 +倉 5 +偷 5 +偽 5 +僧 5 +僱 5 +償 5 +儲 5 +凌 5 +券 5 +削 5 +剩 5 +割 5 +勾 5 +卜 5 +叔 5 +吏 5 +吾 5 +喪 5 +喻 5 +嘛 5 +堆 5 +墅 5 +墜 5 +墟 5 +夕 5 +姑 5 +姻 5 +嫌 5 +宰 5 +屠 5 +崔 5 +幽 5 +廂 5 +廊 5 +廖 5 +廚 5 +彙 5 +循 5 +忍 5 +怎 5 +愈 5 +慘 5 +憑 5 +憤 5 +戒 5 +抒 5 +抓 5 +抱 5 +挺 5 +掠 5 +搞 5 +搭 5 +摘 5 +斑 5 +旺 5 +栓 5 +梨 5 +棕 5 +欲 5 +欽 5 +殼 5 +沈 5 +沼 5 +涅 5 +涌 5 +淄 5 +淘 5 +湘 5 +溶 5 +滕 5 +漁 5 +燥 5 +狗 5 +狩 5 +獅 5 +玲 5 +珀 5 +瑚 5 +瓊 5 +畜 5 +疆 5 +碧 5 +磁 5 +祥 5 +祿 5 +窄 5 +笑 5 +纖 5 +胸 5 +腹 5 +舌 5 +艘 5 +芭 5 +苑 5 +苯 5 +范 5 +菜 5 +蟲 5 +裕 5 +詹 5 +謙 5 +豫 5 +贈 5 +輝 5 +辯 5 +逼 5 +遂 5 +郗 5 +酵 5 +醉 5 +釀 5 +鋪 5 +鎊 5 +鑑 5 +闢 5 +陝 5 +鹼 5 +鹽 5 +麟 5 +黛 5 +齡 5 +$ 4 +j 4 +』 4 +串 4 +亭 4 +伺 4 +侍 4 +倖 4 +倡 4 +僚 4 +兇 4 +函 4 +劣 4 +勤 4 +厘 4 +吞 4 +吧 4 +呢 4 +喙 4 +喝 4 +嘲 4 +坎 4 +夥 4 +姊 4 +姬 4 +娶 4 +宛 4 +宴 4 +寄 4 +尹 4 +屈 4 +崙 4 +崩 4 +巫 4 +帥 4 +庸 4 +弘 4 +弦 4 +彰 
4 +恨 4 +悅 4 +悉 4 +慧 4 +扮 4 +拾 4 +挑 4 +插 4 +撥 4 +撰 4 +擺 4 +敖 4 +旨 4 +昏 4 +暨 4 +朔 4 +杰 4 +橄 4 +橡 4 +檸 4 +櫃 4 +歇 4 +歧 4 +殉 4 +浩 4 +浴 4 +浸 4 +淨 4 +渝 4 +滋 4 +滙 4 +澱 4 +焦 4 +煉 4 +煩 4 +熏 4 +熙 4 +燕 4 +爲 4 +牟 4 +狄 4 +狙 4 +狸 4 +狹 4 +璋 4 +畝 4 +瘋 4 +盾 4 +眉 4 +睦 4 +睿 4 +瞭 4 +矛 4 +矩 4 +矮 4 +砂 4 +碟 4 +磯 4 +秒 4 +窗 4 +窟 4 +竟 4 +笨 4 +答 4 +箱 4 +篡 4 +簧 4 +粉 4 +糕 4 +緻 4 +罰 4 +罷 4 +罹 4 +翁 4 +耀 4 +耐 4 +聊 4 +肥 4 +胺 4 +腔 4 +腸 4 +膚 4 +膜 4 +膝 4 +膽 4 +臉 4 +艙 4 +茂 4 +茵 4 +莽 4 +菌 4 +蓄 4 +蘋 4 +蛛 4 +蜂 4 +蜘 4 +襄 4 +誰 4 +誼 4 +謠 4 +豹 4 +貧 4 +賜 4 +賠 4 +賦 4 +踢 4 +辟 4 +辨 4 +辰 4 +辱 4 +迹 4 +逢 4 +逾 4 +遞 4 +邑 4 +邪 4 +鄂 4 +酷 4 +鈞 4 +錶 4 +鍊 4 +鍾 4 +鎳 4 +鑼 4 +閒 4 +閥 4 +闆 4 +闊 4 +隋 4 +隕 4 +靖 4 +鞏 4 +頸 4 +颱 4 +飯 4 +駕 4 +騙 4 +鬧 4 +鱗 4 +麓 4 +! 3 +Q 3 +Z 3 +q 3 +─ 3 +・ 3 +丟 3 +仇 3 +侈 3 +俠 3 +倆 3 +倪 3 +偵 3 +催 3 +傭 3 +債 3 +僻 3 +儂 3 +冒 3 +冥 3 +凈 3 +凍 3 +刷 3 +卧 3 +厥 3 +厭 3 +吐 3 +吹 3 +咬 3 +哪 3 +哺 3 +喉 3 +嚇 3 +囑 3 +囚 3 +坂 3 +坪 3 +堪 3 +墊 3 +墾 3 +壤 3 +夷 3 +夸 3 +夾 3 +奔 3 +奢 3 +妨 3 +姚 3 +姿 3 +嬰 3 +孝 3 +寒 3 +尻 3 +屏 3 +履 3 +岐 3 +嵌 3 +巷 3 +帳 3 +庄 3 +庾 3 +廿 3 +彥 3 +彬 3 +徙 3 +忒 3 +忘 3 +怕 3 +恥 3 +悟 3 +悠 3 +愉 3 +愚 3 +慌 3 +慎 3 +慾 3 +憂 3 +懲 3 +懶 3 +扭 3 +抄 3 +拯 3 +拳 3 +拷 3 +拼 3 +挽 3 +掘 3 +揭 3 +撐 3 +撫 3 +撲 3 +擒 3 +擦 3 +敕 3 +斜 3 +旬 3 +昂 3 +昇 3 +昔 3 +昧 3 +暑 3 +曆 3 +朴 3 +杏 3 +柬 3 +栃 3 +株 3 +桓 3 +楠 3 +楷 3 +槓 3 +樊 3 +樸 3 +檬 3 +欺 3 +殯 3 +毅 3 +毓 3 +毗 3 +氨 3 +氮 3 +汝 3 +汪 3 +汰 3 +沫 3 +洩 3 +涮 3 +淋 3 +淪 3 +淹 3 +淺 3 +添 3 +溥 3 +滉 3 +滯 3 +漏 3 +漳 3 +潰 3 +澄 3 +瀏 3 +烯 3 +焚 3 +煤 3 +煥 3 +燭 3 +爐 3 +爸 3 +爺 3 +琪 3 +瑙 3 +瑜 3 +瓜 3 +瓶 3 +痕 3 +盆 3 +盪 3 +眠 3 +睛 3 +矚 3 +砍 3 +磡 3 +磷 3 +祐 3 +禍 3 +禽 3 +稀 3 +稅 3 +穌 3 +窮 3 +粗 3 +紡 3 +紮 3 +累 3 +絞 3 +綺 3 +縫 3 +繩 3 +纜 3 +羌 3 +聰 3 +肆 3 +肝 3 +肢 3 +肺 3 +胃 3 +胖 3 +胚 3 +脂 3 +腓 3 +腺 3 +腿 3 +臟 3 +臭 3 +臼 3 +舅 3 +芳 3 +芽 3 +茅 3 +莆 3 +菊 3 +葵 3 +蔭 3 +蕉 3 +蕩 3 +薄 3 +薇 3 +蘆 3 +虔 3 +虜 3 +蜀 3 +螺 3 +蟒 3 +蠟 3 +蠶 3 +蠻 3 +衍 3 +衙 3 +衰 3 +褂 3 +褐 3 +褲 3 +詳 3 +誘 3 +諧 3 +謂 3 +謎 3 +謹 3 +貂 3 +貌 3 +販 3 +貼 3 +賃 3 +趁 3 +趕 3 +跌 3 +蹈 3 +蹟 3 +辜 3 +逆 3 +逮 3 +邁 3 +酗 3 +釉 3 +釜 3 +銠 3 +銳 3 +鐸 3 +鑽 3 +閃 3 +閑 3 +閱 3 +阜 3 +阪 3 +隙 3 +雍 3 +霜 3 +鞋 3 +韻 3 +頌 3 +餃 
3 +餅 3 +餓 3 +饑 3 +饒 3 +驟 3 +魅 3 +鴿 3 +鵝 3 +麵 3 += 2 +` 2 +​ 2 +‧ 2 +〈 2 +〉 2 +丐 2 +丞 2 +仲 2 +俸 2 +倚 2 +倭 2 +傍 2 +傲 2 +僕 2 +兔 2 +兢 2 +冤 2 +凝 2 +凰 2 +凸 2 +划 2 +剝 2 +剿 2 +勸 2 +匕 2 +匪 2 +匾 2 +卉 2 +卓 2 +卦 2 +卷 2 +厄 2 +叢 2 +吊 2 +吋 2 +吵 2 +吻 2 +呆 2 +咀 2 +咎 2 +咐 2 +哇 2 +唸 2 +喊 2 +嗜 2 +嗣 2 +嘆 2 +嘴 2 +噁 2 +噪 2 +嚨 2 +圭 2 +坤 2 +垮 2 +堯 2 +塍 2 +塗 2 +塵 2 +墮 2 +壩 2 +壺 2 +奎 2 +奕 2 +妒 2 +妙 2 +妝 2 +姜 2 +姥 2 +婁 2 +婭 2 +婷 2 +媚 2 +嫉 2 +嫻 2 +孵 2 +寓 2 +寢 2 +寨 2 +尿 2 +峙 2 +嶽 2 +帆 2 +帛 2 +并 2 +弈 2 +弊 2 +弧 2 +彗 2 +忌 2 +恰 2 +悼 2 +惑 2 +惟 2 +憎 2 +憐 2 +憩 2 +懊 2 +懼 2 +戍 2 +戚 2 +扣 2 +抑 2 +披 2 +拋 2 +拱 2 +掩 2 +摸 2 +撈 2 +撿 2 +擅 2 +敲 2 +斗 2 +旭 2 +暢 2 +曝 2 +曬 2 +札 2 +柄 2 +栗 2 +栩 2 +桂 2 +框 2 +桌 2 +桐 2 +械 2 +梳 2 +棟 2 +棣 2 +椎 2 +楓 2 +榆 2 +榔 2 +槽 2 +樁 2 +樟 2 +橙 2 +欄 2 +氦 2 +洵 2 +浚 2 +涇 2 +淑 2 +淳 2 +渭 2 +湛 2 +溯 2 +滄 2 +滸 2 +漠 2 +漲 2 +漿 2 +潑 2 +潟 2 +濤 2 +濫 2 +濾 2 +灌 2 +熔 2 +燈 2 +牲 2 +牽 2 +犀 2 +猜 2 +琅 2 +琦 2 +瑣 2 +瑩 2 +璘 2 +甥 2 +甦 2 +畔 2 +番 2 +疊 2 +疏 2 +疲 2 +疽 2 +瘤 2 +皓 2 +盈 2 +盔 2 +眈 2 +眷 2 +砲 2 +硅 2 +硝 2 +碘 2 +碩 2 +磅 2 +磺 2 +祀 2 +祂 2 +祕 2 +祺 2 +禪 2 +稽 2 +穀 2 +穫 2 +穴 2 +窘 2 +竣 2 +笛 2 +筍 2 +筐 2 +箏 2 +簿 2 +籠 2 +籲 2 +糞 2 +糾 2 +紓 2 +綁 2 +綢 2 +綴 2 +綽 2 +縉 2 +縛 2 +繚 2 +繳 2 +羚 2 +羞 2 +翦 2 +肌 2 +肚 2 +腎 2 +腐 2 +臥 2 +舟 2 +艷 2 +芮 2 +苔 2 +苣 2 +茄 2 +茸 2 +荃 2 +菁 2 +菩 2 +菸 2 +萌 2 +葚 2 +蒐 2 +蒜 2 +蒲 2 +蓓 2 +蔬 2 +蔽 2 +蕪 2 +蕾 2 +藻 2 +虐 2 +虢 2 +虹 2 +蜜 2 +蜥 2 +蝸 2 +蟹 2 +裙 2 +裴 2 +裸 2 +詮 2 +誇 2 +誓 2 +謊 2 +豆 2 +豎 2 +賄 2 +賤 2 +賭 2 +賺 2 +贖 2 +踏 2 +蹤 2 +躁 2 +躍 2 +輟 2 +轅 2 +轎 2 +辣 2 +迢 2 +遙 2 +遴 2 +遵 2 +郷 2 +酪 2 +醜 2 +醺 2 +銹 2 +鋁 2 +鋅 2 +錛 2 +錠 2 +錳 2 +鏈 2 +鑄 2 +閏 2 +閩 2 +閻 2 +阱 2 +陪 2 +霧 2 +霾 2 +靡 2 +韌 2 +顎 2 +顛 2 +飢 2 +飽 2 +餾 2 +駁 2 +騰 2 +髓 2 +髖 2 +髻 2 +鬚 2 +魁 2 +鯛 2 +鰂 2 +鱸 2 +鵰 2 +鶴 2 +鷹 2 +麴 2 +黔 2 +黜 2 +齋 2 +齧 2 ++ 1 +á 1 +é 1 +ð 1 +ö 1 +þ 1 +ō 1 +̄ 1 +θ 1 +〔 1 +〕 1 +丕 1 +丙 1 +丸 1 +乞 1 +仕 1 +仗 1 +伎 1 +伙 1 +伶 1 +佑 1 +佗 1 +佬 1 +侄 1 +侏 1 +侖 1 +侮 1 +侶 1 +俏 1 +俚 1 +俯 1 +俾 1 +倩 1 +倬 1 +傀 1 +傢 1 +傻 1 +僑 1 +僖 1 +僵 1 +儡 1 +儷 1 +兌 1 +冀 1 +冉 1 +冢 1 +凄 1 +凊 1 +凳 1 +凶 1 +凹 1 +刃 1 +刪 1 +刮 1 +剋 1 +剌 1 +剪 1 +剷 1 +効 1 +劾 1 +勉 1 +勘 1 +勲 1 +勺 1 +勻 
1 +匙 1 +匡 1 +卵 1 +卸 1 +叡 1 +吁 1 +吟 1 +吩 1 +呀 1 +呔 1 +咒 1 +咧 1 +咪 1 +哀 1 +哨 1 +哭 1 +唇 1 +唾 1 +啄 1 +啊 1 +啤 1 +喃 1 +喘 1 +嗅 1 +嗎 1 +嘈 1 +嘎 1 +嘔 1 +嘩 1 +嘯 1 +噶 1 +嚮 1 +囊 1 +囒 1 +囪 1 +坍 1 +坑 1 +坮 1 +垣 1 +埜 1 +埠 1 +埤 1 +堈 1 +堊 1 +堤 1 +堵 1 +塚 1 +塢 1 +墳 1 +壑 1 +壹 1 +奬 1 +奸 1 +妄 1 +妊 1 +妳 1 +姦 1 +姪 1 +娠 1 +婢 1 +婪 1 +嫘 1 +嫣 1 +孚 1 +孛 1 +孺 1 +宦 1 +寅 1 +寇 1 +寡 1 +寮 1 +寰 1 +寸 1 +尬 1 +尷 1 +岑 1 +岔 1 +岷 1 +峨 1 +峪 1 +峯 1 +崞 1 +嵩 1 +巔 1 +巢 1 +巳 1 +巾 1 +幀 1 +幌 1 +幟 1 +幢 1 +幪 1 +庇 1 +庚 1 +庵 1 +廓 1 +廝 1 +廬 1 +弭 1 +彎 1 +彝 1 +彪 1 +彷 1 +彿 1 +怨 1 +恣 1 +恤 1 +恭 1 +悍 1 +悔 1 +悖 1 +您 1 +悶 1 +惇 1 +愁 1 +愙 1 +愧 1 +愨 1 +慚 1 +慨 1 +慰 1 +慷 1 +懂 1 +懿 1 +戌 1 +戟 1 +扁 1 +扈 1 +扔 1 +扼 1 +抨 1 +抬 1 +押 1 +拌 1 +拏 1 +拙 1 +挫 1 +挹 1 +挾 1 +捍 1 +捨 1 +捲 1 +掙 1 +搏 1 +摑 1 +摒 1 +摔 1 +摧 1 +摯 1 +摹 1 +撓 1 +撘 1 +撮 1 +擂 1 +擢 1 +擱 1 +攀 1 +攔 1 +攜 1 +攣 1 +攤 1 +攪 1 +攸 1 +敉 1 +敞 1 +敢 1 +斃 1 +斌 1 +斤 1 +斧 1 +斬 1 +旱 1 +旻 1 +昨 1 +晃 1 +晒 1 +晤 1 +晴 1 +暇 1 +暮 1 +暱 1 +暹 1 +曄 1 +曖 1 +曧 1 +曷 1 +朵 1 +朽 1 +杖 1 +杞 1 +枸 1 +柚 1 +柝 1 +柢 1 +柩 1 +査 1 +柿 1 +桿 1 +梓 1 +梧 1 +梭 1 +梵 1 +棵 1 +椅 1 +椰 1 +楂 1 +楞 1 +榕 1 +榨 1 +榫 1 +榴 1 +槌 1 +槳 1 +樑 1 +橈 1 +橢 1 +檎 1 +檐 1 +檜 1 +檨 1 +檯 1 +檳 1 +櫟 1 +櫾 1 +歆 1 +歉 1 +歩 1 +殃 1 +殆 1 +殲 1 +毎 1 +毯 1 +氈 1 +氘 1 +氚 1 +汀 1 +汲 1 +汶 1 +沌 1 +沢 1 +沽 1 +沾 1 +泌 1 +泗 1 +泠 1 +洒 1 +浜 1 +涪 1 +涵 1 +淀 1 +淚 1 +淫 1 +淵 1 +渚 1 +渠 1 +渣 1 +渤 1 +渦 1 +渴 1 +渾 1 +湄 1 +湧 1 +湮 1 +溢 1 +滲 1 +滴 1 +漆 1 +漬 1 +漱 1 +漶 1 +漾 1 +潢 1 +澀 1 +濁 1 +濠 1 +瀆 1 +瀑 1 +瀕 1 +瀘 1 +瀝 1 +瀟 1 +灶 1 +灼 1 +炘 1 +炙 1 +炭 1 +烤 1 +烴 1 +烷 1 +烹 1 +焊 1 +焗 1 +焜 1 +煌 1 +煜 1 +煦 1 +煽 1 +熄 1 +熾 1 +燁 1 +燄 1 +燦 1 +燾 1 +爛 1 +牘 1 +牡 1 +犁 1 +犧 1 +狡 1 +猝 1 +猾 1 +猿 1 +玕 1 +玖 1 +玫 1 +玷 1 +琬 1 +琺 1 +瑋 1 +瑛 1 +瑰 1 +瑾 1 +璣 1 +瓘 1 +瓣 1 +甄 1 +甌 1 +甫 1 +甬 1 +畏 1 +畹 1 +疇 1 +疙 1 +疹 1 +疼 1 +痙 1 +痢 1 +痰 1 +痹 1 +瘦 1 +瘧 1 +瘩 1 +癖 1 +癤 1 +癥 1 +癮 1 +皈 1 +皋 1 +皖 1 +皰 1 +盒 1 +眯 1 +睞 1 +睹 1 +睾 1 +瞪 1 +瞬 1 +瞰 1 +矗 1 +矢 1 +砒 1 +砝 1 +碁 1 +碰 1 +磐 1 +磚 1 +祁 1 +祈 1 +祠 1 +禕 1 +禛 1 +禱 1 +秉 1 +秩 1 +稍 1 +稗 1 +稚 1 +稼 1 +穗 1 +穹 1 +窩 1 +竄 1 +竊 1 +竭 1 +竿 1 +笳 1 +筒 1 +箔 1 +箬 1 +箴 1 +篤 1 +粦 1 +粽 1 +糟 1 +紂 1 +紈 1 +紊 1 +紗 1 +紜 1 +紳 
1 +紺 1 +絨 1 +絶 1 +綉 1 +綏 1 +綿 1 +緋 1 +緝 1 +締 1 +緯 1 +緹 1 +縈 1 +繡 1 +繭 1 +繹 1 +繽 1 +纂 1 +纏 1 +缽 1 +罵 1 +羨 1 +羯 1 +羱 1 +羲 1 +翟 1 +耗 1 +耽 1 +聆 1 +聳 1 +聶 1 +聾 1 +肇 1 +肘 1 +肩 1 +脆 1 +脊 1 +脱 1 +脹 1 +脾 1 +腥 1 +腫 1 +膀 1 +膨 1 +膳 1 +臂 1 +臍 1 +臧 1 +臿 1 +舀 1 +舖 1 +舜 1 +芘 1 +芻 1 +苛 1 +苟 1 +苷 1 +茜 1 +荊 1 +荔 1 +莖 1 +菅 1 +菱 1 +萎 1 +葆 1 +葫 1 +葯 1 +葺 1 +蒼 1 +蔑 1 +蔥 1 +蕙 1 +蕨 1 +薔 1 +薛 1 +薺 1 +蘊 1 +虞 1 +虱 1 +蚊 1 +蚌 1 +蚩 1 +蚺 1 +蛙 1 +蜆 1 +蜒 1 +蜚 1 +蜴 1 +蜿 1 +蝴 1 +蝶 1 +螞 1 +螢 1 +蟬 1 +蟻 1 +蟾 1 +蠣 1 +衢 1 +衫 1 +袍 1 +袥 1 +袱 1 +裋 1 +裹 1 +褪 1 +襟 1 +襪 1 +覓 1 +覲 1 +訃 1 +訄 1 +訇 1 +訐 1 +訖 1 +訝 1 +訥 1 +詐 1 +詔 1 +詛 1 +詝 1 +詼 1 +誥 1 +誦 1 +誹 1 +諂 1 +諒 1 +諜 1 +諱 1 +諶 1 +諺 1 +謁 1 +謇 1 +謔 1 +謗 1 +謚 1 +譚 1 +譴 1 +豈 1 +豢 1 +貶 1 +貽 1 +賚 1 +賡 1 +賬 1 +赦 1 +趟 1 +跋 1 +踐 1 +踞 1 +躬 1 +軀 1 +軋 1 +軒 1 +輾 1 +轍 1 +辮 1 +逍 1 +逛 1 +逵 1 +逸 1 +遹 1 +邗 1 +邳 1 +邸 1 +郝 1 +郪 1 +郫 1 +鄢 1 +酃 1 +酆 1 +酉 1 +酊 1 +酋 1 +酩 1 +酮 1 +醋 1 +醬 1 +醴 1 +釗 1 +釘 1 +釧 1 +釩 1 +鈇 1 +鈦 1 +鈺 1 +鈾 1 +鉑 1 +鉛 1 +鉤 1 +銎 1 +銓 1 +銨 1 +鋸 1 +錘 1 +鍔 1 +鍛 1 +鍝 1 +鎂 1 +鎰 1 +鏞 1 +鏢 1 +鐳 1 +鑫 1 +鑰 1 +鑿 1 +闍 1 +闖 1 +闡 1 +阡 1 +陂 1 +陌 1 +陛 1 +陞 1 +陡 1 +隍 1 +隠 1 +雇 1 +雉 1 +雎 1 +雯 1 +霆 1 +霞 1 +靴 1 +靶 1 +靼 1 +鞍 1 +鞘 1 +鞦 1 +韃 1 +韆 1 +韶 1 +頁 1 +頃 1 +頑 1 +頡 1 +頰 1 +頹 1 +顥 1 +飈 1 +餉 1 +餡 1 +餮 1 +饃 1 +饕 1 +馳 1 +馴 1 +駅 1 +駙 1 +駿 1 +騁 1 +騏 1 +騷 1 +驢 1 +驤 1 +驥 1 +骯 1 +骷 1 +骸 1 +髏 1 +髒 1 +鬢 1 +鬱 1 +魂 1 +鯽 1 +鰓 1 +鰺 1 +鱂 1 +鱲 1 +鱷 1 +鴛 1 +鴦 1 +鴨 1 +鵑 1 +鵬 1 +鷗 1 +麒 1 +麩 1 +鼐 1 +鼩 1 +鼬 1 +鼻 1 +齲 1 +龜 1 diff --git a/syntaxnet/dragnn/conll2017/sample/zh-segmenter-resource/char-ngram-map b/syntaxnet/dragnn/conll2017/sample/zh-segmenter-resource/char-ngram-map new file mode 100644 index 0000000000000000000000000000000000000000..a2af3de4e9653e07980ae366cd26960fbdab3c9e --- /dev/null +++ b/syntaxnet/dragnn/conll2017/sample/zh-segmenter-resource/char-ngram-map @@ -0,0 +1,16126 @@ +16125 +99 4963 +中國 218 +.9 156 +9. 
156 +美國 131 +開始 118 +可以 115 +公里 110 +人口 108 +使用 102 +日本 95 +平方 93 +沒有 93 +第一 92 +他們 91 +公司 88 +由於 88 +其中 87 +地區 87 +國家 86 +政府 86 +主要 83 +世界 81 +大學 81 +不同 80 +香港 79 +自己 77 +因為 76 +研究 76 +面積 75 +,9 74 +9, 74 +進行 72 +包括 69 +當時 66 +這些 66 +部分 66 +中華 65 +工作 65 +認為 65 +也是 64 +以及 64 +發現 64 +方公 62 +er 61 +an 60 +同時 60 +學院 60 +9% 59 +成立 59 +第二 59 +代表 58 +發展 58 +發生 58 +之後 57 +社會 57 +一些 56 +人民 56 +其他 56 +世紀 54 +建築 53 +為了 53 +獲得 52 +目前 52 +英國 52 +重要 52 +文化 51 +中心 50 +但是 50 +第9 50 +許多 50 +之間 49 +可能 49 +歷史 49 +遊戲 49 +9萬 48 +ar 48 +帝國 48 +期間 48 +音樂 48 +一般 47 +年代 47 +根據 47 +行星 47 +電影 47 +in 46 +政治 46 +組織 46 +鐵路 46 +-- 45 +城市 45 +故事 45 +學校 44 +所有 44 +科學 44 +on 43 +作品 43 +最後 43 +通過 43 +關係 43 +已經 42 +建立 42 +時間 42 +電視 42 +共和 41 +後來 41 +管理 41 +表示 41 +通常 41 +出現 40 +影響 40 +成功 40 +戰爭 40 +提供 40 +系統 40 +動物 39 +地方 39 +就是 39 +德國 39 +設計 39 +負責 39 +國際 38 +技術 38 +方面 38 +最終 38 +父親 38 +車站 38 +上海 37 +人物 37 +台灣 37 +參加 36 +擔任 36 +時期 36 +服務 36 +正式 36 +生活 36 +要求 36 +運動 36 +一直 35 +單位 35 +大利 35 +委員 35 +民國 35 +法國 35 +理論 35 +第三 35 +人類 34 +歐洲 34 +決定 34 +現在 34 +羅馬 34 +航空 34 +行政 34 +足球 34 +雖然 34 +利亞 33 +印度 33 +問題 33 +小說 33 +教育 33 +製作 33 +西班 33 +en 32 +不是 32 +保護 32 +全國 32 +形成 32 +很多 32 +得到 32 +活動 32 +班牙 32 +節目 32 +主義 31 +尼亞 31 +市鎮 31 +方式 31 +時代 31 +最高 31 +需要 31 +al 30 +ri 30 +ro 30 +中央 30 +另外 30 +控制 30 +擁有 30 +產生 30 +經濟 30 +進入 30 +he 29 +ll 29 +or 29 +公園 29 +具有 29 +大陸 29 +接受 29 +球隊 29 +當地 29 +並且 28 +北京 28 +受到 28 +如果 28 +學生 28 +工程 28 +時候 28 +計劃 28 +超過 28 +電腦 28 +9億 27 +ia 27 +存在 27 +對於 27 +情況 27 +戰鬥 27 +方法 27 +機場 27 +比賽 27 +甚至 27 +總統 27 +義大 27 +都是 27 +非常 27 +le 26 +st 26 +人員 26 +原因 26 +國民 26 +支持 26 +然而 26 +獨立 26 +生物 26 +聯合 26 +ne 25 +re 25 +兒子 25 +出版 25 +巴士 25 +我們 25 +海拔 25 +經過 25 +議會 25 +il 24 +li 24 +te 24 +一樣 24 +交通 24 +例如 24 +分布 24 +加入 24 +同年 24 +大量 24 +於是 24 +最大 24 +生產 24 +皇帝 24 +系列 24 +高度 24 +事件 23 +內容 23 +命名 23 +宣布 23 +導致 23 +必須 23 +成員 23 +清朝 23 +演出 23 +直接 23 +紐約 23 +行為 23 +距離 23 +軍事 23 +部隊 23 +銀行 23 +集團 23 +-9 22 +不少 22 +不過 22 +傳統 22 +反對 22 +增加 22 +它們 22 +思想 22 +有關 22 +此外 22 +母親 22 +組成 22 +結構 22 +羅斯 22 
+聯盟 22 +聯賽 22 +能力 22 +語言 22 +附近 22 +ch 21 +es 21 +nt 21 +ra 21 +一起 21 +作用 21 +出生 21 +只有 21 +唯一 21 +地位 21 +廣泛 21 +植物 21 +海軍 21 +無法 21 +現代 21 +環境 21 +紀念 21 +結束 21 +舉行 21 +角色 21 +議員 21 +選舉 21 +at 20 +el 20 +ha 20 +ic 20 +it 20 +ti 20 +不能 20 +主席 20 +仍然 20 +冠軍 20 +出任 20 +分子 20 +原子 20 +參與 20 +地下 20 +城鎮 20 +天津 20 +工業 20 +希臘 20 +引起 20 +採用 20 +攻擊 20 +整個 20 +文學 20 +文物 20 +朝鮮 20 +東北 20 +機構 20 +比較 20 +猶太 20 +管轄 20 +範圍 20 +細胞 20 +經常 20 +自治 20 +自由 20 +逐漸 20 +重新 20 +類型 20 +am 19 +ma 19 +不久 19 +以上 19 +佔領 19 +分別 19 +台北 19 +多數 19 +天文 19 +巴黎 19 +所以 19 +方米 19 +最早 19 +會議 19 +有些 19 +民族 19 +結果 19 +繼續 19 +能夠 19 +造成 19 +達到 19 +部份 19 +風格 19 +et 18 +la 18 +ve 18 +不會 18 +任何 18 +企業 18 +先後 18 +列車 18 +功能 18 +取得 18 +合併 18 +外交 18 +廣州 18 +戰役 18 +明朝 18 +每年 18 +治療 18 +法院 18 +漫畫 18 +爾斯 18 +畢業 18 +疾病 18 +相當 18 +艦隊 18 +身體 18 +軍隊 18 +離開 18 +領導 18 +體育 18 +Ma 17 +ea 17 +na 17 +ng 17 +ou 17 +rt 17 +ta 17 +再次 17 +名字 17 +大戰 17 +宗教 17 +家族 17 +希望 17 +廣場 17 +採取 17 +提出 17 +教堂 17 +新聞 17 +最初 17 +格蘭 17 +物理 17 +特別 17 +發行 17 +總部 17 +自然 17 +蘇聯 17 +行動 17 +製造 17 +西北 17 +資料 17 +選擇 17 +領域 17 +飛機 17 +St 16 +as 16 +is 16 +nd 16 +ol 16 +oo 16 +se 16 +to 16 +九龍 16 +共同 16 +利用 16 +制度 16 +前往 16 +創作 16 +勢力 16 +區域 16 +協助 16 +各種 16 +大樓 16 +家庭 16 +實驗 16 +居民 16 +山東 16 +心理 16 +或者 16 +拒絕 16 +東南 16 +武器 16 +民主 16 +法律 16 +爆發 16 +狀態 16 +而且 16 +藝術 16 +表現 16 +西亞 16 +記者 16 +設有 16 +設立 16 +資源 16 +軌道 16 +過程 16 +道路 16 +還是 16 +革命 16 +首次 16 +高速 16 +Co 15 +io 15 +os 15 +th 15 +下轄 15 +中共 15 +主角 15 +作戰 15 +則是 15 +化石 15 +十分 15 +南京 15 +南部 15 +回到 15 +國內 15 +國王 15 +地球 15 +基督 15 +大廈 15 +大約 15 +太陽 15 +女兒 15 +女性 15 +如此 15 +學習 15 +完全 15 +實際 15 +常見 15 +幾乎 15 +應用 15 +承認 15 +投資 15 +指出 15 +指揮 15 +斯特 15 +普查 15 +未來 15 +此後 15 +火星 15 +版本 15 +牠們 15 +發表 15 +直到 15 +碼頭 15 +科技 15 +立法 15 +統治 15 +職業 15 +著名 15 +蒙古 15 +西部 15 +調查 15 +路線 15 +車輛 15 +農業 15 +這樣 15 +鐵道 15 +.. 
14 +Ca 14 +me 14 +om 14 +un 14 +ur 14 +us 14 +一定 14 +二十 14 +交易 14 +人們 14 +以來 14 +位置 14 +使得 14 +俄羅 14 +俱樂 14 +傳播 14 +兒童 14 +公主 14 +北部 14 +十一 14 +博物 14 +合作 14 +基本 14 +境內 14 +太平 14 +失去 14 +完成 14 +容易 14 +密度 14 +專業 14 +市場 14 +幫助 14 +建造 14 +擊敗 14 +曾經 14 +有限 14 +棲息 14 +樂部 14 +波蘭 14 +澳門 14 +營運 14 +特色 14 +相同 14 +看到 14 +簡稱 14 +統計 14 +網路 14 +聯邦 14 +董事 14 +規模 14 +解決 14 +貝爾 14 +起來 14 +路易 14 +這裡 14 +進攻 14 +開發 14 +限制 14 +顯示 14 +Be 13 +ce 13 +ec 13 +hi 13 +ir 13 +rs 13 +伊斯 13 +倫敦 13 +克斯 13 +全部 13 +公路 13 +公開 13 +其後 13 +初期 13 +加上 13 +博士 13 +司令 13 +同意 13 +因而 13 +圖書 13 +土地 13 +埃及 13 +基礎 13 +墨西 13 +天主 13 +妻子 13 +娛樂 13 +建設 13 +形式 13 +從事 13 +改變 13 +教會 13 +數學 13 +數據 13 +數量 13 +早期 13 +更多 13 +東京 13 +樂團 13 +模式 13 +死亡 13 +每個 13 +水平 13 +流域 13 +準備 13 +物種 13 +物質 13 +王國 13 +玩家 13 +男性 13 +當選 13 +目標 13 +相關 13 +知識 13 +第四 13 +紀錄 13 +統一 13 +街道 13 +西哥 13 +設定 13 +身份 13 +辦公 13 +速度 13 +運輸 13 +項目 13 +食物 13 +ai 12 +ee 12 +ey 12 +ni 12 +nn 12 +tr 12 +一帶 12 +上帝 12 +中學 12 +中部 12 +之前 12 +人數 12 +什麼 12 +以下 12 +保留 12 +個人 12 +價值 12 +元素 12 +內部 12 +公元 12 +加坡 12 +半島 12 +原本 12 +反應 12 +反映 12 +可是 12 +商業 12 +嚴重 12 +基地 12 +大型 12 +女子 12 +將軍 12 +尤其 12 +居住 12 +帶來 12 +平均 12 +建議 12 +很大 12 +律師 12 +恆星 12 +恐怖 12 +改革 12 +政策 12 +新加 12 +月台 12 +有時 12 +東部 12 +標準 12 +機關 12 +歌手 12 +決賽 12 +汽車 12 +減少 12 +潛艇 12 +熱帶 12 +瑞典 12 +生命 12 +產品 12 +產業 12 +相對 12 +眾多 12 +知道 12 +精神 12 +經營 12 +英格 12 +葡萄 12 +該國 12 +變成 12 +賽事 12 +透過 12 +遭到 12 +遺址 12 +避免 12 +醫院 12 +重建 12 +重慶 12 +阿爾 12 +電子 12 +9多 11 +au 11 +co 11 +di 11 +ip 11 +lo 11 +og 11 +vi 11 +主張 11 +主持 11 +主教 11 +之中 11 +亨利 11 +人士 11 +以前 11 +以色 11 +作者 11 +保持 11 +信仰 11 +先生 11 +全球 11 +出身 11 +創立 11 +創辦 11 +力量 11 +印第 11 +去世 11 +取代 11 +召開 11 +喬治 11 +地利 11 +大會 11 +奧地 11 +威爾 11 +威脅 11 +安全 11 +專輯 11 +強烈 11 +拉克 11 +接近 11 +推出 11 +描述 11 +播放 11 +文字 11 +斯蘭 11 +普遍 11 +柏林 11 +業務 11 +殖民 11 +江蘇 11 +涉及 11 +現時 11 +留下 11 +目的 11 +相信 11 +社區 11 +福建 11 +第十 11 +第安 11 +給予 11 +網站 11 +線路 11 +繼承 11 +色列 11 +試圖 11 +資訊 11 +部門 11 +阿拉 11 +馬來 11 +9- 10 +9千 10 +II 10 +ct 10 +de 10 +id 10 +no 10 +ns 10 +pe 10 +pi 10 +rn 10 +si 10 +並非 10 
+事業 10 +交流 10 +以後 10 +來往 10 +儘管 10 +克里 10 +勞動 10 +包含 10 +化學 10 +協會 10 +君主 10 +和平 10 +唱片 10 +國旗 10 +國會 10 +報告 10 +威廉 10 +學位 10 +復興 10 +感到 10 +手術 10 +投入 10 +推動 10 +播出 10 +改名 10 +文明 10 +文藝 10 +明顯 10 +有效 10 +杭州 10 +東方 10 +條件 10 +模型 10 +比亞 10 +河流 10 +法庭 10 +派遣 10 +演員 10 +演唱 10 +火車 10 +爭議 10 +特定 10 +特徵 10 +特殊 10 +獨特 10 +生長 10 +當中 10 +發動 10 +發射 10 +盛頓 10 +確定 10 +神話 10 +移民 10 +空間 10 +終於 10 +結婚 10 +維持 10 +總理 10 +芬蘭 10 +花園 10 +華盛 10 +衝突 10 +西藏 10 +規定 10 +訓練 10 +記載 10 +記錄 10 +該市 10 +警察 10 +變化 10 +責任 10 +起源 10 +逝世 10 +運行 10 +里斯 10 +錦標 10 +關於 10 +陸軍 10 +雜誌 10 +類似 10 +飛行 10 +首都 10 +'' 9 +A9 9 +Gr 9 +Th 9 +be 9 +iv 9 +od 9 +sa 9 +ss 9 +ty 9 +一切 9 +一致 9 +上陣 9 +下降 9 +不斷 9 +不滿 9 +中山 9 +丹麥 9 +之外 9 +事務 9 +互相 9 +介紹 9 +來到 9 +健康 9 +內閣 9 +全長 9 +公布 9 +其實 9 +再度 9 +出來 9 +出售 9 +分支 9 +到達 9 +加利 9 +動畫 9 +十八 9 +千米 9 +南方 9 +危險 9 +古代 9 +古典 9 +各類 9 +吉尼 9 +國務 9 +團體 9 +地產 9 +地點 9 +執行 9 +士兵 9 +奪得 9 +媒體 9 +字母 9 +孩子 9 +學者 9 +安那 9 +對手 9 +就讀 9 +工人 9 +左右 9 +帶領 9 +引擎 9 +強大 9 +律賓 9 +後期 9 +快速 9 +恢復 9 +意外 9 +戰略 9 +打擊 9 +批評 9 +拍攝 9 +接觸 9 +攻入 9 +放棄 9 +政權 9 +教學 9 +斯基 9 +易斯 9 +星期 9 +普通 9 +朋友 9 +未能 9 +本人 9 +本身 9 +核心 9 +森林 9 +標誌 9 +機會 9 +機車 9 +權利 9 +此時 9 +民間 9 +沿海 9 +浙江 9 +湖泊 9 +滿洲 9 +爆炸 9 +父母 9 +爾德 9 +特大 9 +狀況 9 +瑞士 9 +當局 9 +發布 9 +百萬 9 +皇后 9 +皇家 9 +相互 9 +相似 9 +破壞 9 +穩定 9 +空中 9 +第五 9 +米亞 9 +粒子 9 +約翰 9 +絕對 9 +經歷 9 +經理 9 +綜合 9 +總督 9 +老師 9 +而是 9 +聯繫 9 +職務 9 +自行 9 +菲律 9 +處理 9 +觀眾 9 +解放 9 +貢獻 9 +資格 9 +進士 9 +運作 9 +那麼 9 +酒店 9 +金屬 9 +階段 9 +隧道 9 +隨後 9 +集中 9 +電話 9 +青年 9 +頻道 9 +顏色 9 +高等 9 +Go 8 +Me 8 +ge 8 +ld 8 +ru 8 +ul 8 +上升 8 +下來 8 +中環 8 +主題 8 +也納 8 +二世 8 +亞洲 8 +人工 8 +以外 8 +佔地 8 +依據 8 +俄國 8 +保守 8 +信息 8 +價格 8 +光棍 8 +內地 8 +內戰 8 +公分 8 +分鐘 8 +利益 8 +劇情 8 +加哥 8 +加拿 8 +十二 8 +即使 8 +原來 8 +古老 8 +同樣 8 +命令 8 +喜歡 8 +因素 8 +圖案 8 +地鐵 8 +報道 8 +增長 8 +大多 8 +大小 8 +大道 8 +家人 8 +專門 8 +小型 8 +小時 8 +局長 8 +山脈 8 +山西 8 +工藝 8 +工資 8 +巨大 8 +巴斯 8 +幻想 8 +廣播 8 +廣東 8 +往往 8 +從此 8 +意義 8 +意見 8 +或是 8 +房屋 8 +拿大 8 +提升 8 +提高 8 +攝影 8 +效果 8 +教授 8 +文章 8 +方千 8 +方案 8 +旅遊 8 +明確 8 +書記 8 +書院 8 +材料 8 +武漢 8 +比如 8 +氯化 8 +污染 8 +注意 8 +測試 8 +湯姆 8 +澳大 8 +澳洲 8 +瀋陽 8 +燃料 8 +爵士 8 +現存 
8 +男子 8 +病逝 8 +發明 8 +白色 8 +的話 8 +監督 8 +真正 8 +知名 8 +秘書 8 +程度 8 +穆斯 8 +立方 8 +符號 8 +等等 8 +維也 8 +維爾 8 +編碼 8 +編輯 8 +羽毛 8 +翻譯 8 +考慮 8 +聚集 8 +股份 8 +臨時 8 +良好 8 +芝加 8 +表達 8 +複雜 8 +襲擊 8 +西南 8 +西方 8 +解釋 8 +討論 8 +賽季 8 +贏得 8 +軟體 8 +過去 8 +部長 8 +里亞 8 +重大 8 +銀河 8 +長度 8 +隨即 8 +雄性 8 +餐廳 8 +首府 8 +高中 8 +麥克 8 +.c 7 +9餘 7 +Ba 7 +Ch 7 +Da 7 +H9 7 +Ha 7 +Ja 7 +La 7 +Mi 7 +Pa 7 +Ro 7 +Sa 7 +ap 7 +gi 7 +ie 7 +mi 7 +nc 7 +oh 7 +pa 7 +rk 7 +sc 7 +tu 7 +um 7 +下午 7 +不可 7 +主人 7 +之下 7 +事實 7 +事情 7 +二戰 7 +交換 7 +任命 7 +伊麗 7 +伯特 7 +住宅 7 +佛教 7 +保險 7 +傳說 7 +入侵 7 +公共 7 +公務 7 +公爵 7 +共產 7 +典型 7 +分析 7 +前身 7 +創造 7 +匈奴 7 +北角 7 +十三 7 +十字 7 +原著 7 +各地 7 +名稱 7 +名義 7 +吸引 7 +哈爾 7 +員工 7 +哲學 7 +唐朝 7 +在此 7 +城堡 7 +城門 7 +基金 7 +場所 7 +大使 7 +天星 7 +天然 7 +失敗 7 +奴隸 7 +姆斯 7 +學術 7 +安排 7 +實現 7 +實行 7 +專科 7 +尋找 7 +尋求 7 +小組 7 +島嶼 7 +差異 7 +巴西 7 +市區 7 +市民 7 +常常 7 +平原 7 +年級 7 +年輕 7 +建國 7 +弗吉 7 +弗朗 7 +強調 7 +形象 7 +很少 7 +德拉 7 +想像 7 +意識 7 +愛爾 7 +找到 7 +拉伯 7 +持有 7 +指導 7 +探測 7 +支援 7 +收斂 7 +教師 7 +斯坦 7 +斯塔 7 +方英 7 +旗下 7 +最多 7 +本地 7 +某些 7 +校園 7 +核糖 7 +格林 7 +條約 7 +榮譽 7 +樂隊 7 +檢查 7 +母音 7 +氣候 7 +水庫 7 +法蘭 7 +海岸 7 +海洋 7 +混合 7 +清真 7 +港島 7 +湖南 7 +激烈 7 +無綫 7 +然後 7 +熊貓 7 +爾特 7 +爾蘭 7 +特有 7 +現有 7 +現象 7 +球員 7 +球季 7 +理工 7 +瑪麗 7 +甘肅 7 +生態 7 +申請 7 +真實 7 +石油 7 +秘密 7 +移動 7 +空軍 7 +突破 7 +策略 7 +簽訂 7 +結合 7 +維新 7 +美麗 7 +翌年 7 +臺灣 7 +興建 7 +興趣 7 +舉辦 7 +航班 7 +航線 7 +莎白 7 +著作 7 +蒙特 7 +蘭克 7 +衛生 7 +表演 7 +表面 7 +規劃 7 +覺得 7 +觀測 7 +觀點 7 +計算 7 +訪問 7 +設施 7 +評論 7 +調整 7 +講述 7 +議院 7 +貴族 7 +貿易 7 +較小 7 +較為 7 +轟炸 7 +迅速 7 +近年 7 +連接 7 +道德 7 +達成 7 +適合 7 +選出 7 +邏輯 7 +醫學 7 +重點 7 +錄製 7 +鏡頭 7 +長期 7 +長達 7 +降低 7 +需求 7 +面對 7 +韓國 7 +領先 7 +領袖 7 +題材 7 +風暴 7 +食用 7 +駐守 7 +體現 7 +體系 7 +高級 7 +高達 7 +魔法 7 +麗莎 7 +Al 6 +Ar 6 +Br 6 +Cl 6 +JR 6 +Ka 6 +Le 6 +Li 6 +M9 6 +Mo 6 +N9 6 +Ph 6 +Wi 6 +ad 6 +ag 6 +ay 6 +ca 6 +do 6 +ed 6 +ef 6 +ew 6 +hn 6 +ho 6 +hr 6 +ig 6 +mo 6 +ob 6 +of 6 +op 6 +pp 6 +rr 6 +so 6 +ts 6 +ud 6 +一世 6 +丈夫 6 +上市 6 +上映 6 +事物 6 +亞歷 6 +亦是 6 +享受 6 +代理 6 +任務 6 +但丁 6 +作出 6 +來源 6 +來越 6 +依然 6 +依靠 6 +促進 6 +信號 6 +個體 6 +做法 6 +優勢 6 +元朗 6 +克拉 6 +克蘭 6 +入口 6 +全家 6 +公民 6 +公眾 6 +出土 6 +判決 6 +劃分 6 +加工 6 +助理 6 
+努力 6 +動力 6 +十五 6 +協議 6 +卡斯 6 +卡爾 6 +原始 6 +反射 6 +取消 6 +口號 6 +司法 6 +否認 6 +含有 6 +吸收 6 +呼吸 6 +咖啡 6 +商品 6 +商店 6 +嘗試 6 +四川 6 +困難 6 +國歌 6 +基因 6 +壓力 6 +外國 6 +多樣 6 +大大 6 +大獎 6 +大眾 6 +太空 6 +夫人 6 +夫斯 6 +奧爾 6 +奧運 6 +她們 6 +好友 6 +如同 6 +始建 6 +季節 6 +官方 6 +定居 6 +定義 6 +客運 6 +宣佈 6 +家中 6 +密碼 6 +對應 6 +對抗 6 +對象 6 +導演 6 +展覽 6 +山大 6 +島上 6 +師範 6 +平等 6 +平面 6 +廣告 6 +延伸 6 +強度 6 +形容 6 +形態 6 +形狀 6 +影片 6 +彼此 6 +德爾 6 +情感 6 +意味 6 +懷孕 6 +成熟 6 +成績 6 +成長 6 +手法 6 +打算 6 +批准 6 +投票 6 +拉斯 6 +授予 6 +提名 6 +搖滾 6 +搜索 6 +操作 6 +擴展 6 +改編 6 +效力 6 +敘利 6 +教導 6 +斯林 6 +斯科 6 +新城 6 +方向 6 +方形 6 +日報 6 +日耳 6 +時任 6 +時常 6 +普魯 6 +更名 6 +最近 6 +朝廷 6 +東西 6 +查爾 6 +查理 6 +柯林 6 +校區 6 +校長 6 +歌曲 6 +歷山 6 +死後 6 +民眾 6 +氧化 6 +河道 6 +流行 6 +海盜 6 +消費 6 +深入 6 +深圳 6 +滅亡 6 +無論 6 +無關 6 +爾曼 6 +版權 6 +牙齒 6 +王朝 6 +玻璃 6 +生存 6 +男友 6 +畫家 6 +病毒 6 +發出 6 +發起 6 +發達 6 +確認 6 +神奇 6 +神秘 6 +神經 6 +禁止 6 +私人 6 +秦國 6 +立刻 6 +立場 6 +童年 6 +第七 6 +籃球 6 +米蘭 6 +經典 6 +經驗 6 +緬甸 6 +繪畫 6 +缺乏 6 +習俗 6 +翡翠 6 +耳曼 6 +能量 6 +色彩 6 +荷蘭 6 +藉由 6 +蘇格 6 +虛擬 6 +蛋白 6 +血統 6 +行走 6 +表明 6 +製成 6 +西斯 6 +西蘭 6 +覆蓋 6 +規則 6 +設置 6 +試驗 6 +詩人 6 +詩歌 6 +該片 6 +說服 6 +說法 6 +諮詢 6 +證明 6 +豐富 6 +超人 6 +越來 6 +跑道 6 +車展 6 +輿論 6 +近代 6 +返回 6 +退役 6 +通往 6 +通訊 6 +進步 6 +過來 6 +選區 6 +遺傳 6 +邀請 6 +邊緣 6 +酒精 6 +醫生 6 +醫療 6 +金融 6 +銷售 6 +開展 6 +開放 6 +阻止 6 +陷入 6 +隊員 6 +階級 6 +隨機 6 +雕刻 6 +雲南 6 +電池 6 +非洲 6 +顧問 6 +首先 6 +馬克 6 +馬爾 6 +馬達 6 +騎兵 6 +魯士 6 +9A 5 +9百 5 +Bo 5 +Di 5 +ET 5 +El 5 +Fi 5 +In 5 +Jo 5 +Ne 5 +Ni 5 +Re 5 +Ri 5 +Se 5 +Sh 5 +ab 5 +ac 5 +av 5 +bi 5 +ck 5 +ev 5 +fo 5 +gl 5 +im 5 +ke 5 +ki 5 +lu 5 +nk 5 +oc 5 +ph 5 +rc 5 +rg 5 +rl 5 +rm 5 +su 5 +t. 
5 +ue 5 +三十 5 +上述 5 +不僅 5 +不好 5 +中立 5 +中西 5 +中間 5 +丹尼 5 +主流 5 +事故 5 +亞馬 5 +人均 5 +今天 5 +今日 5 +介入 5 +以北 5 +任期 5 +佔據 5 +佛羅 5 +作家 5 +來西 5 +依舊 5 +侵略 5 +保加 5 +保存 5 +信任 5 +信奉 5 +信託 5 +修正 5 +倫比 5 +停止 5 +傑出 5 +傳承 5 +傷害 5 +像是 5 +儀式 5 +免費 5 +公交 5 +公會 5 +其它 5 +其餘 5 +冷卻 5 +出口 5 +分配 5 +分類 5 +列入 5 +別墅 5 +刺激 5 +創建 5 +加熱 5 +加盟 5 +勒密 5 +勒斯 5 +動作 5 +勞工 5 +化合 5 +北海 5 +十六 5 +十多 5 +千億 5 +升級 5 +南北 5 +南極 5 +占庭 5 +參謀 5 +參議 5 +受傷 5 +叫做 5 +司馬 5 +各個 5 +合法 5 +合理 5 +吉爾 5 +同盟 5 +名單 5 +名詞 5 +呈現 5 +周圍 5 +品牌 5 +哈定 5 +啟超 5 +喇嘛 5 +四十 5 +固定 5 +固體 5 +圖像 5 +土耳 5 +在內 5 +地圖 5 +城區 5 +執政 5 +培養 5 +堅持 5 +場地 5 +塞爾 5 +壁畫 5 +外科 5 +多芬 5 +大氣 5 +大西 5 +奧斯 5 +如下 5 +如今 5 +始終 5 +學名 5 +學會 5 +學科 5 +宇宙 5 +安裝 5 +官吏 5 +客戶 5 +客體 5 +宮廷 5 +家長 5 +容納 5 +宿舍 5 +察覺 5 +寫作 5 +專利 5 +專家 5 +對外 5 +對此 5 +少數 5 +尼克 5 +尼士 5 +尼黑 5 +展出 5 +展開 5 +工具 5 +巴克 5 +巴哈 5 +巴爾 5 +市政 5 +希米 5 +席位 5 +年度 5 +底部 5 +廈門 5 +廣西 5 +建成 5 +引發 5 +弟弟 5 +得知 5 +微博 5 +德堡 5 +德里 5 +意志 5 +意思 5 +愛情 5 +感情 5 +感覺 5 +慈善 5 +態度 5 +慕尼 5 +慶祝 5 +成年 5 +成本 5 +成都 5 +戰國 5 +戰後 5 +房間 5 +手中 5 +手段 5 +托勒 5 +托爾 5 +技能 5 +抗議 5 +抵抗 5 +抵達 5 +拉格 5 +拜占 5 +持續 5 +指定 5 +指示 5 +掌握 5 +排名 5 +接管 5 +推進 5 +措施 5 +提到 5 +撤銷 5 +收入 5 +收藏 5 +政務 5 +故宮 5 +教皇 5 +教習 5 +敵人 5 +文忠 5 +文獻 5 +斯堡 5 +斯托 5 +新型 5 +新華 5 +新鮮 5 +方便 5 +方言 5 +施工 5 +旅行 5 +日期 5 +早年 5 +明治 5 +是否 5 +更加 5 +書中 5 +有的 5 +本片 5 +東海 5 +林頓 5 +架構 5 +某種 5 +格拉 5 +格羅 5 +格里 5 +棉花 5 +棒球 5 +構成 5 +樓梯 5 +機制 5 +機器 5 +次年 5 +欣賞 5 +歡迎 5 +正常 5 +正確 5 +武裝 5 +殺害 5 +每天 5 +比利 5 +民兵 5 +氣體 5 +水果 5 +水系 5 +江西 5 +決策 5 +河北 5 +河南 5 +波希 5 +波音 5 +泥塑 5 +泰安 5 +泳兒 5 +洛桑 5 +海峽 5 +海底 5 +海德 5 +消息 5 +游擊 5 +湖北 5 +溫度 5 +溫泉 5 +滅絕 5 +演化 5 +演奏 5 +漢朝 5 +澤東 5 +濃度 5 +物品 5 +物業 5 +物體 5 +特勒 5 +特蘭 5 +狩獵 5 +王子 5 +珊瑚 5 +現場 5 +現實 5 +瑪利 5 +生下 5 +生涯 5 +用作 5 +發送 5 +百餘 5 +直徑 5 +直至 5 +真理 5 +真相 5 +祖先 5 +神聖 5 +移居 5 +程序 5 +種植 5 +種類 5 +稱作 5 +空氣 5 +突變 5 +競賽 5 +符合 5 +第六 5 +簡單 5 +紅軍 5 +紐西 5 +級別 5 +細節 5 +組合 5 +結局 5 +維亞 5 +維吉 5 +維多 5 +編號 5 +練習 5 +總署 5 +羅伯 5 +美洲 5 +群島 5 +群眾 5 +翼龍 5 +耕地 5 +耳其 5 +聯絡 5 +聲明 5 +肯定 5 +興奮 5 +興起 5 +船上 5 +英里 5 +華視 5 +萊姆 5 +落後 5 +薩摩 5 +薩斯 5 +薩爾 5 +藝人 5 +藥物 5 +蘭卡 5 +虎丘 5 +虛構 5 +融合 5 +血壓 5 +行業 5 +裝甲 5 +裝置 5 
+裡面 5 +西遊 5 +西門 5 +解散 5 +設備 5 +診斷 5 +該地 5 +該屬 5 +詹姆 5 +認可 5 +認知 5 +認識 5 +誕生 5 +象徵 5 +貝多 5 +財產 5 +貨車 5 +貨運 5 +質量 5 +赤道 5 +超級 5 +越南 5 +趙國 5 +身亡 5 +軍團 5 +輻鰭 5 +轉移 5 +轉變 5 +辭去 5 +辭職 5 +退出 5 +通車 5 +通道 5 +連任 5 +連續 5 +進而 5 +進軍 5 +適當 5 +遭遇 5 +那裡 5 +郵政 5 +鄉鎮 5 +鄰近 5 +醒亞 5 +醫師 5 +鎮壓 5 +長大 5 +長官 5 +長沙 5 +開設 5 +防禦 5 +陝西 5 +院長 5 +階層 5 +障礙 5 +隸屬 5 +電梯 5 +電車 5 +青海 5 +預測 5 +預算 5 +預防 5 +領土 5 +頻率 5 +食品 5 +飲料 5 +首相 5 +馬尼 5 +馬遜 5 +馬里 5 +騎士 5 +體積 5 +體色 5 +高低 5 +黑人 5 +龐大 5 +9: 4 +9B 4 +:9 4 +AA 4 +AC 4 +Ad 4 +BA 4 +BC 4 +CR 4 +Ce 4 +DS 4 +De 4 +Do 4 +E9 4 +Fo 4 +Fr 4 +Ga 4 +Ho 4 +Lo 4 +NA 4 +NB 4 +Na 4 +OR 4 +Pi 4 +SP 4 +So 4 +Sp 4 +Su 4 +To 4 +Wo 4 +Yo 4 +ak 4 +ba 4 +cr 4 +dd 4 +dl 4 +dr 4 +ei 4 +em 4 +gh 4 +gu 4 +lt 4 +ly 4 +mp 4 +nb 4 +nu 4 +ok 4 +ot 4 +ow 4 +oy 4 +ps 4 +rd 4 +sh 4 +tt 4 +ut 4 +wa 4 +一半 4 +一旦 4 +一百 4 +三世 4 +三氯 4 +上下 4 +上演 4 +上訴 4 +下令 4 +下頜 4 +不及 4 +不得 4 +不應 4 +不等 4 +不足 4 +世凱 4 +中全 4 +中東 4 +中止 4 +中視 4 +丹羽 4 +主演 4 +之上 4 +乘坐 4 +乘客 4 +乾燥 4 +乾隆 4 +了解 4 +事變 4 +五世 4 +五十 4 +五百 4 +亞冠 4 +亞當 4 +亞軍 4 +交給 4 +交配 4 +交響 4 +亦為 4 +享年 4 +人事 4 +代言 4 +以為 4 +以西 4 +任職 4 +企圖 4 +伊拉 4 +伺服 4 +供應 4 +依法 4 +侵蝕 4 +保羅 4 +保障 4 +信徒 4 +修復 4 +倫斯 4 +倫理 4 +做出 4 +停留 4 +億9 4 +優惠 4 +優秀 4 +兄弟 4 +充電 4 +先進 4 +克森 4 +克特 4 +克薩 4 +入選 4 +內斯 4 +全會 4 +全面 4 +公安 4 +公式 4 +共振 4 +其間 4 +具體 4 +冬天 4 +出入 4 +出場 4 +出戰 4 +出發 4 +出租 4 +出色 4 +分佈 4 +分成 4 +分期 4 +列表 4 +利普 4 +利特 4 +則天 4 +則為 4 +前期 4 +前線 4 +前進 4 +劇集 4 +加州 4 +加強 4 +勝利 4 +包裝 4 +匈牙 4 +區劃 4 +十七 4 +協定 4 +協調 4 +南側 4 +南延 4 +南海 4 +卡拉 4 +卡洛 4 +卡羅 4 +危機 4 +原有 4 +原理 4 +參考 4 +參賽 4 +古物 4 +只要 4 +各國 4 +各式 4 +各界 4 +合成 4 +合眾 4 +合金 4 +吉他 4 +同事 4 +同治 4 +名將 4 +吾爾 4 +周年 4 +命運 4 +哈伊 4 +哈里 4 +哥倫 4 +商人 4 +啟用 4 +單車 4 +嘲諷 4 +回來 4 +國防 4 +圓形 4 +地底 4 +地形 4 +地面 4 +埃米 4 +基辛 4 +堅決 4 +塔夫 4 +塞維 4 +士頓 4 +外星 4 +多利 4 +大同 4 +大帝 4 +大臣 4 +天皇 4 +夫婦 4 +失望 4 +奧林 4 +妹妹 4 +姐姐 4 +姓氏 4 +委任 4 +威斯 4 +婚姻 4 +婦女 4 +媽媽 4 +學堂 4 +學府 4 +安德 4 +官員 4 +定律 4 +宣稱 4 +實業 4 +實體 4 +寶貝 4 +小學 4 +少女 4 +尼斯 4 +尼西 4 +尼迪 4 +尼龍 4 +局部 4 +展示 4 +屯門 4 +山區 4 +山頂 4 +島式 4 +島津 4 +巡迴 4 +巴塞 4 +巴格 4 +巴納 4 +布拉 4 +布里 4 +布魯 4 +帶給 4 +常春 4 +常用 4 +幅度 4 +年齡 4 
+幽默 4 +度假 4 +廚房 4 +廢除 4 +弗里 4 +影像 4 +影業 4 +很快 4 +很難 4 +後者 4 +得分 4 +得名 4 +得寵 4 +循環 4 +徵召 4 +德克 4 +志願 4 +怎麼 4 +性別 4 +性質 4 +恐龍 4 +患者 4 +情報 4 +情形 4 +情節 4 +情緒 4 +應該 4 +戀愛 4 +成人 4 +成分 4 +戰敗 4 +戰死 4 +扮演 4 +批判 4 +技巧 4 +抒情 4 +拉多 4 +拉夫 4 +拓展 4 +招募 4 +指數 4 +按照 4 +挪威 4 +排列 4 +排水 4 +排行 4 +接收 4 +接替 4 +推薦 4 +推行 4 +揚州 4 +擔心 4 +擴大 4 +擴建 4 +擴張 4 +收到 4 +收錄 4 +改造 4 +攻克 4 +教區 4 +教宗 4 +教練 4 +整理 4 +數十 4 +數千 4 +數字 4 +文泰 4 +斯加 4 +斯大 4 +斯洛 4 +斯維 4 +斯里 4 +新疆 4 +新竹 4 +旅客 4 +日常 4 +昆明 4 +明基 4 +星球 4 +星等 4 +春秋 4 +時段 4 +晉國 4 +晚間 4 +暗示 4 +暴力 4 +更換 4 +曼德 4 +最低 4 +最好 4 +最長 4 +有用 4 +服裝 4 +望遠 4 +木材 4 +本作 4 +本土 4 +本科 4 +本線 4 +本魚 4 +杉磯 4 +杜蘭 4 +東區 4 +林匹 4 +校舍 4 +案件 4 +楚國 4 +樂器 4 +標本 4 +樞紐 4 +模仿 4 +橄欖 4 +檢測 4 +正月 4 +此前 4 +此次 4 +步兵 4 +武術 4 +歷任 4 +死神 4 +殺死 4 +每秒 4 +比例 4 +毫克 4 +毫米 4 +水深 4 +永江 4 +污泥 4 +沉澱 4 +沙灘 4 +河川 4 +油價 4 +治亞 4 +法案 4 +法規 4 +波斯 4 +注入 4 +洛斯 4 +洛杉 4 +洛陽 4 +流感 4 +流經 4 +海域 4 +海外 4 +海戰 4 +海水 4 +海灣 4 +海面 4 +液態 4 +液體 4 +測量 4 +港鐵 4 +滿貫 4 +潮濕 4 +濟南 4 +灣仔 4 +火災 4 +炸藥 4 +烏克 4 +無意 4 +無線 4 +無錫 4 +照片 4 +營業 4 +爾尼 4 +爾濱 4 +牙利 4 +牛奶 4 +牧場 4 +特遣 4 +特點 4 +犯罪 4 +狀元 4 +狙擊 4 +獎勵 4 +王后 4 +珍珠 4 +珠爾 4 +現今 4 +現金 4 +球會 4 +理事 4 +理想 4 +琉球 4 +瓦爾 4 +瓷器 4 +甘珠 4 +生化 4 +生意 4 +產地 4 +產量 4 +當天 4 +當年 4 +當日 4 +當然 4 +疫苗 4 +癌症 4 +發育 4 +發言 4 +發酵 4 +皮膚 4 +監獄 4 +直升 4 +直隸 4 +相比 4 +相近 4 +省份 4 +省委 4 +省級 4 +督察 4 +矩陣 4 +短暫 4 +短篇 4 +研發 4 +社團 4 +祖父 4 +神廟 4 +神達 4 +票房 4 +福斯 4 +科爾 4 +租界 4 +種族 4 +稱號 4 +空調 4 +突然 4 +立即 4 +競爭 4 +等級 4 +節日 4 +簽約 4 +米爾 4 +米特 4 +精確 4 +紋理 4 +納德 4 +納粹 4 +索馬 4 +終止 4 +終結 4 +維吾 4 +維埃 4 +網球 4 +緊密 4 +總量 4 +總長 4 +繼任 4 +罕見 4 +罪名 4 +羅倫 4 +羅爾 4 +義務 4 +習慣 4 +老闆 4 +考察 4 +考試 4 +聖母 4 +聲稱 4 +聲譽 4 +背景 4 +胡佛 4 +自動 4 +船隻 4 +艱難 4 +苦艾 4 +英尺 4 +草本 4 +莊園 4 +莫斯 4 +華航 4 +華龍 4 +萊茵 4 +落成 4 +著重 4 +蒙扎 4 +蓉蓉 4 +薩克 4 +蘇家 4 +蘋果 4 +蘭戈 4 +蜘蛛 4 +血栓 4 +行省 4 +術語 4 +衛星 4 +西側 4 +西曼 4 +西洋 4 +西納 4 +親王 4 +評價 4 +評定 4 +詞語 4 +該劇 4 +該區 4 +課程 4 +談話 4 +請求 4 +論文 4 +識字 4 +警署 4 +議長 4 +讀者 4 +財富 4 +財政 4 +貨幣 4 +貨物 4 +費爾 4 +資本 4 +資深 4 +資金 4 +購買 4 +贊助 4 +起義 4 +身上 4 +身分 4 +躲避 4 +車序 4 +軍人 4 +軍力 4 +軍官 4 +軍閥 4 +較多 4 +較少 4 +較高 4 +輸入 4 +輻射 4 +轄下 4 +轉換 4 +辛格 4 +辦事 4 +辦法 4 +辦理 4 
+農民 4 +迪絲 4 +逃往 4 +這麼 4 +週期 4 +進口 4 +進球 4 +進程 4 +遊行 4 +過度 4 +過枝 4 +達爾 4 +遷移 4 +遼寧 4 +邊境 4 +部落 4 +郵票 4 +里奧 4 +里蘭 4 +重視 4 +野生 4 +量子 4 +金字 4 +針對 4 +銅鑼 4 +鋼琴 4 +錯誤 4 +鏡片 4 +鏡面 4 +長子 4 +長江 4 +門診 4 +開幕 4 +開闢 4 +關心 4 +防守 4 +阿格 4 +院校 4 +陽光 4 +隊伍 4 +隔離 4 +雕塑 4 +雨水 4 +電力 4 +電台 4 +電磁 4 +電訊 4 +靜態 4 +靜脈 4 +非法 4 +靠近 4 +順位 4 +預期 4 +願意 4 +風險 4 +颱風 4 +飼養 4 +餘下 4 +首領 4 +馬丁 4 +馬利 4 +體內 4 +體長 4 +高架 4 +高溫 4 +鬥爭 4 +鰭魚 4 +鳥類 4 +黃埔 4 +黑色 4 +黨籍 4 +鼓勵 4 +龍鳥 4 +/9 3 +9/ 3 +9N 3 +AS 3 +Ae 3 +An 3 +Av 3 +CM 3 +DC 3 +DP 3 +FC 3 +GD 3 +Ge 3 +HI 3 +He 3 +Je 3 +Ki 3 +Mu 3 +NE 3 +No 3 +PA 3 +PS 3 +Pe 3 +Pl 3 +Po 3 +Pr 3 +Pu 3 +Qu 3 +RH 3 +SM 3 +ST 3 +TV 3 +Te 3 +Un 3 +Vi 3 +We 3 +ah 3 +az 3 +cu 3 +da 3 +ds 3 +e- 3 +eo 3 +ga 3 +go 3 +hl 3 +hu 3 +iP 3 +ka 3 +km 3 +lp 3 +ls 3 +ov 3 +ox 3 +po 3 +py 3 +sb 3 +ua 3 +ub 3 +uc 3 +uk 3 +up 3 +va 3 +we 3 +wi 3 +xx 3 +yc 3 +°C 3 +一中 3 +一八 3 +一同 3 +一時 3 +一郎 3 +三江 3 +三百 3 +上游 3 +上環 3 +上萬 3 +上表 3 +上課 3 +上面 3 +下列 3 +下台 3 +下場 3 +下級 3 +下車 3 +不但 3 +不出 3 +不想 3 +不敵 3 +不明 3 +不遠 3 +不韋 3 +世宗 3 +丟失 3 +中古 3 +中子 3 +中將 3 +中期 3 +中轉 3 +中風 3 +丹佛 3 +主任 3 +主動 3 +主唱 3 +主機 3 +主編 3 +主辦 3 +主體 3 +乃爾 3 +之一 3 +之內 3 +之時 3 +乙烯 3 +九一 3 +也好 3 +二百 3 +二胺 3 +互聯 3 +五角 3 +亞利 3 +亞目 3 +亞視 3 +交往 3 +亥俄 3 +京都 3 +亮度 3 +人性 3 +人次 3 +人生 3 +人身 3 +他人 3 +付出 3 +仙女 3 +以往 3 +以致 3 +任內 3 +任教 3 +份子 3 +企鵝 3 +伊恩 3 +伊賀 3 +伍德 3 +休息 3 +估計 3 +伸出 3 +伽利 3 +低槓 3 +住戶 3 +住院 3 +佔有 3 +佛像 3 +佛學 3 +佛瑞 3 +作霖 3 +來亞 3 +來自 3 +例子 3 +供奉 3 +供給 3 +依賴 3 +俄亥 3 +俘虜 3 +保安 3 +保育 3 +保證 3 +信德 3 +修建 3 +修道 3 +個別 3 +個性 3 +倖存 3 +候選 3 +借用 3 +倫薩 3 +值得 3 +偉大 3 +停車 3 +備受 3 +傳奇 3 +傳教 3 +傳染 3 +傾向 3 +優異 3 +允許 3 +元洪 3 +光源 3 +光緒 3 +克羅 3 +兒女 3 +兒法 3 +內哥 3 +內瓦 3 +內陸 3 +全市 3 +全縣 3 +全體 3 +八百 3 +公國 3 +公尺 3 +公轉 3 +六十 3 +共計 3 +兵力 3 +兼任 3 +冊封 3 +凱撒 3 +出使 3 +出獄 3 +函數 3 +分之 3 +分散 3 +分行 3 +分裂 3 +分解 3 +切斷 3 +刊物 3 +列為 3 +利堅 3 +利斯 3 +利桑 3 +利略 3 +利福 3 +制定 3 +前後 3 +前鋒 3 +前面 3 +剛好 3 +創意 3 +劇團 3 +劇本 3 +劇目 3 +劇院 3 +劍橋 3 +力學 3 +加爾 3 +勒格 3 +勞倫 3 +化工 3 +北冕 3 +北洋 3 +區別 3 +十四 3 +十萬 3 +十餘 3 +千上 3 +千萬 3 +升任 3 +升格 3 +卑斯 3 +南昌 3 +博恩 3 +印象 3 +即將 3 +卻是 3 +厘米 3 +原則 3 +原告 3 +原料 3 +友誼 3 
+受損 3 +叛亂 3 +口徑 3 +古城 3 +可惜 3 +台中 3 +史上 3 +史密 3 +各州 3 +各省 3 +吉布 3 +同名 3 +同性 3 +同情 3 +名利 3 +名譽 3 +告訴 3 +周邊 3 +呼聲 3 +和也 3 +和約 3 +品種 3 +哥哥 3 +哥特 3 +哥羅 3 +哺乳 3 +喜愛 3 +喬伊 3 +單一 3 +嘉賓 3 +器官 3 +噴泉 3 +嚴格 3 +四世 3 +回應 3 +回歸 3 +國泰 3 +國籍 3 +國軍 3 +圍繞 3 +園區 3 +土壤 3 +在任 3 +在場 3 +地中 3 +地勢 3 +地獄 3 +地理 3 +地阿 3 +報導 3 +場合 3 +塑造 3 +塞普 3 +塞隆 3 +填充 3 +填海 3 +填補 3 +境地 3 +墜毀 3 +士官 3 +壯觀 3 +夏天 3 +夕法 3 +外來 3 +外界 3 +外部 3 +多倫 3 +多瓦 3 +多達 3 +大夫 3 +大家 3 +大師 3 +大拿 3 +大橋 3 +大權 3 +大致 3 +大街 3 +大賽 3 +大選 3 +天國 3 +天王 3 +天空 3 +太小 3 +太郎 3 +失業 3 +奇異 3 +奈米 3 +契約 3 +奪取 3 +女巫 3 +女王 3 +女神 3 +好評 3 +如何 3 +妨礙 3 +委派 3 +委託 3 +威力 3 +威尼 3 +威格 3 +娃娃 3 +嫁給 3 +嫌疑 3 +嬌嬌 3 +子女 3 +孟席 3 +孫子 3 +安徽 3 +安東 3 +安納 3 +安置 3 +宋朝 3 +完備 3 +完善 3 +完工 3 +完美 3 +官僚 3 +宣傳 3 +宣告 3 +室內 3 +宰相 3 +家寶 3 +家裡 3 +家鄉 3 +密斯 3 +富有 3 +富江 3 +寒冷 3 +實在 3 +實施 3 +寶石 3 +封閉 3 +射入 3 +射擊 3 +專用 3 +對方 3 +對比 3 +導航 3 +小吃 3 +小堂 3 +小孩 3 +小平 3 +就算 3 +尼山 3 +尼爾 3 +尾貓 3 +局面 3 +屋大 3 +屋邨 3 +州長 3 +已婚 3 +已知 3 +布庫 3 +布斯 3 +布爾 3 +布羅 3 +布袋 3 +布賴 3 +希爾 3 +師傅 3 +席斯 3 +帶到 3 +帶走 3 +帶頭 3 +帽子 3 +平台 3 +平民 3 +平衡 3 +年間 3 +幸福 3 +幹線 3 +幾何 3 +序列 3 +底層 3 +度過 3 +庫斯 3 +康乃 3 +延續 3 +延長 3 +建業 3 +弓毛 3 +引入 3 +引力 3 +引用 3 +弟子 3 +弱小 3 +強制 3 +強壯 3 +彈簧 3 +彰化 3 +影視 3 +往來 3 +往後 3 +征服 3 +待遇 3 +很好 3 +很高 3 +後人 3 +後方 3 +後衛 3 +後面 3 +徒步 3 +復工 3 +復辟 3 +徵收 3 +德勒 3 +德川 3 +德意 3 +德烈 3 +德綱 3 +徹底 3 +心情 3 +必然 3 +必要 3 +忽略 3 +思潮 3 +怡和 3 +急速 3 +性格 3 +怪物 3 +怪獸 3 +恩來 3 +悠久 3 +情書 3 +想到 3 +想法 3 +愛上 3 +愛國 3 +愛達 3 +感應 3 +慢慢 3 +憑藉 3 +憤怒 3 +懷疑 3 +懸崖 3 +成份 3 +成千 3 +成果 3 +戒毒 3 +截止 3 +戰俘 3 +戰時 3 +戰艦 3 +戰術 3 +戲劇 3 +房地 3 +房子 3 +手下 3 +扎維 3 +托克 3 +扭曲 3 +扶手 3 +承受 3 +承擔 3 +投手 3 +抗戰 3 +抵擋 3 +拆除 3 +拉丁 3 +拯救 3 +指令 3 +挑戰 3 +捐助 3 +捕捉 3 +捷克 3 +排出 3 +探討 3 +接任 3 +接唱 3 +提議 3 +換乘 3 +損失 3 +損害 3 +搬到 3 +摩爾 3 +撞擊 3 +播映 3 +撰寫 3 +擔當 3 +據說 3 +擴充 3 +支付 3 +支撐 3 +支流 3 +收回 3 +收拾 3 +收購 3 +改制 3 +改善 3 +改稱 3 +改進 3 +攻打 3 +放射 3 +故障 3 +敘述 3 +教科 3 +教養 3 +文人 3 +文件 3 +料理 3 +斯克 3 +斯德 3 +斯拉 3 +斯曼 3 +斯氏 3 +新增 3 +新建 3 +新教 3 +新村 3 +新羅 3 +方呎 3 +旁遮 3 +族群 3 +日內 3 +日後 3 +日間 3 +明星 3 +明珠 3 +明納 3 +昏迷 3 +星光 3 +星際 3 +星雲 3 +映射 3 +春日 3 +春藤 3 +昭和 3 +時機 3 +時空 3 +晚年 3 +晨興 3 +普勒 3 +普斯 3 
+普選 3 +景德 3 +景點 3 +晶體 3 +暫時 3 +暴動 3 +更為 3 +書店 3 +曼聯 3 +替代 3 +替換 3 +最佳 3 +最為 3 +會堂 3 +月氏 3 +月球 3 +有利 3 +有機 3 +有權 3 +有趣 3 +服役 3 +服用 3 +朗索 3 +朝日 3 +期望 3 +木板 3 +本來 3 +村民 3 +東側 3 +東尼 3 +東港 3 +東面 3 +松鼠 3 +板塊 3 +柏立 3 +某個 3 +柯爾 3 +栃木 3 +校名 3 +核電 3 +根廷 3 +格式 3 +格納 3 +栽培 3 +栽種 3 +桃園 3 +桃浦 3 +梅妃 3 +梅莉 3 +條例 3 +極度 3 +極端 3 +概念 3 +概率 3 +榮聲 3 +槍手 3 +樂曲 3 +樂章 3 +模擬 3 +機率 3 +檢察 3 +檸檬 3 +權力 3 +權勢 3 +權益 3 +次子 3 +次日 3 +次郎 3 +歌劇 3 +正義 3 +正選 3 +步槍 3 +步道 3 +死傷 3 +死去 3 +毀滅 3 +比起 3 +民進 3 +氣壓 3 +氣泡 3 +氧氣 3 +水上 3 +水域 3 +水塔 3 +水族 3 +水溝 3 +水稻 3 +永遠 3 +求救 3 +江南 3 +江孜 3 +污水 3 +決議 3 +沒收 3 +沙烏 3 +油脂 3 +沼澤 3 +沿著 3 +法人 3 +法學 3 +法官 3 +法尼 3 +波動 3 +波塞 3 +波士 3 +波特 3 +波長 3 +泰國 3 +洋房 3 +洋行 3 +洗浴 3 +洛克 3 +洛夫 3 +洛維 3 +活佛 3 +活力 3 +流動 3 +流量 3 +海上 3 +海珊 3 +消滅 3 +淋巴 3 +淘汰 3 +淡水 3 +清代 3 +港口 3 +湖水 3 +湯瑪 3 +準則 3 +溥儀 3 +溫帶 3 +溶解 3 +滑冰 3 +漂亮 3 +漢城 3 +漳州 3 +潛入 3 +澤西 3 +火箭 3 +災難 3 +為期 3 +烏地 3 +無數 3 +煙草 3 +照相 3 +煩惱 3 +熱庫 3 +熱能 3 +爬行 3 +爾卑 3 +爾士 3 +爾多 3 +爾夫 3 +爾扎 3 +爾維 3 +爾遜 3 +爾馬 3 +牆壁 3 +牛津 3 +物資 3 +特內 3 +特化 3 +特斯 3 +特里 3 +狹窄 3 +獎學 3 +獎項 3 +獲利 3 +獲取 3 +獵食 3 +獻給 3 +率領 3 +王室 3 +珠海 3 +班納 3 +現任 3 +球場 3 +理解 3 +瑞草 3 +瑪斯 3 +瑪莉 3 +生前 3 +生成 3 +生殖 3 +產下 3 +用品 3 +用戶 3 +用法 3 +用途 3 +男女 3 +男孩 3 +畫作 3 +異常 3 +當今 3 +當作 3 +當初 3 +疑問 3 +病故 3 +瘋狂 3 +登上 3 +登基 3 +登堡 3 +登場 3 +登陸 3 +發揮 3 +發源 3 +白人 3 +白金 3 +百科 3 +皇室 3 +盟友 3 +盟旗 3 +監管 3 +目錄 3 +直系 3 +直線 3 +直選 3 +相反 3 +相機 3 +相遇 3 +真人 3 +真武 3 +眼睛 3 +睡蓮 3 +睦斯 3 +瞭解 3 +矚目 3 +知情 3 +短尾 3 +短短 3 +破曉 3 +破產 3 +硬體 3 +碎片 3 +碳化 3 +確保 3 +確立 3 +社群 3 +神父 3 +票價 3 +福尼 3 +福島 3 +禮儀 3 +禮拜 3 +科系 3 +科隆 3 +租借 3 +租賃 3 +種種 3 +積極 3 +窯瓷 3 +立基 3 +立陶 3 +童話 3 +競選 3 +竹子 3 +第八 3 +管道 3 +節慶 3 +節省 3 +米利 3 +精通 3 +精選 3 +糧食 3 +約定 3 +約瑟 3 +紅磡 3 +紅色 3 +納努 3 +納斯 3 +納蒂 3 +紛爭 3 +素貞 3 +紡織 3 +細小 3 +終點 3 +組建 3 +組裝 3 +結成 3 +維京 3 +維基 3 +維耶 3 +編劇 3 +總共 3 +總數 3 +總結 3 +總體 3 +繪製 3 +繼位 3 +纖維 3 +缺席 3 +缺點 3 +置富 3 +羅萊 3 +羊肉 3 +美利 3 +義勇 3 +老鼠 3 +考古 3 +考驗 3 +而非 3 +耶穌 3 +聖地 3 +聖誕 3 +聖靈 3 +聘請 3 +聚會 3 +聯手 3 +聯軍 3 +聲勢 3 +職位 3 +股價 3 +股票 3 +胡安 3 +自傳 3 +自我 3 +自稱 3 +自身 3 +自願 3 +至少 3 +致力 3 +致命 3 +臺南 3 +臼齒 3 +舞蹈 3 +航海 3 +航程 3 +船員 3 +艾女 3 +艾滋 3 +芭比 3 +英九 3 +茶葉 3 
+草食 3 +莉迪 3 +莫爾 3 +華格 3 +華麗 3 +菲利 3 +菲爾 3 +菲特 3 +萊特 3 +萊納 3 +著稱 3 +蒂亞 3 +蒂克 3 +蒙大 3 +蒸汽 3 +蓬勃 3 +薩魯 3 +藍色 3 +蘇州 3 +蘇爾 3 +蘇維 3 +蘇里 3 +虎鯨 3 +衍生 3 +衙門 3 +衛視 3 +衝擊 3 +衣服 3 +裁判 3 +補給 3 +裝飾 3 +複合 3 +複製 3 +西安 3 +西湖 3 +西關 3 +西面 3 +見到 3 +規格 3 +視頻 3 +親自 3 +計畫 3 +記憶 3 +評估 3 +評審 3 +該寺 3 +該書 3 +該校 3 +該站 3 +該鎮 3 +誠實 3 +誤認 3 +說明 3 +課室 3 +諷刺 3 +諸多 3 +謀殺 3 +證實 3 +識別 3 +護照 3 +讀書 3 +變形 3 +變數 3 +變體 3 +讚賞 3 +貝克 3 +貴妃 3 +貴州 3 +買家 3 +費德 3 +費雪 3 +資助 3 +賓夕 3 +賦予 3 +走廊 3 +起訴 3 +足協 3 +路徑 3 +身邊 3 +車體 3 +較大 3 +較長 3 +輔助 3 +輔導 3 +輔政 3 +輸出 3 +轄區 3 +轉乘 3 +轉到 3 +轉投 3 +轉讓 3 +近期 3 +迪斯 3 +迫使 3 +追逐 3 +退休 3 +逃離 3 +逐步 3 +通信 3 +通用 3 +通行 3 +速食 3 +連環 3 +連線 3 +逮捕 3 +進出 3 +遇見 3 +遊樂 3 +運營 3 +過渡 3 +道光 3 +達也 3 +達姆 3 +達荷 3 +違法 3 +遠航 3 +適應 3 +遮普 3 +遷徙 3 +選手 3 +選秀 3 +遺體 3 +邊界 3 +那裏 3 +邦聯 3 +部件 3 +都市 3 +鄉議 3 +配樂 3 +配置 3 +酗酒 3 +釀酒 3 +釋放 3 +里米 3 +重傷 3 +重整 3 +金山 3 +金庫 3 +金庸 3 +金鐘 3 +錄音 3 +鐵人 3 +長安 3 +長州 3 +長相 3 +長遠 3 +開播 3 +關節 3 +阿兒 3 +阿根 3 +附屬 3 +降落 3 +陣營 3 +除外 3 +陶宛 3 +陸地 3 +陸續 3 +隱藏 3 +隱語 3 +雅典 3 +雌雄 3 +雙立 3 +雜技 3 +難度 3 +雪梨 3 +雪莉 3 +零售 3 +雷克 3 +雷爾 3 +雷睦 3 +霍普 3 +靈頓 3 +青島 3 +鞏固 3 +音頻 3 +頂層 3 +順利 3 +預先 3 +頭銜 3 +頻譜 3 +題寫 3 +飲食 3 +飾演 3 +首任 3 +首演 3 +馬德 3 +驅動 3 +驅逐 3 +體操 3 +高原 3 +高層 3 +高山 3 +高麗 3 +魯曼 3 +鳳山 3 +鹿兒 3 +麥爾 3 +麥田 3 +黃金 3 +黑子 3 +黑斑 3 +黑洞 3 +黛比 3 +黨員 3 +龍馬 3 +$9 2 +%- 2 +(x 2 +.s 2 +/h 2 +9C 2 +9D 2 +9° 2 +=9 2 +AB 2 +AE 2 +AI 2 +Ab 2 +Ai 2 +Au 2 +BB 2 +BE 2 +BT 2 +C- 2 +CA 2 +CE 2 +CH 2 +CI 2 +CP 2 +DJ 2 +DN 2 +EC 2 +En 2 +Ep 2 +Eu 2 +Ev 2 +F- 2 +Fa 2 +GB 2 +GC 2 +Gi 2 +Hi 2 +Hu 2 +IG 2 +IS 2 +IV 2 +Is 2 +Ju 2 +Ke 2 +Ku 2 +MG 2 +MO 2 +My 2 +ND 2 +OC 2 +Om 2 +Or 2 +PL 2 +PV 2 +RB 2 +RI 2 +RO 2 +Ru 2 +SA 2 +SB 2 +Sc 2 +TF 2 +TO 2 +Ti 2 +Tr 2 +Tu 2 +Tw 2 +U9 2 +UA 2 +US 2 +VB 2 +VC 2 +VV 2 +Va 2 +Wa 2 +Wy 2 +XI 2 +ae 2 +af 2 +ao 2 +bl 2 +bo 2 +br 2 +bu 2 +by 2 +ci 2 +cl 2 +dg 2 +dw 2 +eg 2 +ek 2 +ep 2 +eu 2 +fa 2 +ff 2 +gy 2 +hm 2 +hy 2 +ib 2 +if 2 +ii 2 +iu 2 +kh 2 +ks 2 +lk 2 +lm 2 +lv 2 +m/ 2 +nm 2 +nr 2 +o- 2 +oi 2 +pu 2 +rb 2 +rh 2 +rv 2 +rw 2 +ry 2 +sm 2 +sp 2 +sy 2 +tl 2 +tz 2 +ug 2 +ui 2 +wh 2 +x) 2 +ya 2 
+ye 2 +ys 2 +yt 2 +zi 2 +一共 2 +一千 2 +一向 2 +一度 2 +一手 2 +一提 2 +一貫 2 +一面 2 +丁尼 2 +七十 2 +七喜 2 +三中 2 +三井 2 +三棟 2 +三藏 2 +上任 2 +上佳 2 +上加 2 +上午 2 +上吊 2 +上將 2 +上層 2 +上方 2 +上校 2 +上街 2 +下去 2 +下層 2 +下屬 2 +下旬 2 +下水 2 +下海 2 +下游 2 +下野 2 +不一 2 +不停 2 +不再 2 +不列 2 +不受 2 +不夠 2 +不如 2 +不宜 2 +不已 2 +不幸 2 +不法 2 +不清 2 +不用 2 +不管 2 +不聊 2 +不良 2 +不論 2 +不變 2 +不錯 2 +不需 2 +不願 2 +丐幫 2 +世俗 2 +世博 2 +世卿 2 +世家 2 +世民 2 +世襲 2 +世錦 2 +世音 2 +丘陵 2 +中區 2 +中午 2 +中南 2 +中天 2 +中巴 2 +中正 2 +中途 2 +中道 2 +中遠 2 +主上 2 +主力 2 +主因 2 +主場 2 +主權 2 +主管 2 +主線 2 +乘船 2 +乘車 2 +乙級 2 +九州 2 +九巴 2 +也有 2 +也許 2 +乳酪 2 +事後 2 +二甘 2 +二郎 2 +互動 2 +五四 2 +五峰 2 +亞伯 2 +亞得 2 +亞德 2 +亞特 2 +亞韋 2 +交互 2 +交到 2 +交匯 2 +交好 2 +交情 2 +交戰 2 +交手 2 +交趾 2 +人力 2 +人心 2 +人才 2 +人文 2 +人格 2 +人熙 2 +人群 2 +人間 2 +人魚 2 +仁慈 2 +仁記 2 +今年 2 +介乎 2 +介石 2 +仍舊 2 +付款 2 +仙劍 2 +以南 2 +以東 2 +以至 2 +任城 2 +任天 2 +任意 2 +份額 2 +伊利 2 +伊比 2 +伊瓦 2 +伏威 2 +休閒 2 +伯公 2 +伯恩 2 +伯爵 2 +伯納 2 +伯靈 2 +伴隨 2 +似乎 2 +低地 2 +低廉 2 +低溫 2 +住房 2 +佐土 2 +佐夫 2 +佐藤 2 +佛山 2 +佛朗 2 +佛殿 2 +作好 2 +作業 2 +作物 2 +佩劍 2 +併入 2 +使命 2 +使者 2 +使館 2 +來訪 2 +例外 2 +供暖 2 +供熱 2 +供職 2 +依託 2 +侵入 2 +侵犯 2 +便宜 2 +促使 2 +促成 2 +俄明 2 +俗成 2 +俗稱 2 +保有 2 +保機 2 +保級 2 +保衛 2 +信心 2 +信義 2 +信長 2 +信雄 2 +修士 2 +修理 2 +修羅 2 +修習 2 +修訂 2 +修鍊 2 +個案 2 +倒台 2 +倒掛 2 +候鳥 2 +倡導 2 +倫多 2 +假如 2 +假期 2 +假髮 2 +偏差 2 +停戰 2 +停滯 2 +偶然 2 +偶爾 2 +偽造 2 +傑作 2 +催化 2 +傳入 2 +傳到 2 +傳動 2 +傳媒 2 +傳導 2 +傳授 2 +傳聞 2 +傳言 2 +傳送 2 +傳達 2 +債務 2 +傾聽 2 +僅僅 2 +僱員 2 +儀錶 2 +儒家 2 +優先 2 +儲備 2 +元代 2 +元件 2 +元帥 2 +元年 2 +元洲 2 +元璋 2 +元甲 2 +元首 2 +充斥 2 +充當 2 +兆帕 2 +先知 2 +先行 2 +先驅 2 +光線 2 +光譜 2 +光軸 2 +克基 2 +克塞 2 +克尼 2 +克思 2 +克托 2 +克林 2 +克果 2 +克洛 2 +克爾 2 +克用 2 +克西 2 +克隆 2 +克魯 2 +免職 2 +入伍 2 +入圍 2 +入學 2 +入獄 2 +入讀 2 +入門 2 +內務 2 +內外 2 +內心 2 +內流 2 +內爾 2 +全新 2 +全日 2 +全校 2 +全權 2 +全能 2 +全身 2 +兩千 2 +八一 2 +公學 2 +公寓 2 +公署 2 +公認 2 +六七 2 +六千 2 +兵營 2 +其父 2 +其頓 2 +具備 2 +典禮 2 +再造 2 +冠龍 2 +冬季 2 +冰兄 2 +冰峰 2 +冰川 2 +冰雪 2 +凡爾 2 +凱恩 2 +凱特 2 +凱瑞 2 +出家 2 +出席 2 +出演 2 +出產 2 +出賽 2 +出道 2 +分享 2 +分化 2 +分區 2 +分手 2 +分擔 2 +分歧 2 +分隊 2 +切爾 2 +刊載 2 +列佐 2 +列傳 2 +列出 2 +列斯 2 +列顛 2 +初學 2 +初年 2 +初稿 2 +初級 2 +初賽 2 +判斷 2 +判處 2 +別列 2 +利比 2 +利爾 2 +利物 2 +到來 2 +到底 2 
+制止 2 +制裁 2 +制訂 2 +刺客 2 +刺死 2 +刻有 2 +則布 2 +削弱 2 +前任 2 +前來 2 +前妻 2 +前途 2 +剝奪 2 +剩下 2 +副本 2 +創始 2 +創新 2 +創業 2 +劃入 2 +劃給 2 +劇烈 2 +劍術 2 +劍齒 2 +力克 2 +功率 2 +加之 2 +加勒 2 +加堆 2 +加斯 2 +加重 2 +劣勢 2 +助戰 2 +勇為 2 +勒拿 2 +勒比 2 +勒沃 2 +勒謝 2 +動一 2 +動機 2 +動漫 2 +動脈 2 +動車 2 +勝出 2 +勳章 2 +勳銜 2 +勾引 2 +包圍 2 +包廂 2 +包衣 2 +匕首 2 +化氫 2 +化纖 2 +化身 2 +化金 2 +化銠 2 +化鋁 2 +北宋 2 +北平 2 +北方 2 +北端 2 +北約 2 +北道 2 +北齊 2 +匯率 2 +匹克 2 +區分 2 +千9 2 +升學 2 +半山 2 +半球 2 +協商 2 +協奏 2 +協約 2 +南下 2 +南安 2 +南山 2 +南斯 2 +南遣 2 +南邊 2 +南陽 2 +南非 2 +南面 2 +南韓 2 +博弈 2 +博彩 2 +占卜 2 +卡夫 2 +卡普 2 +卡梅 2 +卡片 2 +卡特 2 +卡薩 2 +卡達 2 +印加 2 +印尼 2 +印製 2 +即位 2 +即時 2 +即興 2 +卿雲 2 +厄運 2 +原名 2 +原址 2 +原聲 2 +去除 2 +參觀 2 +參選 2 +又是 2 +又稱 2 +及格 2 +友好 2 +反叛 2 +反抗 2 +反擊 2 +叔叔 2 +取決 2 +受審 2 +受益 2 +受體 2 +口中 2 +口述 2 +古屋 2 +古巴 2 +古德 2 +古拉 2 +古斯 2 +古柯 2 +古蹟 2 +召喚 2 +可汗 2 +史學 2 +史提 2 +史蒂 2 +右岸 2 +司機 2 +司長 2 +司鼓 2 +吃肉 2 +吃飯 2 +各樣 2 +各級 2 +各自 2 +各部 2 +合同 2 +合川 2 +合稱 2 +合葬 2 +吉奧 2 +吉林 2 +吉里 2 +同人 2 +同居 2 +同工 2 +同體 2 +名人 2 +名古 2 +名縉 2 +名鎮 2 +向量 2 +君王 2 +吞併 2 +否則 2 +否定 2 +否是 2 +告別 2 +告知 2 +告終 2 +周歲 2 +呼叫 2 +呼籲 2 +和華 2 +和解 2 +和談 2 +咬金 2 +品行 2 +哈林 2 +哈根 2 +哈歐 2 +哥斯 2 +哥本 2 +哪裡 2 +售賣 2 +唯有 2 +唯美 2 +啟動 2 +啟睿 2 +啟航 2 +啟蒙 2 +喀則 2 +善化 2 +善意 2 +喉嚨 2 +喜劇 2 +喪生 2 +喬艾 2 +單元 2 +單曲 2 +嘉慶 2 +嘉木 2 +嘉玲 2 +器物 2 +噪音 2 +噴氣 2 +嚴密 2 +囚禁 2 +四分 2 +回國 2 +回想 2 +回憶 2 +回收 2 +回生 2 +因斯 2 +固醇 2 +國代 2 +國外 2 +國寶 2 +國徽 2 +國璋 2 +國語 2 +國鋒 2 +圍攻 2 +園藝 2 +圓頂 2 +圖樣 2 +圖爾 2 +圖畫 2 +團結 2 +團聚 2 +團長 2 +土原 2 +在位 2 +在來 2 +地外 2 +地帶 2 +坐診 2 +型態 2 +埃蒙 2 +城中 2 +城子 2 +域名 2 +域治 2 +執導 2 +執掌 2 +執教 2 +執法 2 +基勒 2 +基底 2 +基拉 2 +堂區 2 +堅固 2 +堅強 2 +報紙 2 +場場 2 +塔尼 2 +塔爾 2 +塞克 2 +塞塔 2 +塞摩 2 +塞琉 2 +塞羅 2 +境外 2 +墓地 2 +墓室 2 +增多 2 +增建 2 +增強 2 +增設 2 +墮胎 2 +壓倒 2 +壓強 2 +壓迫 2 +士蘭 2 +壯大 2 +壯年 2 +夏伊 2 +夏季 2 +夏茸 2 +外傳 2 +外圍 2 +外在 2 +外援 2 +外甥 2 +外觀 2 +外資 2 +多半 2 +多少 2 +夜晚 2 +夥伴 2 +大亂 2 +大事 2 +大佛 2 +大公 2 +大力 2 +大勝 2 +大半 2 +大堂 2 +大妃 2 +大將 2 +大屋 2 +大廳 2 +大批 2 +大敗 2 +大林 2 +大槍 2 +大火 2 +大碟 2 +大笨 2 +大維 2 +大衛 2 +大連 2 +大阪 2 +大黎 2 +天地 2 +天堂 2 +天子 2 +天師 2 +天敵 2 +天日 2 +天氣 2 +天衣 2 +天雷 2 +太古 2 +太多 2 +太大 2 +太子 2 +太守 2 +太極 2 +太洛 2 +太祖 2 +夫卡 2 +夸脫 2 +奉天 2 +契合 2 
+奢侈 2 +奧克 2 +奧多 2 +奧布 2 +奧朗 2 +奧特 2 +奪冠 2 +女士 2 +女孩 2 +女皇 2 +妖精 2 +妖魔 2 +妥善 2 +姊妹 2 +始皇 2 +姐妹 2 +姐弟 2 +姑家 2 +姓名 2 +姿態 2 +娘舅 2 +婆婆 2 +嫉妒 2 +子夜 2 +子珍 2 +孔子 2 +字元 2 +字型 2 +字體 2 +存有 2 +存活 2 +孟能 2 +季前 2 +季軍 2 +孤僻 2 +孤獨 2 +孵化 2 +學制 2 +學問 2 +學士 2 +學年 2 +學期 2 +學童 2 +學系 2 +學費 2 +宇一 2 +守衛 2 +安修 2 +安得 2 +安息 2 +安打 2 +安菲 2 +安邑 2 +完整 2 +宏觀 2 +宗室 2 +官職 2 +定下 2 +定俗 2 +定名 2 +定型 2 +定期 2 +客串 2 +客人 2 +客室 2 +客機 2 +客車 2 +宣戰 2 +害怕 2 +家久 2 +家堡 2 +家境 2 +家屬 2 +家產 2 +家衛 2 +家貓 2 +寄宿 2 +密切 2 +密蘇 2 +富人 2 +富特 2 +實力 2 +實務 2 +實用 2 +實習 2 +審判 2 +審查 2 +寫道 2 +寬廣 2 +寬頻 2 +寬鬆 2 +寺廟 2 +寺院 2 +封神 2 +封面 2 +射殺 2 +專區 2 +專員 2 +專有 2 +專題 2 +尊嚴 2 +尊重 2 +尋常 2 +對峙 2 +對待 2 +對陣 2 +導體 2 +小兒 2 +小姐 2 +小心 2 +小桃 2 +小梅 2 +小鎮 2 +小閻 2 +小青 2 +就任 2 +就業 2 +尼古 2 +尼奧 2 +尼納 2 +尼羅 2 +尼采 2 +尾部 2 +局限 2 +居委 2 +居里 2 +屋苑 2 +展館 2 +履仁 2 +屬名 2 +山丘 2 +山坡 2 +山海 2 +岩石 2 +岳母 2 +岳父 2 +崇拜 2 +崔西 2 +嶺南 2 +嶽麓 2 +工兵 2 +工商 2 +工農 2 +工黨 2 +左上 2 +左側 2 +巧眉 2 +巧言 2 +差距 2 +差點 2 +巴列 2 +巴勒 2 +巴拉 2 +巴比 2 +巴洛 2 +巴特 2 +市值 2 +市內 2 +市商 2 +市郊 2 +市長 2 +布卡 2 +布希 2 +布朗 2 +布萊 2 +布蘭 2 +布雷 2 +希羅 2 +帕克 2 +帕斯 2 +帕洛 2 +帕納 2 +帛琉 2 +帶去 2 +帶有 2 +常務 2 +常年 2 +常德 2 +常規 2 +幫忙 2 +干擾 2 +干涉 2 +干預 2 +平安 2 +平息 2 +平成 2 +平時 2 +平頂 2 +年初 2 +年科 2 +年紀 2 +年譜 2 +幼體 2 +幾十 2 +床墊 2 +序數 2 +店鋪 2 +度母 2 +座堂 2 +庫伊 2 +庫夫 2 +庫容 2 +庫爾 2 +康復 2 +康辛 2 +廉租 2 +廠房 2 +廢墟 2 +廢止 2 +廣大 2 +廣安 2 +廣義 2 +延任 2 +延遲 2 +建御 2 +建有 2 +建銘 2 +式各 2 +引種 2 +引退 2 +引進 2 +強風 2 +彈奏 2 +彈性 2 +彌迦 2 +彙集 2 +彩色 2 +影展 2 +往返 2 +征戰 2 +很近 2 +後端 2 +後裔 2 +徒刑 2 +得拉 2 +得票 2 +得道 2 +得里 2 +從小 2 +從而 2 +從軍 2 +御苑 2 +微山 2 +微粒 2 +德州 2 +德特 2 +德瑞 2 +德羅 2 +德華 2 +德輔 2 +德魯 2 +徹斯 2 +心中 2 +必烈 2 +志剛 2 +快樂 2 +忽必 2 +思念 2 +思明 2 +思科 2 +性交 2 +恆鳳 2 +恐慌 2 +恥辱 2 +恩寵 2 +恩賜 2 +悅強 2 +悲痛 2 +悲觀 2 +情意 2 +惠山 2 +愉景 2 +意料 2 +意願 2 +愛因 2 +愛娜 2 +愛德 2 +愛惜 2 +感動 2 +感受 2 +感染 2 +慈幼 2 +慈鯛 2 +慘敗 2 +慣例 2 +慶尚 2 +慶豐 2 +慾望 2 +憎恨 2 +應對 2 +懊惱 2 +懷俄 2 +懷舊 2 +懸浮 2 +懸索 2 +成仙 2 +成傑 2 +成因 2 +成型 2 +成就 2 +成群 2 +成貓 2 +戰亂 2 +戰列 2 +戰場 2 +戰士 2 +戰線 2 +戴麟 2 +手冊 2 +手動 2 +手可 2 +手機 2 +手裡 2 +才能 2 +打工 2 +打敗 2 +打破 2 +打開 2 +托斯 2 +扶植 2 +找出 2 +找回 2 +找尋 2 +承諾 2 +抄襲 2 +抓住 2 +投影 2 +投降 2 +抗擊 2 +抽取 2 +拆穿 2 +拆解 2 +拉姆 2 
+拉底 2 +拉德 2 +拉斐 2 +拉松 2 +拉爾 2 +拉瓦 2 +拉西 2 +拉邦 2 +拔出 2 +拖延 2 +招商 2 +招股 2 +拷貝 2 +拼音 2 +拿到 2 +拿破 2 +拿走 2 +指引 2 +指控 2 +指涉 2 +按鍵 2 +挖角 2 +挽救 2 +捐贈 2 +捕獲 2 +捕食 2 +捷運 2 +排放 2 +排氣 2 +排演 2 +掛架 2 +掠過 2 +掠食 2 +採訪 2 +接待 2 +接掌 2 +接種 2 +接駁 2 +控球 2 +推廣 2 +推翻 2 +推選 2 +描寫 2 +提倡 2 +提及 2 +提夫 2 +提示 2 +插圖 2 +揚聲 2 +換入 2 +換股 2 +損傷 2 +損毀 2 +搞笑 2 +搭檔 2 +搶險 2 +摩斯 2 +摩根 2 +撤出 2 +撤軍 2 +播客 2 +擅長 2 +擊退 2 +擒抱 2 +擔負 2 +據守 2 +擺脫 2 +擾動 2 +支出 2 +支柱 2 +收復 2 +收發 2 +收穫 2 +收視 2 +收集 2 +改回 2 +改寫 2 +改建 2 +改版 2 +改良 2 +改裝 2 +攻佔 2 +攻陷 2 +放映 2 +放置 2 +政協 2 +政變 2 +政黨 2 +故意 2 +故此 2 +故鄉 2 +效忠 2 +效率 2 +敏感 2 +敗給 2 +教友 2 +教員 2 +教徒 2 +教派 2 +整修 2 +整套 2 +整體 2 +敵對 2 +數位 2 +數理 2 +數目 2 +文元 2 +文官 2 +文帝 2 +文康 2 +文英 2 +文華 2 +文革 2 +文體 2 +斐濟 2 +斐爾 2 +斥資 2 +斯丁 2 +斯卡 2 +斯圖 2 +斯多 2 +斯庫 2 +斯康 2 +斯提 2 +斯泰 2 +斯爾 2 +斯理 2 +斯蒂 2 +新世 2 +新宿 2 +新岩 2 +新曲 2 +新澤 2 +新田 2 +新罕 2 +新興 2 +斷裂 2 +方位 2 +方尺 2 +方針 2 +施行 2 +旁邊 2 +旅鴿 2 +旋律 2 +日喀 2 +日益 2 +日航 2 +日行 2 +早上 2 +旺山 2 +旺盛 2 +昆士 2 +明哥 2 +明帝 2 +明頓 2 +易名 2 +昔日 2 +星形 2 +星蟒 2 +春天 2 +時尚 2 +時速 2 +晉升 2 +晚上 2 +晚會 2 +普及 2 +普拉 2 +普朗 2 +普爾 2 +普陀 2 +普頓 2 +景帝 2 +景觀 2 +景象 2 +智慧 2 +暑假 2 +暗殺 2 +暢銷 2 +暫停 2 +暫緩 2 +暴露 2 +曝氣 2 +更好 2 +更改 2 +更深 2 +更高 2 +書信 2 +書寫 2 +書房 2 +書法 2 +曼尼 2 +最久 2 +最小 2 +最少 2 +最新 2 +最遊 2 +會員 2 +會場 2 +會社 2 +會談 2 +會長 2 +月刊 2 +有助 2 +有意 2 +有毒 2 +有罪 2 +服從 2 +朔日 2 +朗克 2 +朗則 2 +朗明 2 +朝代 2 +木星 2 +木犀 2 +木管 2 +末年 2 +末期 2 +本區 2 +本哈 2 +本屆 2 +本班 2 +本站 2 +本質 2 +本願 2 +本龍 2 +村落 2 +村頭 2 +束縛 2 +東亞 2 +東吳 2 +東山 2 +東征 2 +東晉 2 +東正 2 +東視 2 +松潘 2 +林庄 2 +林維 2 +果實 2 +果汁 2 +架設 2 +柏油 2 +染色 2 +柔佛 2 +柔弱 2 +查德 2 +柯鹼 2 +柳江 2 +柴油 2 +柴灣 2 +校內 2 +校隊 2 +核能 2 +根本 2 +格勞 2 +格斯 2 +格曼 2 +格格 2 +格檔 2 +格達 2 +格魯 2 +桃太 2 +桌面 2 +桑葚 2 +桑那 2 +梅塔 2 +梅隆 2 +棕熊 2 +棟屋 2 +植被 2 +楊樹 2 +業者 2 +極大 2 +極性 2 +極高 2 +榮獲 2 +樂農 2 +標語 2 +標題 2 +樞機 2 +樟湖 2 +模具 2 +樣本 2 +樹木 2 +機動 2 +機員 2 +機槍 2 +橡樹 2 +橡膠 2 +橫山 2 +橫濱 2 +橫跨 2 +檢索 2 +檢討 2 +權威 2 +權貴 2 +次數 2 +次級 2 +次要 2 +次長 2 +欺騙 2 +歇爾 2 +歌仔 2 +歌唱 2 +歌聲 2 +歌迷 2 +歐拉 2 +歐斯 2 +正是 2 +正直 2 +正統 2 +正面 2 +此人 2 +此案 2 +此物 2 +此種 2 +此線 2 +此舉 2 +此類 2 +步態 2 +武大 2 +武昌 2 +武松 2 +歧視 2 +歸類 2 +死靈 2 +殘存 2 +殘忍 2 +殘酷 2 +殯葬 2 +殺傷 2 +殺掉 2 +每位 2 
+每周 2 +每層 2 +每日 2 +每次 2 +毒性 2 +毒殺 2 +毒藥 2 +比婭 2 +比施 2 +比西 2 +毗鄰 2 +毛利 2 +氏星 2 +民不 2 +民調 2 +民都 2 +氣田 2 +氨酸 2 +氫彈 2 +氯金 2 +水孔 2 +水手 2 +水準 2 +水溫 2 +水滸 2 +水牛 2 +水質 2 +水道 2 +水餃 2 +永嘉 2 +永寧 2 +永樂 2 +汗位 2 +汝霖 2 +江北 2 +江戶 2 +池尻 2 +汪達 2 +決心 2 +決戰 2 +沃夫 2 +沃思 2 +沖繩 2 +沙咀 2 +沙柏 2 +沙河 2 +沙龍 2 +河水 2 +泉州 2 +泊桑 2 +法拉 2 +法醫 2 +泡沫 2 +注射 2 +注重 2 +泰坦 2 +泰姬 2 +泰然 2 +洗手 2 +洛水 2 +洛辛 2 +洛馬 2 +活性 2 +派出 2 +派別 2 +派駐 2 +流傳 2 +流失 2 +流求 2 +流派 2 +流通 2 +浙東 2 +浩劫 2 +浮冰 2 +海亞 2 +海南 2 +海涌 2 +海豹 2 +海邊 2 +海關 2 +海默 2 +消化 2 +消失 2 +淄博 2 +淮河 2 +深厚 2 +深得 2 +深愛 2 +深遠 2 +淹沒 2 +添加 2 +清晨 2 +清楚 2 +清華 2 +清鍾 2 +減輕 2 +游牧 2 +湖州 2 +湯興 2 +溝通 2 +溪流 2 +溫和 2 +溫州 2 +溫布 2 +溫暖 2 +溫特 2 +滄州 2 +滅口 2 +滙豐 2 +滬東 2 +滿足 2 +漁業 2 +漂流 2 +演說 2 +漢佛 2 +漢口 2 +漸漸 2 +潭西 2 +潮州 2 +澤普 2 +澳底 2 +激光 2 +激戰 2 +激起 2 +濃縮 2 +濕原 2 +濕度 2 +濟寧 2 +濱松 2 +濱湖 2 +瀏覽 2 +灌木 2 +火藥 2 +灰狼 2 +灰色 2 +災害 2 +炮台 2 +為數 2 +烏孜 2 +烏孫 2 +烏扎 2 +烏斯 2 +無力 2 +無效 2 +無界 2 +無緣 2 +無辜 2 +無黨 2 +焦耳 2 +煙熏 2 +照料 2 +照顧 2 +煮制 2 +熊隻 2 +熱比 2 +熱衷 2 +燃燒 2 +燒毀 2 +燒餅 2 +燕山 2 +爪獸 2 +爭取 2 +爭執 2 +爭辯 2 +爭霸 2 +父子 2 +爾伯 2 +爾克 2 +爾加 2 +爾卡 2 +爾及 2 +爾幹 2 +爾庫 2 +爾後 2 +爾文 2 +爾滕 2 +爾登 2 +爾良 2 +爾茨 2 +爾薩 2 +爾西 2 +爾賽 2 +爾那 2 +牆體 2 +片段 2 +牙買 2 +牛仔 2 +牛肉 2 +牧師 2 +牧羊 2 +牧養 2 +物價 2 +物浦 2 +特丹 2 +特使 2 +特性 2 +特拉 2 +特權 2 +特烈 2 +特爾 2 +特種 2 +特維 2 +特羅 2 +特裡 2 +犀欖 2 +犬隻 2 +犬齒 2 +狐狸 2 +狸藻 2 +猛烈 2 +猛虎 2 +猶他 2 +猶豫 2 +獎章 2 +獎金 2 +獨居 2 +獨自 2 +獵奇 2 +獵殺 2 +玄機 2 +率軍 2 +玉帶 2 +玉門 2 +王位 2 +王妃 2 +玩具 2 +珀斯 2 +珀西 2 +珍品 2 +現址 2 +現狀 2 +球迷 2 +理念 2 +琳達 2 +琴行 2 +瑞克 2 +瑟夫 2 +瑪納 2 +環島 2 +環形 2 +環球 2 +環礁 2 +瓊璘 2 +瓜分 2 +瓦伊 2 +瓦拉 2 +瓦斯 2 +甘醇 2 +甚少 2 +甚麼 2 +甜甜 2 +生日 2 +生母 2 +生病 2 +產值 2 +產區 2 +產物 2 +用地 2 +用電 2 +由來 2 +由衷 2 +甲板 2 +甲醇 2 +申花 2 +男爵 2 +町村 2 +留存 2 +留學 2 +留意 2 +留香 2 +畫上 2 +畫報 2 +異性 2 +當事 2 +當代 2 +當前 2 +當場 2 +當成 2 +疫情 2 +病人 2 +病理 2 +痕迹 2 +登記 2 +登輝 2 +發售 2 +發回 2 +發掘 2 +發覺 2 +發音 2 +白紙 2 +白馬 2 +百多 2 +百度 2 +皇子 2 +皇宮 2 +皮埃 2 +皮特 2 +盆子 2 +益世 2 +盟校 2 +監察 2 +監製 2 +監視 2 +直人 2 +直布 2 +直轄 2 +直通 2 +相傳 2 +相戀 2 +相等 2 +相識 2 +相連 2 +省立 2 +看似 2 +看法 2 +真宗 2 +真情 2 +真的 2 +眼鏡 2 +眾人 2 +睡衣 2 +矛盾 2 +知節 2 +短面 2 +矮人 2 +石化 2 +石原 2 +石家 2 +砍柴 2 +研製 2 +研討 2 +破崙 2 +硫化 2 
+硫磺 2 +碎石 2 +碧翠 2 +碩士 2 +確實 2 +磨損 2 +礦業 2 +示威 2 +社交 2 +祕教 2 +祖外 2 +神代 2 +祺瑞 2 +福來 2 +福利 2 +福部 2 +福音 2 +禮節 2 +禽龍 2 +秀全 2 +秀吉 2 +秋天 2 +科幻 2 +科文 2 +科特 2 +科羅 2 +科赫 2 +科雷 2 +秘魯 2 +租客 2 +移除 2 +稀有 2 +程式 2 +種姓 2 +稱呼 2 +稱臣 2 +稱讚 2 +稻盛 2 +穆爾 2 +穆罕 2 +積分 2 +空缺 2 +穿耳 2 +突出 2 +突厥 2 +突擊 2 +立下 2 +立憲 2 +立熙 2 +站台 2 +竟然 2 +竣工 2 +童星 2 +競技 2 +競馬 2 +笑話 2 +第九 2 +筆下 2 +等到 2 +等待 2 +策劃 2 +管弦 2 +管治 2 +節奏 2 +簡易 2 +簽署 2 +籃壇 2 +籃子 2 +籌建 2 +米拉 2 +米格 2 +米莉 2 +精度 2 +精武 2 +精液 2 +精緻 2 +精美 2 +精采 2 +糖份 2 +約克 2 +約會 2 +紅木 2 +紅樓 2 +紅麴 2 +納姆 2 +納爾 2 +納辛 2 +紐卡 2 +紓緩 2 +純淨 2 +純粹 2 +紙幣 2 +紛紛 2 +素質 2 +索引 2 +索瓦 2 +索菲 2 +細緻 2 +組長 2 +結晶 2 +結識 2 +絕望 2 +統稱 2 +絲綢 2 +經紀 2 +經費 2 +維修 2 +維克 2 +維利 2 +維奇 2 +維奧 2 +維尼 2 +維年 2 +維迪 2 +維鈞 2 +維魯 2 +網上 2 +網友 2 +網民 2 +緊鄰 2 +線粒 2 +線西 2 +編入 2 +編寫 2 +編製 2 +緩存 2 +緩慢 2 +緬因 2 +縣城 2 +縣治 2 +縣長 2 +縱橫 2 +縱貫 2 +總值 2 +總會 2 +總監 2 +總管 2 +總額 2 +繁忙 2 +繁榮 2 +繁殖 2 +繞城 2 +繪圖 2 +續篇 2 +續約 2 +罕布 2 +罕默 2 +罪案 2 +罪行 2 +署名 2 +署長 2 +罷黜 2 +罹患 2 +羅丹 2 +羅伊 2 +羅克 2 +羅塞 2 +羅夫 2 +羅希 2 +羅拉 2 +羅漢 2 +羅素 2 +羅貝 2 +羅那 2 +羅陀 2 +羊曲 2 +羊毛 2 +美女 2 +義和 2 +習性 2 +翠絲 2 +翻新 2 +翻越 2 +老年 2 +老式 2 +老舍 2 +考場 2 +考證 2 +耕作 2 +耕種 2 +耳他 2 +耳道 2 +耶和 2 +耶律 2 +耶魯 2 +聊生 2 +聖三 2 +聖赫 2 +聘任 2 +聚合 2 +聚居 2 +聯名 2 +聰明 2 +聲名 2 +聲望 2 +聲道 2 +職員 2 +肉糕 2 +肉食 2 +肖像 2 +肖金 2 +肝臟 2 +股權 2 +肯尼 2 +育才 2 +育種 2 +肺炎 2 +胎兒 2 +胖子 2 +能源 2 +能級 2 +腓特 2 +腳趾 2 +腹面 2 +腺葉 2 +膝蓋 2 +膠質 2 +臘汁 2 +臣民 2 +臨床 2 +臨淄 2 +臨近 2 +臨邑 2 +自主 2 +自助 2 +自家 2 +自殺 2 +自衛 2 +自轉 2 +臭氧 2 +至於 2 +至關 2 +致死 2 +臺中 2 +臺北 2 +興化 2 +舉人 2 +舉動 2 +舊址 2 +舒服 2 +舒適 2 +舞台 2 +船尾 2 +船廠 2 +船艦 2 +船長 2 +艦艇 2 +艱苦 2 +色度 2 +色素 2 +艾塞 2 +艾爾 2 +花卉 2 +花崗 2 +花樣 2 +花費 2 +苣苔 2 +若干 2 +若是 2 +苦惱 2 +苯乙 2 +英俊 2 +英超 2 +英雄 2 +茅斯 2 +茨威 2 +荷花 2 +莆田 2 +莉拉 2 +莎拉 2 +莎莉 2 +莫名 2 +莫泊 2 +莫雷 2 +莫高 2 +菁英 2 +菩薩 2 +華夏 2 +華隆 2 +菲德 2 +萄牙 2 +萊爾 2 +萬9 2 +萬宜 2 +萬春 2 +萬萬 2 +落入 2 +落差 2 +葉子 2 +葉木 2 +葉海 2 +葉片 2 +著想 2 +著迷 2 +葛馮 2 +葵盛 2 +蒂斯 2 +蒂羅 2 +蒂芬 2 +蒐集 2 +蒙哥 2 +蒙山 2 +蒙蔽 2 +蒸餾 2 +蓄電 2 +蓮屬 2 +蔬菜 2 +蔭權 2 +薩哈 2 +薩拉 2 +薩維 2 +薩達 2 +薪資 2 +藉口 2 +藉著 2 +藍調 2 +藍鯨 2 +藏在 2 +藝員 2 +藤葉 2 +蘇丹 2 +蘇黎 2 +蘭德 2 +蘭特 2 +蘭西 2 +蘭豬 2 +虎豹 2 +虐待 2 +虔誠 2 +處境 2 +蛇夫 2 +蛇類 2 +螺旋 2 
+蠟燭 2 +蠻族 2 +血清 2 +血緣 2 +行李 2 +行程 2 +行車 2 +行駛 2 +術士 2 +街區 2 +衛冕 2 +衛戍 2 +表皮 2 +袋中 2 +裁定 2 +補充 2 +補助 2 +裝病 2 +裡忒 2 +製冷 2 +製片 2 +西元 2 +西區 2 +西奧 2 +西沙 2 +西甲 2 +西站 2 +西西 2 +西迪 2 +西鄰 2 +西鐵 2 +西雅 2 +要素 2 +要職 2 +見天 2 +見義 2 +見證 2 +規範 2 +視覺 2 +親密 2 +親屬 2 +親情 2 +親戚 2 +親緣 2 +親近 2 +觀世 2 +觀塘 2 +觀賞 2 +角宿 2 +角逐 2 +解鎖 2 +解體 2 +言論 2 +訂婚 2 +訂購 2 +討伐 2 +記號 2 +許可 2 +許諾 2 +訴說 2 +註冊 2 +評議 2 +評選 2 +詞彙 2 +詩篇 2 +詮釋 2 +話語 2 +該廟 2 +該車 2 +該館 2 +誕辰 2 +誘發 2 +語堂 2 +誤導 2 +說唱 2 +課題 2 +調動 2 +調料 2 +調景 2 +論壇 2 +諸侯 2 +諸塞 2 +諸葛 2 +諾夫 2 +諾斯 2 +諾貝 2 +謙虛 2 +謝尼 2 +謠言 2 +證件 2 +證券 2 +證據 2 +譜寫 2 +警報 2 +警官 2 +警方 2 +警長 2 +譯名 2 +譯法 2 +護士 2 +護法 2 +變得 2 +變換 2 +變更 2 +變異 2 +象棋 2 +象牙 2 +豪華 2 +貓頭 2 +財務 2 +財困 2 +財團 2 +財物 2 +貨櫃 2 +販子 2 +貪污 2 +貴人 2 +買下 2 +買來 2 +買加 2 +賀氏 2 +資方 2 +資產 2 +賈斯 2 +賠償 2 +賢妃 2 +質子 2 +質疑 2 +質素 2 +賴恩 2 +購入 2 +購物 2 +賽馬 2 +赤川 2 +赫勒 2 +赫爾 2 +走出 2 +走路 2 +起飛 2 +趁機 2 +超越 2 +越低 2 +越獄 2 +越遠 2 +越高 2 +趕出 2 +趨同 2 +路上 2 +路口 2 +身長 2 +躲過 2 +車中 2 +車廂 2 +車資 2 +車隊 2 +軍區 2 +軍校 2 +軍法 2 +載重 2 +輔音 2 +輕傷 2 +輕型 2 +輕視 2 +輟學 2 +轄境 2 +轄有 2 +轉介 2 +轉車 2 +轎車 2 +轟動 2 +辛亥 2 +辛堡 2 +辛普 2 +辣妹 2 +辭退 2 +辯護 2 +農地 2 +農場 2 +農曆 2 +農田 2 +農藥 2 +近藤 2 +近衛 2 +迦納 2 +迪克 2 +迪爾 2 +迫害 2 +迴避 2 +迷信 2 +迷幻 2 +追溯 2 +退化 2 +送入 2 +逃出 2 +逃避 2 +透明 2 +逐鹿 2 +通報 2 +通婚 2 +通知 2 +通稱 2 +通航 2 +速寫 2 +速率 2 +造出 2 +造船 2 +連同 2 +連帶 2 +連鎖 2 +週年 2 +進修 2 +進化 2 +進駐 2 +遇到 2 +遊仙 2 +遊客 2 +遊玩 2 +運河 2 +運用 2 +運轉 2 +過世 2 +過勞 2 +過年 2 +過於 2 +過海 2 +過關 2 +道場 2 +道夫 2 +道理 2 +道生 2 +達尼 2 +違反 2 +遞歸 2 +遠東 2 +遭受 2 +遴選 2 +遵守 2 +遷往 2 +選中 2 +選拔 2 +選民 2 +選為 2 +遺囑 2 +遺產 2 +遺跡 2 +遼東 2 +還珠 2 +那些 2 +那樣 2 +邦初 2 +郊外 2 +部下 2 +部族 2 +郵件 2 +都柏 2 +都洛 2 +都統 2 +鄭國 2 +鄭氏 2 +鄰國 2 +配合 2 +配對 2 +酒吧 2 +酒泉 2 +酒醉 2 +醜聞 2 +醫藥 2 +釉下 2 +釋迦 2 +里奇 2 +里巴 2 +里昂 2 +里發 2 +里程 2 +里高 2 +重修 2 +重型 2 +重華 2 +重言 2 +重返 2 +重重 2 +重量 2 +野獸 2 +量表 2 +金星 2 +金漢 2 +金牌 2 +金蓮 2 +金酸 2 +金雞 2 +金馬 2 +鋼鐵 2 +錫金 2 +鍵盤 2 +鐘錶 2 +鐵伊 2 +鐵達 2 +鑄造 2 +長久 2 +長城 2 +長女 2 +長春 2 +長老 2 +長者 2 +長興 2 +長蘆 2 +長軸 2 +長音 2 +門前 2 +門口 2 +門子 2 +門戶 2 +門齒 2 +開創 2 +開口 2 +開心 2 +開拍 2 +開採 2 +開會 2 +開火 2 +開羅 2 +開賽 2 +開通 2 +開門 2 +開除 2 +閏年 2 +間接 2 +間隙 2 +閘門 2 +閱讀 2 +關注 2 +關聯 2 +關說 2 
+關鍵 2 +關門 2 +關閉 2 +防範 2 +防衛 2 +阻擋 2 +阻礙 2 +阿保 2 +阿姆 2 +阿森 2 +阿特 2 +阿美 2 +附帶 2 +降級 2 +降解 2 +除籍 2 +陰霾 2 +陵墓 2 +陵寢 2 +陶瓷 2 +陷阱 2 +陽澄 2 +隆頭 2 +隊友 2 +隊長 2 +隋代 2 +隕石 2 +隨之 2 +雅圖 2 +集成 2 +集資 2 +集雨 2 +集體 2 +雍正 2 +雕像 2 +離任 2 +離婚 2 +離心 2 +雨林 2 +雪貂 2 +雲想 2 +零星 2 +雷拉 2 +雷馬 2 +電動 2 +電壓 2 +電流 2 +電纜 2 +電能 2 +電路 2 +電鐵 2 +震動 2 +震盪 2 +震驚 2 +霍爾 2 +霸主 2 +霸王 2 +靈活 2 +靈素 2 +青聯 2 +青藏 2 +青銅 2 +靜電 2 +面熊 2 +面臨 2 +面試 2 +面部 2 +韋斯 2 +音系 2 +音變 2 +韻律 2 +順序 2 +預備 2 +預定 2 +預言 2 +預計 2 +頒布 2 +頒發 2 +領地 2 +頭等 2 +頭部 2 +頭魚 2 +頭鷹 2 +願望 2 +顯得 2 +顯聖 2 +顯著 2 +風俗 2 +風景 2 +風氣 2 +風濕 2 +風雲 2 +風靡 2 +食夢 2 +食材 2 +飢荒 2 +飲品 2 +飲用 2 +餘額 2 +館藏 2 +饑荒 2 +饒舌 2 +首位 2 +首播 2 +首爾 2 +首腦 2 +首部 2 +香蕉 2 +馬其 2 +馬拉 2 +馬歇 2 +馬耳 2 +馬薩 2 +駐紮 2 +駐足 2 +駕駛 2 +骨頭 2 +骨髓 2 +體制 2 +體力 2 +體型 2 +體校 2 +體重 2 +體驗 2 +高傲 2 +高利 2 +高壓 2 +高平 2 +高校 2 +高止 2 +高爾 2 +高能 2 +高興 2 +高郵 2 +鬆散 2 +魅力 2 +魚雷 2 +魚頭 2 +魯斯 2 +魯明 2 +魯殊 2 +魯茲 2 +鮮明 2 +鯉形 2 +鯉科 2 +鰂魚 2 +鱸形 2 +鳥取 2 +鳥綱 2 +鳳凰 2 +鳳翔 2 +麗亞 2 +麗珠 2 +麗茲 2 +麗華 2 +麟趾 2 +麻河 2 +麻省 2 +黃帝 2 +黃色 2 +黎世 2 +黎加 2 +黑幫 2 +黑貓 2 +黑龍 2 +默多 2 +默德 2 +點擊 2 +點數 2 +點球 2 +黨派 2 +鼎盛 2 +鼠猴 2 +鼠疫 2 +齊克 2 +齒擦 2 +齒虎 2 +齒軌 2 +齒龍 2 +齧齒 2 +龐家 2 +'s 1 +-A 1 +-B 1 +-L 1 +-P 1 +-S 1 +-U 1 +-r 1 +.q 1 +.x 1 +9F 1 +9L 1 +9M 1 +9O 1 +9X 1 +9c 1 +9n 1 +9成 1 +AF 1 +AM 1 +AN 1 +AR 1 +Aa 1 +Ac 1 +Ag 1 +B- 1 +B9 1 +BH 1 +BK 1 +BS 1 +Bl 1 +Bu 1 +CB 1 +CD 1 +CN 1 +Ci 1 +Cs 1 +Cu 1 +Cá 1 +D- 1 +D9 1 +DD 1 +DF 1 +DM 1 +DO 1 +Dr 1 +Du 1 +EG 1 +EK 1 +EP 1 +ER 1 +ES 1 +EX 1 +Ed 1 +Em 1 +Es 1 +Ex 1 +F9 1 +FA 1 +FD 1 +FH 1 +FI 1 +FL 1 +FS 1 +FU 1 +Fe 1 +Fl 1 +Fu 1 +G9 1 +GF 1 +GT 1 +GY 1 +Gh 1 +Gu 1 +HC 1 +HE 1 +HK 1 +HO 1 +HP 1 +HS 1 +I- 1 +IA 1 +IB 1 +IF 1 +IN 1 +IP 1 +IT 1 +IU 1 +Il 1 +Ir 1 +It 1 +JP 1 +KI 1 +KK 1 +KR 1 +Kn 1 +Ko 1 +LA 1 +LC 1 +LD 1 +LR 1 +LS 1 +LY 1 +MD 1 +MF 1 +ML 1 +MM 1 +MS 1 +NC 1 +NG 1 +NH 1 +NI 1 +NM 1 +NZ 1 +O. 
1 +O9 1 +OK 1 +ON 1 +OS 1 +OV 1 +Od 1 +On 1 +Op 1 +Os 1 +Ot 1 +P9 1 +PF 1 +PH 1 +PT 1 +PU 1 +Ps 1 +R9 1 +RC 1 +RE 1 +RY 1 +Ra 1 +Rh 1 +S- 1 +S9 1 +SE 1 +SH 1 +SI 1 +SS 1 +Si 1 +Sn 1 +Sr 1 +Sy 1 +Sō 1 +T9 1 +TA 1 +TI 1 +TN 1 +Ta 1 +Ts 1 +Ty 1 +UD 1 +UM 1 +UP 1 +Uh 1 +Ut 1 +VA 1 +VF 1 +VS 1 +Vo 1 +WH 1 +WT 1 +Wh 1 +XE 1 +YP 1 +Ye 1 +ZZ 1 +Ze 1 +`` 1 +a. 1 +aw 1 +ax 1 +bb 1 +bd 1 +bs 1 +cm 1 +cn 1 +cq 1 +dm 1 +dn 1 +dt 1 +du 1 +dv 1 +dy 1 +dé 1 +eH 1 +eS 1 +ej 1 +ex 1 +f( 1 +fe 1 +fi 1 +fk 1 +fl 1 +g( 1 +gb 1 +gd 1 +gf 1 +gj 1 +gm 1 +gn 1 +gr 1 +hC 1 +iB 1 +iT 1 +ih 1 +ik 1 +iw 1 +ja 1 +ji 1 +ju 1 +k. 1 +kl 1 +kn 1 +ko 1 +kt 1 +kö 1 +l9 1 +lR 1 +lc 1 +lf 1 +lg 1 +lw 1 +m. 1 +mg 1 +mn 1 +mr 1 +ms 1 +n= 1 +nf 1 +nh 1 +nl 1 +np 1 +nv 1 +nw 1 +ny 1 +oM 1 +oe 1 +oz 1 +pb 1 +pk 1 +pl 1 +pm 1 +pt 1 +q- 1 +q. 1 +qq 1 +r- 1 +rj 1 +rp 1 +rz 1 +sM 1 +sl 1 +sn 1 +t' 1 +tS 1 +tc 1 +tm 1 +tp 1 +u. 1 +uT 1 +uv 1 +ux 1 +uz 1 +vo 1 +vr 1 +w= 1 +wn 1 +wo 1 +x. 1 +xa 1 +xi 1 +xp 1 +yf 1 +yg 1 +yh 1 +yl 1 +ym 1 +yo 1 +za 1 +zo 1 +zp 1 +zu 1 +zz 1 +ál 1 +ém 1 +öy 1 +ōy 1 +​​ 1 +​物 1 +、再 1 +一, 1 +一一 1 +一九 1 +一併 1 +一億 1 +一分 1 +一到 1 +一勞 1 +一反 1 +一句 1 +一字 1 +一式 1 +一成 1 +一戰 1 +一指 1 +一改 1 +一概 1 +一模 1 +一民 1 +一氧 1 +一炮 1 +一無 1 +一爭 1 +一發 1 +一益 1 +一而 1 +一舉 1 +一落 1 +一見 1 +一談 1 +一路 1 +一身 1 +一邊 1 +一點 1 +丁字 1 +丁斯 1 +丁漢 1 +丁目 1 +丁蛋 1 +七七 1 +七里 1 +三、 1 +三一 1 +三亞 1 +三元 1 +三千 1 +三原 1 +三崎 1 +三星 1 +三浦 1 +三王 1 +三索 1 +三船 1 +三菱 1 +三萬 1 +三藩 1 +三軍 1 +三郎 1 +三門 1 +上傳 1 +上去 1 +上古 1 +上司 1 +上埔 1 +上報 1 +上塘 1 +上奏 1 +上學 1 +上尉 1 +上手 1 +上新 1 +上朝 1 +上林 1 +上沖 1 +上班 1 +上端 1 +上網 1 +上線 1 +上色 1 +上蓋 1 +上訪 1 +上調 1 +上路 1 +上身 1 +上車 1 +上選 1 +上部 1 +上限 1 +上集 1 +上雲 1 +上顎 1 +上高 1 +下剋 1 +下圖 1 +下徹 1 +下樓 1 +下河 1 +下潛 1 +下獄 1 +下稱 1 +下蝕 1 +下行 1 +下設 1 +下課 1 +下跌 1 +下遊 1 +下部 1 +下關 1 +下院 1 +下集 1 +下雷 1 +下面 1 +下顎 1 +下風 1 +不丹 1 +不乏 1 +不了 1 +不以 1 +不克 1 +不入 1 +不凡 1 +不利 1 +不到 1 +不力 1 +不動 1 +不去 1 +不吃 1 +不合 1 +不和 1 +不問 1 +不均 1 +不多 1 +不大 1 +不定 1 +不容 1 +不實 1 +不惜 1 +不愛 1 +不懷 1 +不扣 1 +不折 1 +不捨 1 +不收 1 +不敬 1 +不料 1 +不易 1 +不景 1 +不服 1 +不朽 1 +不歸 1 +不渝 1 +不準 1 +不理 1 
+不畏 1 +不符 1 +不紊 1 +不純 1 +不絕 1 +不經 1 +不群 1 +不自 1 +不行 1 +不衰 1 +不要 1 +不見 1 +不解 1 +不計 1 +不該 1 +不詳 1 +不豐 1 +不賣 1 +不輸 1 +不辭 1 +不道 1 +不達 1 +不適 1 +不銹 1 +不限 1 +不露 1 +不顧 1 +且是 1 +世上 1 +世人 1 +世代 1 +世充 1 +世則 1 +世子 1 +世昌 1 +世田 1 +世矚 1 +世祿 1 +世綱 1 +世貿 1 +世道 1 +世銘 1 +丙組 1 +丞益 1 +丞相 1 +並無 1 +並稱 1 +並系 1 +並芘 1 +中仙 1 +中信 1 +中原 1 +中堅 1 +中場 1 +中外 1 +中底 1 +中彈 1 +中性 1 +中投 1 +中斷 1 +中旬 1 +中校 1 +中樞 1 +中檔 1 +中殿 1 +中毒 1 +中波 1 +中田 1 +中級 1 +中綴 1 +中線 1 +中耳 1 +中聯 1 +中興 1 +中落 1 +中葉 1 +中藥 1 +中觀 1 +中超 1 +中農 1 +中鐵 1 +串聯 1 +丸都 1 +丹噶 1 +丹姆 1 +丹路 1 +主修 1 +主創 1 +主導 1 +主帶 1 +主幹 1 +主意 1 +主控 1 +主治 1 +主炮 1 +主犯 1 +主筆 1 +主船 1 +主食 1 +乃伊 1 +乃威 1 +乃狄 1 +久經 1 +久藏 1 +之介 1 +之好 1 +之所 1 +之樂 1 +之泰 1 +之申 1 +之道 1 +之銓 1 +之鋒 1 +乘勢 1 +乘搭 1 +乘撘 1 +乘裝 1 +乙二 1 +乙未 1 +乙組 1 +乙苯 1 +九五 1 +九十 1 +九江 1 +九鐵 1 +乞多 1 +也夫 1 +乳房 1 +乾季 1 +乾德 1 +乾淨 1 +乾西 1 +亂倫 1 +亂刀 1 +事先 1 +事態 1 +事發 1 +事與 1 +事跡 1 +事蹟 1 +二中 1 +二二 1 +二八 1 +二宮 1 +二戶 1 +二氮 1 +二烷 1 +二道 1 +于敏 1 +互作 1 +互利 1 +互助 1 +互惠 1 +互通 1 +互選 1 +五一 1 +五中 1 +五八 1 +五分 1 +五常 1 +五弟 1 +五彩 1 +五成 1 +五指 1 +五氧 1 +五萬 1 +井住 1 +井字 1 +井村 1 +井田 1 +些微 1 +亞丁 1 +亞亞 1 +亞他 1 +亞吉 1 +亞哥 1 +亞基 1 +亞堡 1 +亞士 1 +亞奧 1 +亞尼 1 +亞布 1 +亞彬 1 +亞托 1 +亞文 1 +亞斯 1 +亞普 1 +亞東 1 +亞流 1 +亞烏 1 +亞瑟 1 +亞絲 1 +亞莫 1 +亞西 1 +亞豬 1 +亞路 1 +亞辛 1 +亞迪 1 +亞運 1 +亞邦 1 +亞麻 1 +亡故 1 +交付 1 +交代 1 +交出 1 +交口 1 +交回 1 +交州 1 +交替 1 +交棒 1 +交涉 1 +交界 1 +交行 1 +交角 1 +交談 1 +交道 1 +交錯 1 +亦即 1 +亨得 1 +京劇 1 +京王 1 +京釜 1 +亭湖 1 +亮相 1 +人世 1 +人仕 1 +人字 1 +人客 1 +人意 1 +人手 1 +人打 1 +人日 1 +人權 1 +人殉 1 +人氣 1 +人祭 1 +人種 1 +人稱 1 +人行 1 +人道 1 +人選 1 +人麻 1 +什倫 1 +什圖 1 +什沃 1 +什維 1 +什艾 1 +仁傑 1 +仁和 1 +仁壽 1 +仁守 1 +仁宗 1 +仁慕 1 +仁煥 1 +仁牙 1 +仁玕 1 +仁社 1 +仁穆 1 +仁粹 1 +仁青 1 +仇人 1 +今川 1 +介壽 1 +介質 1 +仍是 1 +仍有 1 +仍算 1 +仔林 1 +仔沙 1 +他倆 1 +他定 1 +他家 1 +他能 1 +他那 1 +仙人 1 +仙奴 1 +仙鶴 1 +代之 1 +代亞 1 +代價 1 +代名 1 +代幣 1 +代數 1 +代牧 1 +代相 1 +代碼 1 +令狐 1 +令華 1 +以千 1 +以爲 1 +仰光 1 +仰望 1 +仲雄 1 +任免 1 +任選 1 +伊什 1 +伊克 1 +伊喀 1 +伊塔 1 +伊娃 1 +伊尹 1 +伊德 1 +伊摩 1 +伊朗 1 +伊杜 1 +伊爾 1 +伊犁 1 +伊瑪 1 +伊甸 1 +伊薩 1 +伊里 1 +伊阿 1 +伊頓 1 +伍士 1 +伎倆 1 +伏塔 1 +伏契 1 +伏爾 1 +伏瓦 1 +伐克 1 +休假 1 +休克 1 +休士 1 +休憩 1 +休斯 1 +休閑 1 +休養 1 +伙食 1 +伯來 1 +伯克 1 +伯塔 1 +伯多 1 +伯拉 1 
+伯明 1 +伯格 1 +伯溫 1 +伯爾 1 +伯茲 1 +伯莎 1 +伯虎 1 +伯謙 1 +伯達 1 +伯里 1 +伴侶 1 +伴奏 1 +伴有 1 +伴生 1 +伸一 1 +伸冤 1 +伸延 1 +伸港 1 +伽馬 1 +但斯 1 +佈局 1 +佈置 1 +佈道 1 +位在 1 +位居 1 +位階 1 +位面 1 +低下 1 +低估 1 +低價 1 +低層 1 +低平 1 +低座 1 +低檔 1 +低潮 1 +低等 1 +低調 1 +低額 1 +住友 1 +住所 1 +住進 1 +佐佐 1 +佐勞 1 +佐和 1 +佐拉 1 +佐木 1 +佐民 1 +佔用 1 +何利 1 +何力 1 +何方 1 +佛事 1 +佛典 1 +佛森 1 +佛經 1 +佛萊 1 +佛蒙 1 +佛雷 1 +佛頭 1 +佛龍 1 +作對 1 +作怪 1 +作曲 1 +作次 1 +作法 1 +作為 1 +作畫 1 +作自 1 +作雲 1 +作風 1 +你變 1 +佩佐 1 +佩儂 1 +佩克 1 +佩戴 1 +佩斯 1 +佩琪 1 +佩蘭 1 +佳作 1 +佳佳 1 +佳節 1 +併發 1 +使喚 1 +使團 1 +使節 1 +侄子 1 +來杜 1 +來看 1 +來納 1 +來臨 1 +來襲 1 +來館 1 +侈談 1 +侍奉 1 +侍女 1 +侍從 1 +侏羅 1 +供水 1 +供電 1 +供養 1 +依拉 1 +依次 1 +依照 1 +依瑪 1 +依附 1 +侮辱 1 +侵佔 1 +侵害 1 +便利 1 +便捷 1 +便是 1 +便服 1 +便當 1 +便秘 1 +俊業 1 +俘獲 1 +俚頭 1 +保住 1 +保全 1 +保大 1 +保定 1 +保密 1 +保明 1 +保溫 1 +保送 1 +保養 1 +信中 1 +信念 1 +信教 1 +信玄 1 +信神 1 +信竹 1 +信裡 1 +修好 1 +修學 1 +修憲 1 +修斯 1 +修煉 1 +修葺 1 +修鞋 1 +修養 1 +俯瞰 1 +俸祿 1 +俾路 1 +倉促 1 +倉庫 1 +個位 1 +個個 1 +個展 1 +倒下 1 +倒入 1 +倖免 1 +候旨 1 +候補 1 +倚天 1 +倚靠 1 +倩文 1 +倫之 1 +倫努 1 +倫巴 1 +倫布 1 +倫拜 1 +倫春 1 +倫納 1 +倫西 1 +倫貝 1 +倬標 1 +倭國 1 +倭寇 1 +假使 1 +假借 1 +假名 1 +假帳 1 +假設 1 +假說 1 +假象 1 +假釋 1 +假面 1 +偉強 1 +偏低 1 +偏僻 1 +偏向 1 +偏小 1 +偏東 1 +偏重 1 +偏離 1 +做到 1 +停刊 1 +停業 1 +停機 1 +停泊 1 +停職 1 +停辦 1 +停靠 1 +停飛 1 +健壯 1 +健將 1 +健身 1 +側目 1 +側邊 1 +偵察 1 +偵測 1 +偵緝 1 +偶像 1 +偶發 1 +偷取 1 +偷羊 1 +偷襲 1 +偷走 1 +偽季 1 +偽裝 1 +傀儡 1 +傅萊 1 +傍晚 1 +傑克 1 +傑志 1 +傑斐 1 +備忘 1 +備戰 1 +備案 1 +備用 1 +備註 1 +傢具 1 +催芽 1 +傭人 1 +傲不 1 +傳來 1 +傳給 1 +傳記 1 +傳遍 1 +債券 1 +傷及 1 +傷心 1 +傷患 1 +傷悲 1 +傷病 1 +傷透 1 +傾中 1 +傾心 1 +傾談 1 +僅屬 1 +僅用 1 +像差 1 +僕人 1 +僧人 1 +僧孺 1 +僧尼 1 +僧格 1 +僧祐 1 +僱主 1 +僱傭 1 +僵局 1 +價位 1 +價錢 1 +儀器 1 +儒士 1 +儘快 1 +儘量 1 +償付 1 +優值 1 +優良 1 +優裕 1 +優質 1 +儲量 1 +允良 1 +元子 1 +元朝 1 +元氣 1 +元澄 1 +元老 1 +元起 1 +兄長 1 +充任 1 +充分 1 +充氣 1 +充滿 1 +充軍 1 +兆基 1 +兆楠 1 +兆陽 1 +兇多 1 +兇悍 1 +兇猛 1 +先前 1 +先帝 1 +先師 1 +先賢 1 +先鋒 1 +先驗 1 +光啟 1 +光大 1 +光學 1 +光宇 1 +光州 1 +光度 1 +光復 1 +光景 1 +光束 1 +光泰 1 +光滑 1 +光照 1 +光環 1 +光范 1 +光華 1 +光顧 1 +克伍 1 +克佛 1 +克利 1 +克力 1 +克勤 1 +克南 1 +克孜 1 +克安 1 +克家 1 +克巴 1 +克希 1 +克敏 1 +克欽 1 +克沙 1 +克漢 1 +克瑟 1 +克禮 1 +克穆 1 +克萊 1 +克蘇 1 +克裡 1 +克貝 1 +克農 1 +克連 1 +克默 1 +兌換 1 +免生 1 +免疫 1 
+免遭 1 +兒島 1 +兒道 1 +兔毛 1 +兢兢 1 +兢業 1 +入世 1 +入地 1 +入塞 1 +入境 1 +入手 1 +入聲 1 +入股 1 +入閘 1 +入院 1 +入駐 1 +內亞 1 +內化 1 +內卡 1 +內在 1 +內埔 1 +內壁 1 +內拉 1 +內政 1 +內特 1 +內瑞 1 +內置 1 +內羅 1 +內胎 1 +內臟 1 +內蒂 1 +內載 1 +內遷 1 +內阿 1 +內韋 1 +全劇 1 +全十 1 +全名 1 +全境 1 +全壘 1 +全套 1 +全島 1 +全州 1 +全得 1 +全德 1 +全效 1 +全敗 1 +全數 1 +全書 1 +全盛 1 +全盤 1 +全省 1 +全福 1 +全程 1 +全稱 1 +全線 1 +全興 1 +全邨 1 +全鎮 1 +全隊 1 +全額 1 +全黑 1 +兩億 1 +八世 1 +八億 1 +八十 1 +八卦 1 +八大 1 +八思 1 +八成 1 +八杉 1 +八面 1 +公仔 1 +公佈 1 +公克 1 +公告 1 +公堂 1 +公墓 1 +公屋 1 +公斤 1 +公款 1 +公正 1 +公狼 1 +公約 1 +公衛 1 +公袥 1 +公視 1 +公超 1 +公關 1 +公頃 1 +公館 1 +六合 1 +六四 1 +六安 1 +六甲 1 +共享 1 +共尾 1 +共生 1 +共苦 1 +共處 1 +共識 1 +共鳴 1 +兵房 1 +兵鋒 1 +其妻 1 +其子 1 +其數 1 +其次 1 +其母 1 +其道 1 +典籍 1 +兼修 1 +兼優 1 +兼具 1 +兼容 1 +兼屬 1 +兼并 1 +冀望 1 +再、 1 +再三 1 +再保 1 +再用 1 +再而 1 +再臨 1 +再補 1 +再見 1 +冒險 1 +冠上 1 +冠峰 1 +冠狀 1 +冠玉 1 +冤案 1 +冥冥 1 +冥想 1 +冬初 1 +冬眠 1 +冬青 1 +冰冰 1 +冰塔 1 +冰晶 1 +冰柱 1 +冰河 1 +冰湖 1 +冰瀑 1 +冰球 1 +冰風 1 +冷凍 1 +冷暖 1 +冷次 1 +冷氣 1 +冷眼 1 +冷遇 1 +冷靜 1 +凄美 1 +准考 1 +凈白 1 +凌日 1 +凌晨 1 +凌辱 1 +凌駕 1 +凍傷 1 +凝結 1 +凡娜 1 +凱勒 1 +凱文 1 +凱爾 1 +凱維 1 +凱美 1 +凱茜 1 +凱蒂 1 +凱馬 1 +凸起 1 +凹版 1 +出世 1 +出人 1 +出到 1 +出動 1 +出去 1 +出名 1 +出品 1 +出國 1 +出城 1 +出奇 1 +出嫁 1 +出局 1 +出師 1 +出廠 1 +出征 1 +出戶 1 +出所 1 +出手 1 +出擊 1 +出校 1 +出榜 1 +出牆 1 +出血 1 +出訪 1 +出路 1 +出逃 1 +出門 1 +出頭 1 +刀鞘 1 +分工 1 +分店 1 +分批 1 +分攤 1 +分數 1 +分文 1 +分明 1 +分枝 1 +分校 1 +分泌 1 +分流 1 +分為 1 +分發 1 +分科 1 +分立 1 +分站 1 +分管 1 +分組 1 +分缺 1 +分貝 1 +分辨 1 +分部 1 +分鏡 1 +分隔 1 +分離 1 +分題 1 +分點 1 +切下 1 +切分 1 +切割 1 +切合 1 +切哇 1 +切實 1 +切成 1 +切望 1 +切片 1 +切華 1 +刑事 1 +刑部 1 +划算 1 +划艇 1 +列塔 1 +列姆 1 +列梅 1 +列維 1 +初中 1 +初始 1 +初時 1 +初次 1 +初步 1 +初見 1 +判令 1 +判定 1 +判寺 1 +判詞 1 +別人 1 +別克 1 +別名 1 +別院 1 +利他 1 +利刃 1 +利南 1 +利卡 1 +利奇 1 +利好 1 +利妮 1 +利帕 1 +利文 1 +利欽 1 +利歐 1 +利沙 1 +利潘 1 +利烏 1 +利牛 1 +利班 1 +利維 1 +利茅 1 +利茲 1 +利華 1 +利菲 1 +利雙 1 +刪剪 1 +刮目 1 +到任 1 +到期 1 +到發 1 +制動 1 +制式 1 +制瓷 1 +制約 1 +制酸 1 +刷到 1 +券頂 1 +刺殺 1 +刺特 1 +刻劃 1 +刻寫 1 +刻板 1 +刻滿 1 +刻畫 1 +則士 1 +則里 1 +削減 1 +剋上 1 +剌旭 1 +前傾 1 +前去 1 +前因 1 +前奏 1 +前委 1 +前嫌 1 +前季 1 +前提 1 +前景 1 +前稱 1 +前端 1 +前綴 1 +前者 1 +前肢 1 +前齒 1 +剛剛 1 +剛性 1 +剛直 1 +剛鐸 1 +剩餘 1 +副長 1 +割據 1 +割破 1 +割讓 1 +割開 1 +創保 1 
+創傷 1 +創刊 1 +創煥 1 +創生 1 +剷除 1 +剿滅 1 +劃出 1 +劃歸 1 +劃界 1 +劇中 1 +劇作 1 +劇場 1 +劇組 1 +劉楊 1 +劍俠 1 +劍法 1 +劍麻 1 +劑量 1 +力佛 1 +力圖 1 +力崗 1 +力特 1 +力霸 1 +力馬 1 +功勞 1 +功德 1 +功樂 1 +功績 1 +加下 1 +加侖 1 +加保 1 +加值 1 +加冕 1 +加劇 1 +加勁 1 +加多 1 +加尼 1 +加恩 1 +加拉 1 +加粗 1 +加薪 1 +加藤 1 +加賀 1 +加迪 1 +加速 1 +加達 1 +加電 1 +加霜 1 +助手 1 +助燃 1 +助聽 1 +助長 1 +努兒 1 +努斯 1 +劫匪 1 +劫持 1 +効忠 1 +勁光 1 +勁報 1 +勁敵 1 +勁歌 1 +勃勃 1 +勃起 1 +勇俊 1 +勇士 1 +勇武 1 +勒溫 1 +動人 1 +動向 1 +動土 1 +動用 1 +動能 1 +動蕩 1 +動詞 1 +動量 1 +勘探 1 +務工 1 +勝任 1 +勝昭 1 +勝素 1 +勝者 1 +勝訴 1 +勝賴 1 +勞埃 1 +勞庇 1 +勞斯 1 +勞永 1 +勞爾 1 +勞累 1 +勞賓 1 +募款 1 +募集 1 +勢傾 1 +勢能 1 +勤先 1 +勤快 1 +勳位 1 +勳爵 1 +勵珍 1 +勾形 1 +勾畫 1 +勾結 1 +包袱 1 +包裹 1 +包覆 1 +包頭 1 +化二 1 +化名 1 +化妝 1 +化成 1 +化整 1 +化氦 1 +化用 1 +化碳 1 +化肥 1 +化鉛 1 +化鐵 1 +北伐 1 +北側 1 +北冰 1 +北卡 1 +北景 1 +北歐 1 +北段 1 +北甘 1 +北美 1 +北車 1 +北返 1 +北達 1 +北邊 1 +匯入 1 +匯合 1 +匯報 1 +匯聯 1 +匯集 1 +匹亞 1 +匹斯 1 +匹茲 1 +匾額 1 +區塊 1 +區段 1 +區間 1 +十九 1 +十億 1 +十全 1 +十數 1 +十美 1 +千丈 1 +千五 1 +千兆 1 +千克 1 +千四 1 +千島 1 +千方 1 +千春 1 +千瓦 1 +千計 1 +千里 1 +千陽 1 +千餘 1 +千鶴 1 +升值 1 +升到 1 +升天 1 +升越 1 +升降 1 +升高 1 +午膳 1 +半導 1 +半牧 1 +半農 1 +卑爾 1 +卑詩 1 +卓著 1 +協合 1 +協理 1 +南亞 1 +南人 1 +南伽 1 +南加 1 +南卡 1 +南哲 1 +南多 1 +南大 1 +南寧 1 +南市 1 +南征 1 +南端 1 +南線 1 +南美 1 +南臨 1 +南航 1 +南船 1 +南路 1 +南通 1 +南遷 1 +南鄰 1 +南門 1 +南開 1 +南院 1 +南雄 1 +南麓 1 +博倫 1 +博凱 1 +博多 1 +博夫 1 +博學 1 +博尼 1 +博斯 1 +博格 1 +博洛 1 +博滕 1 +博義 1 +博覽 1 +卜拉 1 +卜楞 1 +占星 1 +卡亞 1 +卡內 1 +卡利 1 +卡力 1 +卡加 1 +卡姆 1 +卡巴 1 +卡希 1 +卡帕 1 +卡波 1 +卡納 1 +卡臣 1 +卡車 1 +卡默 1 +卧底 1 +卧病 1 +卧薪 1 +印信 1 +印刷 1 +印地 1 +印表 1 +危在 1 +危害 1 +危殆 1 +即場 1 +即有 1 +卵內 1 +原先 1 +原型 1 +原姓 1 +原屬 1 +原平 1 +原意 1 +原指 1 +原文 1 +原核 1 +原畫 1 +原籍 1 +原罪 1 +原諒 1 +厭世 1 +厭惡 1 +去搶 1 +去留 1 +去看 1 +參戰 1 +參政 1 +參演 1 +參看 1 +參禮 1 +參贊 1 +參閱 1 +又廷 1 +又或 1 +及利 1 +及後 1 +及時 1 +及爾 1 +友情 1 +友邦 1 +反共 1 +反其 1 +反動 1 +反右 1 +反向 1 +反恐 1 +反省 1 +反綁 1 +反證 1 +反響 1 +反黨 1 +叔父 1 +取下 1 +取出 1 +取名 1 +取回 1 +取悅 1 +取液 1 +取物 1 +取用 1 +取而 1 +受命 1 +受孕 1 +受害 1 +受挫 1 +受洗 1 +受精 1 +受罰 1 +受襲 1 +受賄 1 +受阻 1 +受雇 1 +叛徒 1 +叛變 1 +叛軍 1 +叢刊 1 +叢書 1 +口供 1 +口信 1 +口否 1 +口吻 1 +口感 1 +口服 1 +口秀 1 +口音 1 +口魚 1 +古丁 1 +古喙 1 +古堡 1 +古寺 1 +古廟 1 +古惑 1 +古武 1 +古爾 1 +古特 1 +古迹 1 
+古都 1 +古魯 1 +句子 1 +句點 1 +另加 1 +另娶 1 +另立 1 +另築 1 +另類 1 +只好 1 +只是 1 +只會 1 +只知 1 +只能 1 +叫作 1 +叫拜 1 +叫聲 1 +召集 1 +可及 1 +可可 1 +可塑 1 +可夫 1 +可巴 1 +可愛 1 +可憐 1 +可樂 1 +可欣 1 +可歸 1 +可熱 1 +可西 1 +可靠 1 +可風 1 +台南 1 +台標 1 +台視 1 +台詞 1 +台長 1 +史前 1 +史坦 1 +史塔 1 +史官 1 +史帝 1 +史特 1 +史稱 1 +史記 1 +史跡 1 +史館 1 +右任 1 +右手 1 +右方 1 +右神 1 +右臂 1 +司可 1 +司鐸 1 +吁宋 1 +吃上 1 +吃到 1 +吃掉 1 +吃法 1 +吃起 1 +各布 1 +各方 1 +各業 1 +各球 1 +各異 1 +各科 1 +各職 1 +各處 1 +各行 1 +各隊 1 +各項 1 +合共 1 +合力 1 +合台 1 +合和 1 +合唱 1 +合夥 1 +合奏 1 +合流 1 +合約 1 +合計 1 +合資 1 +合辦 1 +合適 1 +合陽 1 +合體 1 +吉克 1 +吉利 1 +吉士 1 +吉姆 1 +吉少 1 +吉拉 1 +吉祥 1 +吉米 1 +吉西 1 +吉阿 1 +吉隆 1 +同仁 1 +同伴 1 +同僚 1 +同台 1 +同型 1 +同志 1 +同日 1 +同校 1 +同步 1 +同母 1 +同父 1 +同甘 1 +同行 1 +同郷 1 +同食 1 +同飲 1 +名作 1 +名分 1 +名城 1 +名帥 1 +名師 1 +名方 1 +名村 1 +名氣 1 +名流 1 +名聲 1 +名臣 1 +名茶 1 +名號 1 +名門 1 +名額 1 +后妃 1 +吐嘈 1 +向前 1 +向滋 1 +君如 1 +君權 1 +君長 1 +君龍 1 +吞下 1 +吞聲 1 +吟唱 1 +否決 1 +吩咐 1 +含糖 1 +含量 1 +吳王 1 +吵醒 1 +吸塵 1 +吸毒 1 +吸菸 1 +吸附 1 +吸食 1 +吹來 1 +吹氣 1 +吹滅 1 +吻部 1 +呂宋 1 +呂智 1 +呈交 1 +告戒 1 +告白 1 +周代 1 +周刊 1 +周敏 1 +周日 1 +周朝 1 +周期 1 +周迅 1 +周遭 1 +味道 1 +呼倫 1 +呼和 1 +命題 1 +和夫 1 +和好 1 +和子 1 +和宜 1 +和康 1 +和必 1 +和暖 1 +和會 1 +和林 1 +和樹 1 +和浩 1 +和睦 1 +和美 1 +和衷 1 +和親 1 +和記 1 +和諧 1 +和議 1 +咧嘴 1 +咬弦 1 +咸平 1 +咸康 1 +咸淳 1 +咸美 1 +咸鏡 1 +咸陽 1 +哀悼 1 +品嘗 1 +品學 1 +品德 1 +品源 1 +哈丹 1 +哈依 1 +哈剌 1 +哈吉 1 +哈布 1 +哈希 1 +哈德 1 +哈恩 1 +哈拉 1 +哈斯 1 +哈珊 1 +哈索 1 +哈羅 1 +哈莫 1 +哈萊 1 +哈薩 1 +哈達 1 +哈頓 1 +哈默 1 +員佐 1 +員外 1 +員遼 1 +哥什 1 +哥利 1 +哥德 1 +哥拉 1 +哥爾 1 +哥華 1 +哥馬 1 +哨所 1 +哲也 1 +哲元 1 +哲孟 1 +哲生 1 +哲蚌 1 +唇槍 1 +唐代 1 +售予 1 +售出 1 +售票 1 +唯獨 1 +唱戲 1 +唱法 1 +唸珠 1 +唾液 1 +啄木 1 +商事 1 +商務 1 +商圈 1 +商城 1 +商埠 1 +商場 1 +商幫 1 +商朝 1 +商湯 1 +商用 1 +商羯 1 +商船 1 +商量 1 +問吧 1 +問話 1 +啟傑 1 +啟明 1 +啟發 1 +啟示 1 +啟程 1 +啟聯 1 +啟鑰 1 +啤酒 1 +喀什 1 +喀拉 1 +喀比 1 +喀里 1 +善事 1 +善作 1 +善如 1 +善待 1 +善後 1 +善惡 1 +善撲 1 +善良 1 +喇薩 1 +喊出 1 +喘息 1 +喙啄 1 +喙端 1 +喙龍 1 +喚回 1 +喚起 1 +喜好 1 +喝醉 1 +喝采 1 +喪失 1 +喬姆 1 +喬木 1 +喬科 1 +單獨 1 +單調 1 +單質 1 +單項 1 +嗅到 1 +嗜酸 1 +嗜鹼 1 +嗣位 1 +嗣業 1 +嘉慕 1 +嘉樂 1 +嘉許 1 +嘉道 1 +嘉陵 1 +嘉靖 1 +嘔吐 1 +嘗膽 1 +嘩然 1 +嘯林 1 +噁心 1 +噁爆 1 +器具 1 +器械 1 +器蓋 1 +器身 1 +噴射 1 +噶爾 1 +噸位 1 +嚇人 1 +嚮導 1 +嚴令 1 +嚴加 1 +嚴島 1 +嚴懲 1 +嚴斥 1 
+嚴氏 1 +嚴肅 1 +嚴謹 1 +囊胚 1 +囑咐 1 +囚犯 1 +四周 1 +四平 1 +四方 1 +四牌 1 +四百 1 +四萬 1 +四郎 1 +回信 1 +回合 1 +回填 1 +回家 1 +回寺 1 +回彈 1 +回復 1 +回教 1 +回程 1 +回答 1 +因弗 1 +因後 1 +因茨 1 +因達 1 +困住 1 +困擾 1 +固態 1 +固有 1 +國中 1 +國主 1 +國光 1 +國公 1 +國共 1 +國史 1 +國名 1 +國君 1 +國土 1 +國奧 1 +國妃 1 +國安 1 +國府 1 +國庫 1 +國情 1 +國慶 1 +國成 1 +國松 1 +國父 1 +國牧 1 +國產 1 +國界 1 +國短 1 +國立 1 +國策 1 +國諱 1 +國雄 1 +圍坐 1 +圍棋 1 +圍牆 1 +圍魏 1 +園丁 1 +園主 1 +園內 1 +園明 1 +園林 1 +園蔥 1 +圓圓 1 +圓弧 1 +圓柱 1 +圓滑 1 +圓環 1 +圖取 1 +圖布 1 +圖形 1 +圖片 1 +圖示 1 +圖稿 1 +團圓 1 +團隊 1 +土匪 1 +土司 1 +土石 1 +土虱 1 +在上 1 +在崗 1 +在旦 1 +在校 1 +在身 1 +地亞 1 +地名 1 +地域 1 +地基 1 +地夫 1 +地安 1 +地平 1 +地庫 1 +地政 1 +地板 1 +地標 1 +地盤 1 +地級 1 +地表 1 +地貌 1 +地質 1 +地道 1 +地遠 1 +地震 1 +坂本 1 +均勻 1 +均衡 1 +坎特 1 +坎貝 1 +坎農 1 +坐在 1 +坐監 1 +坐骨 1 +坡子 1 +坤玲 1 +坦克 1 +坦利 1 +坦干 1 +坦然 1 +坦白 1 +坦福 1 +坦貝 1 +坦頓 1 +型式 1 +垮台 1 +埃內 1 +埃弗 1 +埃德 1 +埃拉 1 +埃爾 1 +埃索 1 +埃胡 1 +埃雷 1 +埋名 1 +埋怨 1 +埋葬 1 +埋藏 1 +城主 1 +城光 1 +城內 1 +城南 1 +城嘉 1 +城址 1 +城巴 1 +城池 1 +城牆 1 +城西 1 +城隍 1 +埔寨 1 +埜堂 1 +執委 1 +執業 1 +執飛 1 +培元 1 +培烏 1 +培育 1 +培茲 1 +基層 1 +基希 1 +基平 1 +基徹 1 +基數 1 +基斯 1 +基石 1 +基苯 1 +基酸 1 +基里 1 +基頻 1 +基龍 1 +堂堂 1 +堂正 1 +堅城 1 +堅定 1 +堅尼 1 +堅拒 1 +堅蜥 1 +堆填 1 +堆積 1 +堪憐 1 +堪稱 1 +堪薩 1 +報仇 1 +報刊 1 +報名 1 +報復 1 +報讀 1 +場內 1 +場均 1 +場景 1 +塑像 1 +塑料 1 +塑有 1 +塑膠 1 +塔克 1 +塔利 1 +塔卜 1 +塔台 1 +塔吉 1 +塔塔 1 +塔德 1 +塔拉 1 +塔林 1 +塔樓 1 +塔法 1 +塔納 1 +塔茨 1 +塔莉 1 +塔蒂 1 +塔西 1 +塔龍 1 +塗魚 1 +塗黑 1 +塞冬 1 +塞古 1 +塞德 1 +塞法 1 +塞諸 1 +塞音 1 +塞馬 1 +墓葬 1 +墓頂 1 +墜入 1 +墜落 1 +增殖 1 +增生 1 +增祥 1 +增進 1 +增額 1 +墟內 1 +墨客 1 +墨色 1 +墾田 1 +壓縮 1 +壞球 1 +壩上 1 +壩下 1 +士域 1 +士尼 1 +士打 1 +士滿 1 +士珍 1 +士禛 1 +士評 1 +士達 1 +壯漢 1 +壯烈 1 +壺中 1 +壽命 1 +壽宴 1 +壽星 1 +夏威 1 +夏愨 1 +夏秋 1 +夏至 1 +夏荷 1 +夏默 1 +外借 1 +外公 1 +外力 1 +外加 1 +外務 1 +外匯 1 +外地 1 +外壁 1 +外套 1 +外婆 1 +外層 1 +外形 1 +外殼 1 +外省 1 +外管 1 +外表 1 +外褂 1 +外訪 1 +外語 1 +外銷 1 +多元 1 +多克 1 +多加 1 +多吉 1 +多夫 1 +多尼 1 +多弗 1 +多毗 1 +多汁 1 +多爾 1 +多特 1 +多祿 1 +多納 1 +多莉 1 +多萬 1 +多謝 1 +多雨 1 +夜夜 1 +夜戰 1 +夠大 1 +夢中 1 +夢境 1 +夢幻 1 +夢想 1 +夢雲 1 +夢鴿 1 +夥兒 1 +大不 1 +大丹 1 +大乘 1 +大二 1 +大儒 1 +大區 1 +大友 1 +大受 1 +大吉 1 +大名 1 +大君 1 +大和 1 +大喊 1 +大國 1 +大圍 1 +大城 1 +大堆 1 +大堤 1 +大增 1 +大士 1 +大失 1 +大島 1 +大嶼 1 +大幅 1 +大怒 1 
+大悟 1 +大敵 1 +大新 1 +大校 1 +大概 1 +大正 1 +大殿 1 +大汗 1 +大河 1 +大洋 1 +大湖 1 +大溪 1 +大漠 1 +大獲 1 +大理 1 +大發 1 +大白 1 +大窘 1 +大紅 1 +大經 1 +大綱 1 +大腦 1 +大腸 1 +大膽 1 +大興 1 +大舉 1 +大艇 1 +大華 1 +大蒜 1 +大薇 1 +大跌 1 +大路 1 +大辦 1 +大通 1 +大進 1 +大郎 1 +大部 1 +大都 1 +大醉 1 +大釗 1 +大銘 1 +大門 1 +大雄 1 +大韓 1 +大馬 1 +大驚 1 +大體 1 +大鬧 1 +大黨 1 +大鼠 1 +天份 1 +天佐 1 +天使 1 +天倫 1 +天元 1 +天安 1 +天寶 1 +天差 1 +天性 1 +天悅 1 +天慶 1 +天才 1 +天母 1 +天河 1 +天涯 1 +天球 1 +天祐 1 +天窗 1 +天紀 1 +天翔 1 +天翼 1 +天賜 1 +天賦 1 +天馬 1 +太傅 1 +太元 1 +太冷 1 +太初 1 +太后 1 +太宗 1 +太宰 1 +太尉 1 +太常 1 +太湖 1 +太炎 1 +太監 1 +太行 1 +太近 1 +太遠 1 +夫仇 1 +夫喬 1 +夫堡 1 +夫妻 1 +夫尼 1 +夫森 1 +夫納 1 +夫茨 1 +夫魯 1 +央行 1 +失利 1 +失地 1 +失所 1 +失效 1 +失職 1 +失能 1 +失落 1 +失誤 1 +失蹤 1 +夷昧 1 +夸特 1 +夾狀 1 +奇俠 1 +奇幻 1 +奇怪 1 +奇斯 1 +奇曼 1 +奇缺 1 +奇耶 1 +奇里 1 +奇非 1 +奇頓 1 +奈克 1 +奈德 1 +奈爾 1 +奈葉 1 +奉命 1 +奉安 1 +奉律 1 +奉新 1 +奉系 1 +奎德 1 +奎茲 1 +奏鳴 1 +契克 1 +契特 1 +奕詝 1 +套出 1 +套用 1 +奢華 1 +奧伊 1 +奧內 1 +奧利 1 +奧古 1 +奧姆 1 +奧得 1 +奧托 1 +奧格 1 +奧洛 1 +奧瓦 1 +奧的 1 +奧米 1 +奧羅 1 +奧羽 1 +奧蒂 1 +奪去 1 +奬懲 1 +女人 1 +女傭 1 +女僕 1 +女優 1 +女友 1 +女嬰 1 +女木 1 +女水 1 +女版 1 +女生 1 +女眷 1 +女短 1 +奴役 1 +奶爸 1 +她倆 1 +好上 1 +好奇 1 +好意 1 +好手 1 +好氧 1 +好色 1 +如指 1 +如數 1 +如流 1 +如生 1 +妄圖 1 +妊娠 1 +妖怪 1 +妥也 1 +妮科 1 +妮綺 1 +妹夫 1 +妻妹 1 +妻姐 1 +妻室 1 +姆古 1 +姆士 1 +姆希 1 +姆庫 1 +姆德 1 +姆瓦 1 +姆萊 1 +姆齊 1 +姊姊 1 +始發 1 +始祖 1 +始稱 1 +始興 1 +姑娘 1 +姑母 1 +姓埋 1 +委內 1 +委身 1 +姚里 1 +姥姥 1 +姦情 1 +姪女 1 +姬瑪 1 +姿色 1 +威光 1 +威嚇 1 +威塞 1 +威夷 1 +威權 1 +威治 1 +威特 1 +威瑟 1 +威舍 1 +威靈 1 +娘家 1 +娜塔 1 +娜茲 1 +婆羅 1 +婚事 1 +婚宴 1 +婚禮 1 +婢女 1 +婷婷 1 +媒介 1 +媚娘 1 +嫁與 1 +嫘縈 1 +嫣然 1 +嬰孩 1 +子孫 1 +子文 1 +子球 1 +子程 1 +孕育 1 +孕酮 1 +字一 1 +字喃 1 +字幕 1 +字模 1 +字號 1 +存世 1 +存取 1 +存放 1 +孛許 1 +孜別 1 +孝感 1 +孝次 1 +孟加 1 +孟德 1 +孟雄 1 +季後 1 +季惟 1 +季米 1 +季采 1 +季風 1 +季龍 1 +孤島 1 +孤芳 1 +孤身 1 +孩提 1 +學兼 1 +學到 1 +學前 1 +學家 1 +學業 1 +學民 1 +學津 1 +學社 1 +學聯 1 +學苑 1 +宇航 1 +守備 1 +守孝 1 +守文 1 +守法 1 +守臣 1 +守謙 1 +守齋 1 +安二 1 +安妮 1 +安娜 1 +安安 1 +安岳 1 +安徒 1 +安托 1 +安撫 1 +安放 1 +安普 1 +安會 1 +安樂 1 +安正 1 +安民 1 +安汶 1 +安然 1 +安營 1 +安理 1 +安聯 1 +安葬 1 +安蘭 1 +安諾 1 +安達 1 +宋國 1 +完好 1 +完畢 1 +宏偉 1 +宏坤 1 +宏德 1 +宏聲 1 +宏道 1 +宏量 1 +宗偉 1 +宗哈 1 +宗憲 1 +宗谷 1 +宗龍 1 +官兵 1 +官司 1 +官府 1 +官服 1 +官腔 1 +官話 1 +官邸 1 
+官野 1 +官長 1 +宙域 1 +定位 1 +定價 1 +定向 1 +定康 1 +定影 1 +定性 1 +定案 1 +定理 1 +定量 1 +宛城 1 +宜合 1 +宜興 1 +宜諾 1 +客場 1 +客家 1 +客觀 1 +客貨 1 +客輪 1 +客量 1 +宣判 1 +宣化 1 +宣帝 1 +宣誓 1 +室外 1 +室溫 1 +宦官 1 +宮人 1 +宮崎 1 +宰李 1 +宴席 1 +宴會 1 +家光 1 +家勁 1 +家務 1 +家口 1 +家可 1 +家外 1 +家奴 1 +家干 1 +家用 1 +家立 1 +家道 1 +家驤 1 +容器 1 +容忍 1 +容許 1 +容量 1 +宿敵 1 +宿根 1 +寄存 1 +寄送 1 +寅成 1 +密山 1 +密文 1 +密歇 1 +密西 1 +密集 1 +富卡 1 +富商 1 +富恩 1 +富翁 1 +富蘭 1 +富裕 1 +富豪 1 +富貴 1 +富邦 1 +察合 1 +察哈 1 +察沃 1 +寡尿 1 +實則 1 +實屬 1 +實情 1 +實戰 1 +實收 1 +實權 1 +實況 1 +實踐 1 +寧波 1 +審批 1 +審理 1 +審計 1 +審評 1 +審議 1 +寫下 1 +寫信 1 +寫入 1 +寫出 1 +寫字 1 +寫成 1 +寫進 1 +寬容 1 +寬度 1 +寬敞 1 +寬條 1 +寬順 1 +寮國 1 +寵物 1 +寵臣 1 +寶光 1 +寶劍 1 +寶如 1 +寶應 1 +寶樓 1 +寶殿 1 +寶玉 1 +寶田 1 +寶血 1 +寶雞 1 +寶雲 1 +寶麗 1 +寺事 1 +寺前 1 +封土 1 +封為 1 +封爵 1 +封穴 1 +封號 1 +封裝 1 +封路 1 +射失 1 +射程 1 +射箭 1 +射線 1 +射鵰 1 +將來 1 +將領 1 +專任 1 +專制 1 +專吃 1 +專指 1 +專政 1 +專機 1 +專橫 1 +專欄 1 +專權 1 +專款 1 +專注 1 +專線 1 +專註 1 +專賣 1 +專長 1 +專項 1 +尊崇 1 +尊敬 1 +尊稱 1 +尋三 1 +尋回 1 +尋親 1 +對上 1 +對付 1 +對撞 1 +對準 1 +對照 1 +對生 1 +對白 1 +對稱 1 +對立 1 +對簿 1 +對話 1 +對面 1 +對飛 1 +導入 1 +導出 1 +導向 1 +導彈 1 +導播 1 +導正 1 +小人 1 +小兔 1 +小刀 1 +小南 1 +小國 1 +小小 1 +小島 1 +小巷 1 +小息 1 +小數 1 +小書 1 +小欖 1 +小水 1 +小河 1 +小津 1 +小浪 1 +小澤 1 +小片 1 +小生 1 +小田 1 +小知 1 +小石 1 +小童 1 +小舖 1 +小虎 1 +小街 1 +小輪 1 +小野 1 +小隊 1 +小順 1 +小顏 1 +小風 1 +小體 1 +少兒 1 +少將 1 +少年 1 +少懷 1 +少林 1 +少見 1 +少許 1 +少量 1 +尖端 1 +尖酸 1 +尖頂 1 +尚州 1 +尚德 1 +尚方 1 +尚書 1 +尤利 1 +尤勒 1 +尤指 1 +尤里 1 +就此 1 +就熟 1 +就職 1 +尷尬 1 +尹氏 1 +尼地 1 +尼夫 1 +尼師 1 +尼庫 1 +尼律 1 +尼拉 1 +尼歐 1 +尼比 1 +尼茲 1 +尼萊 1 +尼蘇 1 +尼諾 1 +尼赫 1 +尼郡 1 +尾巴 1 +尾柄 1 +尾隨 1 +尾雉 1 +尾鰭 1 +尾龍 1 +局勢 1 +局間 1 +居家 1 +居所 1 +居留 1 +居禮 1 +屆滿 1 +屈一 1 +屋宇 1 +屋頂 1 +屍體 1 +屏山 1 +屏東 1 +屏風 1 +展品 1 +展望 1 +展貿 1 +屠村 1 +屠龍 1 +層壓 1 +層次 1 +層疊 1 +層級 1 +層面 1 +履行 1 +屬國 1 +屬於 1 +屬靈 1 +屯南 1 +山下 1 +山內 1 +山口 1 +山地 1 +山姆 1 +山峰 1 +山崖 1 +山手 1 +山月 1 +山村 1 +山楂 1 +山猿 1 +山田 1 +山胞 1 +山葉 1 +山陵 1 +山麓 1 +山龍 1 +岐女 1 +岐阜 1 +岐陽 1 +岔江 1 +岡恩 1 +岡本 1 +岩屋 1 +岩心 1 +岩手 1 +岩漿 1 +岳泰 1 +岷江 1 +岸川 1 +岸賈 1 +岸邊 1 +峯崎 1 +峰倉 1 +峰景 1 +島內 1 +島國 1 +島戴 1 +島蚺 1 +峽灣 1 +峽谷 1 +崇善 1 +崇尚 1 +崇敬 1 +崎頭 1 +崔奇 1 +崔陂 1 +崗斜 1 +崙頂 1 +崞縣 1 +崩坍 1 +崩潰 1 +嵩祝 1 +巔峰 1 +川南 1 
+川村 1 +川邊 1 +州界 1 +州舞 1 +巡査 1 +工事 1 +工務 1 +工序 1 +工廠 1 +工會 1 +工法 1 +工潮 1 +左岸 1 +左拉 1 +左派 1 +左膀 1 +左轉 1 +巨作 1 +巨像 1 +巨冊 1 +巨型 1 +巨石 1 +巨賈 1 +巨野 1 +巫師 1 +差分 1 +差別 1 +差勁 1 +差地 1 +差會 1 +差無 1 +差諾 1 +己二 1 +己巳 1 +己酉 1 +已故 1 +已晚 1 +已死 1 +巴亞 1 +巴卑 1 +巴喬 1 +巴城 1 +巴孛 1 +巴巴 1 +巴底 1 +巴庫 1 +巴德 1 +巴思 1 +巴恩 1 +巴羅 1 +巴英 1 +巴莫 1 +巴蒂 1 +巴薩 1 +巴諾 1 +巴賽 1 +巴赫 1 +巴雷 1 +巴頓 1 +市售 1 +市縣 1 +市轄 1 +市面 1 +布丹 1 +布伯 1 +布倫 1 +布列 1 +布哈 1 +布地 1 +布夏 1 +布宜 1 +布尼 1 +布巴 1 +布政 1 +布料 1 +布林 1 +布氏 1 +布置 1 +布隆 1 +布頓 1 +帆布 1 +帆船 1 +希伯 1 +希克 1 +希姆 1 +希涅 1 +希特 1 +希皮 1 +希鵬 1 +帕內 1 +帕器 1 +帕搏 1 +帕沙 1 +帕爾 1 +帕特 1 +帕米 1 +帕維 1 +帕薩 1 +帕西 1 +帕迪 1 +帕那 1 +帕金 1 +帝王 1 +帝芬 1 +帝都 1 +師今 1 +師團 1 +師徒 1 +師從 1 +師父 1 +師生 1 +師益 1 +席勒 1 +帳目 1 +帶上 1 +帶出 1 +帶子 1 +帶少 1 +帶水 1 +帶英 1 +常住 1 +常勝 1 +常客 1 +常態 1 +常盛 1 +常識 1 +常量 1 +常青 1 +常駐 1 +幅員 1 +幕府 1 +幕後 1 +幣原 1 +幪面 1 +幫主 1 +干伊 1 +干王 1 +干達 1 +平反 1 +平和 1 +平地 1 +平坦 1 +平帝 1 +平常 1 +平手 1 +平日 1 +平林 1 +平沼 1 +平滑 1 +平臺 1 +平行 1 +平陵 1 +平陽 1 +平頓 1 +年中 1 +年份 1 +年幼 1 +年息 1 +年益 1 +年第 1 +年老 1 +年號 1 +年資 1 +年青 1 +并行 1 +幸一 1 +幸好 1 +幸運 1 +幹事 1 +幹掉 1 +幹流 1 +幹道 1 +幼子 1 +幼年 1 +幼弟 1 +幼發 1 +幼稚 1 +幼貓 1 +幼魚 1 +幼鯨 1 +幼鳥 1 +幽閣 1 +幾內 1 +幾千 1 +幾多 1 +幾百 1 +庇烏 1 +床鋪 1 +底冊 1 +底格 1 +底比 1 +底片 1 +底特 1 +底稿 1 +底質 1 +店家 1 +庚戌 1 +府中 1 +府二 1 +府城 1 +府尹 1 +府第 1 +度宗 1 +度尼 1 +度蘭 1 +座位 1 +座右 1 +座座 1 +座椅 1 +座苣 1 +座西 1 +座談 1 +庫哈 1 +庫柏 1 +庫欣 1 +庫瑙 1 +庫賽 1 +庫赫 1 +庫迪 1 +庫頁 1 +庭園 1 +庭薺 1 +庭長 1 +康史 1 +康奈 1 +康子 1 +康安 1 +康寧 1 +康德 1 +康樂 1 +康濟 1 +康福 1 +康科 1 +康羅 1 +廉潔 1 +廚師 1 +廝守 1 +廟倉 1 +廟方 1 +廟橋 1 +廟鎮 1 +廢待 1 +廢棄 1 +廢熱 1 +廢舊 1 +廣受 1 +廣權 1 +廣澳 1 +廣稱 1 +廣金 1 +廬山 1 +廳局 1 +廳長 1 +延安 1 +延年 1 +延音 1 +廷和 1 +廷尉 1 +建好 1 +建威 1 +建市 1 +建構 1 +建武 1 +建置 1 +建華 1 +建超 1 +廿五 1 +廿六 1 +弄到 1 +弄清 1 +弄眼 1 +弊案 1 +式一 1 +式塔 1 +式微 1 +弓尾 1 +弓弦 1 +弓箭 1 +引來 1 +引咎 1 +引導 1 +引江 1 +引渡 1 +引申 1 +引資 1 +弗內 1 +弗拉 1 +弗格 1 +弗洛 1 +弗特 1 +弗萊 1 +弗蘭 1 +弘前 1 +弘宣 1 +弭兵 1 +張家 1 +張氏 1 +強勁 1 +強化 1 +強拍 1 +強暴 1 +強權 1 +強求 1 +強盜 1 +強迫 1 +強韌 1 +強項 1 +彈劾 1 +彈塗 1 +彈撥 1 +彈盡 1 +彌撒 1 +彌斯 1 +彌格 1 +彌補 1 +彌賽 1 +彎曲 1 +彗差 1 +彗星 1 +彙編 1 +形像 1 +形同 1 +形翼 1 +形體 1 +彥根 1 +彥直 1 +彩畫 1 +彩繪 1 +彩繽 1 +彩雲 1 +彩鳳 1 +彪馬 1 +彭劉 1 
+彭博 1 +彭古 1 +彭定 1 +彭拿 1 +彰信 1 +影帝 1 +影機 1 +影線 1 +影評 1 +影迷 1 +影集 1 +影音 1 +彷彿 1 +彼得 1 +彼特 1 +彼落 1 +彼魯 1 +往上 1 +往世 1 +往日 1 +征西 1 +待到 1 +待舉 1 +很小 1 +很強 1 +很忙 1 +很懶 1 +很是 1 +很深 1 +很遠 1 +很重 1 +很長 1 +律定 1 +律斯 1 +律狄 1 +後世 1 +後代 1 +後勤 1 +後南 1 +後周 1 +後宮 1 +後庄 1 +後悔 1 +後援 1 +後春 1 +後果 1 +後梁 1 +後段 1 +後母 1 +後稱 1 +後續 1 +後置 1 +後藤 1 +後送 1 +後防 1 +後齒 1 +徒具 1 +徒手 1 +徒生 1 +得克 1 +得利 1 +得哥 1 +得堡 1 +得心 1 +得悉 1 +得患 1 +得獎 1 +得益 1 +得維 1 +從來 1 +從句 1 +從周 1 +從善 1 +從政 1 +御史 1 +御名 1 +御墨 1 +御宅 1 +御窯 1 +御雷 1 +復健 1 +復合 1 +復寫 1 +復生 1 +復甦 1 +循道 1 +微型 1 +微妙 1 +微小 1 +微波 1 +微觀 1 +微量 1 +徵兆 1 +徵招 1 +徵祥 1 +德勝 1 +德哥 1 +德奧 1 +德妃 1 +德姆 1 +德威 1 +德宏 1 +德富 1 +德干 1 +德愛 1 +德懷 1 +德文 1 +德曼 1 +德林 1 +德比 1 +德江 1 +德瓦 1 +德甲 1 +德米 1 +德納 1 +德西 1 +德諾 1 +德靈 1 +德馬 1 +德高 1 +徽章 1 +心勃 1 +心境 1 +心宿 1 +心意 1 +心應 1 +心智 1 +心疾 1 +心目 1 +心肌 1 +必和 1 +必拓 1 +必走 1 +必需 1 +忍心 1 +忍氣 1 +忒彌 1 +志摩 1 +志明 1 +志道 1 +忘記 1 +忠於 1 +忠誠 1 +快上 1 +快捷 1 +快綫 1 +忽視 1 +思侯 1 +思巴 1 +思從 1 +思德 1 +思成 1 +思維 1 +思缽 1 +思考 1 +急劇 1 +急忙 1 +急救 1 +急於 1 +急流 1 +急症 1 +急行 1 +性向 1 +性命 1 +性情 1 +性腺 1 +怪圈 1 +怪聲 1 +恆大 1 +恆德 1 +恆河 1 +恐嚇 1 +恐懼 1 +恢豐 1 +恣意 1 +恩利 1 +恩南 1 +恩卡 1 +恩哈 1 +恩慈 1 +恩斯 1 +恩特 1 +恩秀 1 +恩贈 1 +恭子 1 +息率 1 +恰尼 1 +悉心 1 +悉達 1 +悟到 1 +悟空 1 +患失 1 +患得 1 +患病 1 +悲傷 1 +悲劇 1 +悲嘆 1 +悲慘 1 +悲鴻 1 +悼念 1 +情不 1 +情人 1 +情勢 1 +情愁 1 +情愛 1 +情景 1 +情結 1 +情誼 1 +情資 1 +情陷 1 +情願 1 +惇曧 1 +惠亞 1 +惠梨 1 +惠特 1 +惡人 1 +惡化 1 +惡夢 1 +惡性 1 +惡搞 1 +惡臭 1 +惡靈 1 +惡魔 1 +想必 1 +想起 1 +愈加 1 +愈大 1 +愈高 1 +愉快 1 +意圖 1 +意念 1 +意甲 1 +意魔 1 +愙威 1 +愚園 1 +愚昧 1 +愛好 1 +愛思 1 +愛恨 1 +愛意 1 +愛慕 1 +愛明 1 +愛樂 1 +愛河 1 +愛莎 1 +愛迪 1 +愛默 1 +感冒 1 +感謝 1 +慈湖 1 +慈濟 1 +慌亂 1 +慎太 1 +慕容 1 +慕肯 1 +慘叫 1 +慘重 1 +慚愧 1 +慢行 1 +慢駛 1 +慧嫻 1 +慰安 1 +慶典 1 +慶曆 1 +慶貽 1 +慶黎 1 +慷慨 1 +憂憤 1 +憲政 1 +憲民 1 +憲法 1 +憶蓮 1 +應付 1 +應允 1 +應屆 1 +應戰 1 +應手 1 +應昌 1 +應當 1 +應許 1 +應邀 1 +懲罰 1 +懶爪 1 +懶甸 1 +懷仁 1 +懷克 1 +懷好 1 +懷念 1 +懷慶 1 +懷抱 1 +懷水 1 +懷聖 1 +懸掛 1 +懼高 1 +戀人 1 +戀屍 1 +戀童 1 +戈德 1 +戈爾 1 +戈登 1 +戈矛 1 +戈維 1 +戈蘭 1 +成事 1 +成仁 1 +成化 1 +成半 1 +成名 1 +成品 1 +成套 1 +成對 1 +成形 1 +成梁 1 +成行 1 +成語 1 +我國 1 +戟鯨 1 +截然 1 +截至 1 +截頜 1 +戰事 1 +戰力 1 +戰勝 1 +戰地 1 +戰平 1 +戰情 1 +戰船 1 +戲子 1 +戲曲 1 +戲法 1 +戲碼 1 +戲謔 1 +戲院 1 
+戴上 1 +戴克 1 +戴勝 1 +戴斯 1 +戴爾 1 +戴維 1 +戴蒙 1 +戴頓 1 +戶田 1 +戶籍 1 +房東 1 +所不 1 +所料 1 +所望 1 +所為 1 +所長 1 +手上 1 +手工 1 +手感 1 +手抄 1 +手指 1 +手提 1 +手旁 1 +手槍 1 +手稿 1 +手筆 1 +手腳 1 +手邊 1 +手風 1 +手龍 1 +才子 1 +才是 1 +才智 1 +扎什 1 +扎爾 1 +扎特 1 +扎阿 1 +打亂 1 +打人 1 +打包 1 +打坐 1 +打撈 1 +打死 1 +打水 1 +打牌 1 +打碎 1 +打菲 1 +打造 1 +打響 1 +扔出 1 +托倫 1 +托加 1 +托弗 1 +托格 1 +托洛 1 +托瓦 1 +托盤 1 +托米 1 +托茂 1 +扣上 1 +扶林 1 +批次 1 +扼止 1 +找來 1 +找續 1 +承天 1 +承德 1 +承接 1 +承斌 1 +承租 1 +技師 1 +技戰 1 +技法 1 +抑制 1 +抑鬱 1 +抒解 1 +抓到 1 +投交 1 +投奔 1 +投標 1 +投球 1 +投身 1 +投靠 1 +抗大 1 +抗拒 1 +抗衡 1 +抗體 1 +折不 1 +折射 1 +折斷 1 +折衷 1 +抨擊 1 +披覆 1 +披頭 1 +抬昇 1 +抱持 1 +抵受 1 +抵禦 1 +押韻 1 +抽檢 1 +抽煙 1 +抽象 1 +抽走 1 +拆分 1 +拆卸 1 +拆掉 1 +拆遷 1 +拉O 1 +拉亞 1 +拉什 1 +拉倫 1 +拉利 1 +拉博 1 +拉卜 1 +拉只 1 +拉圭 1 +拉塞 1 +拉奏 1 +拉尼 1 +拉差 1 +拉布 1 +拉帕 1 +拉彼 1 +拉扎 1 +拉拉 1 +拉日 1 +拉林 1 +拉柯 1 +拉桑 1 +拉森 1 +拉欣 1 +拉法 1 +拉漢 1 +拉特 1 +拉珀 1 +拉瑙 1 +拉瑪 1 +拉籌 1 +拉維 1 +拉罕 1 +拉美 1 +拉華 1 +拉薩 1 +拉諾 1 +拉貝 1 +拉赫 1 +拉越 1 +拉那 1 +拉麥 1 +拉齊 1 +拉龍 1 +拋棄 1 +拋物 1 +拍照 1 +拍賣 1 +拒不 1 +拓務 1 +拓建 1 +拓撲 1 +拔刀 1 +拖進 1 +拖錯 1 +拖鞋 1 +拙劣 1 +招潮 1 +招生 1 +招聘 1 +招降 1 +拜仁 1 +拜拜 1 +括弧 1 +拱廊 1 +拱橋 1 +拳一 1 +拳擊 1 +拳賽 1 +拷問 1 +拼寫 1 +拾糞 1 +拿來 1 +拿島 1 +拿路 1 +拿錯 1 +持久 1 +持球 1 +指使 1 +指掌 1 +指標 1 +指派 1 +指稱 1 +指責 1 +挑選 1 +挖子 1 +挖掘 1 +挪動 1 +挪用 1 +振動 1 +振幅 1 +振林 1 +挹江 1 +挺身 1 +挽回 1 +挾持 1 +捉弄 1 +捉拿 1 +捉襟 1 +捍衛 1 +捐款 1 +捐獻 1 +捕撈 1 +捕殺 1 +捕獵 1 +捕魚 1 +捕鼠 1 +捲入 1 +捷徑 1 +捷沃 1 +授勳 1 +授意 1 +授權 1 +授與 1 +掉頭 1 +掌控 1 +掌摑 1 +掌權 1 +掌鏡 1 +排場 1 +排外 1 +排序 1 +掙扎 1 +掛果 1 +掛牌 1 +掛鉤 1 +掠奪 1 +採信 1 +採摘 1 +採樣 1 +採納 1 +採購 1 +採集 1 +採食 1 +探明 1 +探望 1 +探求 1 +探究 1 +探險 1 +接到 1 +接力 1 +接班 1 +接納 1 +接聽 1 +接見 1 +接辦 1 +接送 1 +接連 1 +控告 1 +控訴 1 +推介 1 +推免 1 +推前 1 +推力 1 +推導 1 +推斷 1 +推測 1 +推演 1 +推特 1 +推理 1 +推舉 1 +推論 1 +推遲 1 +掩蓋 1 +描摹 1 +描繪 1 +提亞 1 +提前 1 +提問 1 +提子 1 +提康 1 +提拔 1 +提攜 1 +提昇 1 +提煉 1 +提督 1 +提籃 1 +提米 1 +提醒 1 +插手 1 +插曲 1 +揚光 1 +揚言 1 +換成 1 +換算 1 +握帶 1 +握持 1 +揭曉 1 +揭發 1 +揭開 1 +揮舞 1 +援助 1 +援外 1 +援引 1 +援手 1 +援救 1 +搜尋 1 +搜狐 1 +搜羅 1 +搜集 1 +搞垮 1 +搞錯 1 +搬動 1 +搬往 1 +搬移 1 +搬遷 1 +搭乘 1 +搭配 1 +搶先 1 +搶劫 1 +搶奪 1 +搶救 1 +摒棄 1 +摘下 1 +摘星 1 +摘錄 1 +摧毀 1 +摩亞 1 +摩加 1 +摩天 1 +摩崖 1 +摩托 1 +摩擦 1 +摩琴 1 +摩登 1 +摩納 1 
+摩西 1 +摯友 1 +摸摸 1 +撒冷 1 +撒拉 1 +撒營 1 +撞入 1 +撞死 1 +撤回 1 +撤職 1 +撤退 1 +撤除 1 +撥出 1 +撥號 1 +撫養 1 +播種 1 +撮合 1 +撰述 1 +撲克 1 +撿起 1 +擁堵 1 +擁戴 1 +擁擠 1 +擁而 1 +擁護 1 +擂台 1 +擊中 1 +擊劍 1 +擊斃 1 +擊毀 1 +擊潰 1 +擊破 1 +擋住 1 +操控 1 +操縱 1 +擒拿 1 +擔憂 1 +擔竿 1 +擔綱 1 +據傳 1 +據此 1 +據稱 1 +據點 1 +擠塞 1 +擠壓 1 +擠奶 1 +擠眉 1 +擠迫 1 +擢升 1 +擬桿 1 +擬獅 1 +擬訂 1 +擬議 1 +擴散 1 +擴編 1 +擺弄 1 +擺渡 1 +擾亂 1 +攀爬 1 +攔截 1 +攝像 1 +攝取 1 +攪拌 1 +支取 1 +支廳 1 +支派 1 +支那 1 +支隊 1 +收場 1 +收容 1 +收市 1 +收支 1 +收生 1 +收益 1 +收租 1 +收緊 1 +收聽 1 +收買 1 +收費 1 +收養 1 +攸之 1 +改作 1 +改委 1 +改屬 1 +改投 1 +改採 1 +改換 1 +改派 1 +改發 1 +改穿 1 +改組 1 +改選 1 +改隸 1 +攻下 1 +攻勢 1 +攻堅 1 +攻方 1 +攻殺 1 +攻訐 1 +攻讀 1 +放任 1 +放入 1 +放出 1 +放到 1 +放大 1 +放影 1 +放榜 1 +放牧 1 +放緩 1 +放送 1 +放逐 1 +放開 1 +放鬆 1 +政團 1 +政委 1 +政局 1 +政廳 1 +政敵 1 +政樞 1 +政法 1 +政爭 1 +政界 1 +故郷 1 +效尤 1 +效能 1 +敏能 1 +敏銳 1 +敏錠 1 +救人 1 +救出 1 +救助 1 +救國 1 +救援 1 +救星 1 +救災 1 +救生 1 +救贖 1 +救趙 1 +敕令 1 +敕書 1 +敗局 1 +敗死 1 +敗瓦 1 +敗退 1 +教務 1 +教士 1 +教室 1 +教席 1 +教材 1 +教案 1 +教籍 1 +教總 1 +教義 1 +教職 1 +散射 1 +敦煌 1 +敬仰 1 +敬堯 1 +敬請 1 +敲擊 1 +敲訂 1 +整塊 1 +整所 1 +整架 1 +整為 1 +整片 1 +整篇 1 +整軍 1 +整顆 1 +整齊 1 +敵兵 1 +敵方 1 +數以 1 +數值 1 +數澤 1 +數百 1 +數碼 1 +數萬 1 +數論 1 +文哲 1 +文姬 1 +文岳 1 +文巨 1 +文德 1 +文摘 1 +文政 1 +文書 1 +文本 1 +文楷 1 +文武 1 +文法 1 +文清 1 +文職 1 +文賢 1 +文集 1 +文飾 1 +斐遜 1 +斑塊 1 +斑點 1 +斗貴 1 +斜坡 1 +斥教 1 +斬落 1 +斯佩 1 +斯凱 1 +斯哥 1 +斯塘 1 +斯妥 1 +斯威 1 +斯安 1 +斯尼 1 +斯巴 1 +斯廷 1 +斯楚 1 +斯汀 1 +斯狸 1 +斯珀 1 +斯班 1 +斯瓦 1 +斯聯 1 +斯艾 1 +斯菲 1 +斯雷 1 +斯頓 1 +新任 1 +新修 1 +新元 1 +新址 1 +新埔 1 +新太 1 +新奧 1 +新字 1 +新寧 1 +新屋 1 +新巴 1 +新思 1 +新昌 1 +新明 1 +新春 1 +新月 1 +新核 1 +新榮 1 +新民 1 +新浪 1 +新版 1 +新生 1 +新秀 1 +新篇 1 +新編 1 +新義 1 +新舊 1 +新製 1 +新開 1 +新飛 1 +新馬 1 +新高 1 +新鴻 1 +新黨 1 +斷後 1 +斷盡 1 +斷言 1 +方丈 1 +方八 1 +方尖 1 +方正 1 +方田 1 +方百 1 +方石 1 +方程 1 +方蓋 1 +方蟹 1 +於維 1 +施哈 1 +施奈 1 +施文 1 +施瓦 1 +施用 1 +施韋 1 +旁觀 1 +旅居 1 +旅程 1 +旋渦 1 +旋轉 1 +族雄 1 +族頭 1 +旗艦 1 +旗面 1 +既得 1 +既是 1 +既然 1 +日井 1 +日出 1 +日向 1 +日夜 1 +日子 1 +日日 1 +日照 1 +日用 1 +日色 1 +日落 1 +日誌 1 +日賜 1 +旦增 1 +旦夕 1 +早有 1 +早餐 1 +旭烈 1 +旱災 1 +旻寧 1 +昂納 1 +昆丁 1 +昆蟲 1 +昌吉 1 +昌都 1 +明中 1 +明亞 1 +明亮 1 +明代 1 +明內 1 +明園 1 +明宗 1 +明尼 1 +明憲 1 +明昌 1 +明智 1 +明正 1 +明潭 1 +明白 1 +明碁 1 +明翰 1 +明視 1 +易卜 1 +易守 1 
+易幟 1 +易水 1 +易燃 1 +易經 1 +易莎 1 +昔蘭 1 +星團 1 +星塵 1 +星展 1 +星崎 1 +星系 1 +映像 1 +春丕 1 +春季 1 +春會 1 +春田 1 +春筍 1 +春節 1 +春緋 1 +春耕 1 +昨日 1 +昭侯 1 +昭儀 1 +昭宗 1 +昭禮 1 +昭通 1 +是年 1 +是方 1 +是次 1 +時事 1 +時份 1 +時值 1 +時光 1 +時刻 1 +時報 1 +時弊 1 +時稱 1 +時舉 1 +時針 1 +晃動 1 +晉之 1 +晉北 1 +晉哲 1 +晉江 1 +晉級 1 +晒乾 1 +晨間 1 +普世 1 +普什 1 +普伊 1 +普利 1 +普提 1 +普曼 1 +普森 1 +普第 1 +普肯 1 +普芮 1 +普薩 1 +普里 1 +普金 1 +景泰 1 +景灣 1 +晴神 1 +晶瑩 1 +晶閘 1 +智伯 1 +智利 1 +智趣 1 +暑期 1 +暖氣 1 +暗中 1 +暗喻 1 +暗影 1 +暗房 1 +暗指 1 +暗礁 1 +暗紅 1 +暗號 1 +暫別 1 +暫無 1 +暮光 1 +暱稱 1 +暴亂 1 +暴斂 1 +暴死 1 +暴風 1 +暹羅 1 +曄之 1 +曉夫 1 +曉彬 1 +曉得 1 +曉聲 1 +曉舟 1 +曖昧 1 +曬相 1 +曬衣 1 +曲口 1 +曲同 1 +曲張 1 +曲率 1 +曲目 1 +曲線 1 +曲藝 1 +曲阜 1 +曲頜 1 +更低 1 +更佳 1 +更大 1 +更審 1 +更小 1 +更強 1 +更快 1 +更新 1 +更是 1 +更硬 1 +更衣 1 +更輕 1 +更長 1 +曷懶 1 +書本 1 +書裡 1 +書迷 1 +書面 1 +書香 1 +曹家 1 +曹甸 1 +曹記 1 +曼什 1 +曼切 1 +曼哈 1 +曼城 1 +曼寧 1 +曼徹 1 +曼成 1 +曼斯 1 +曼海 1 +曼涅 1 +曼玉 1 +曼科 1 +曼達 1 +曾任 1 +曾孫 1 +曾愛 1 +曾祖 1 +替人 1 +最內 1 +最前 1 +最受 1 +最外 1 +最強 1 +最旺 1 +最最 1 +最末 1 +最東 1 +最純 1 +最遠 1 +會上 1 +會址 1 +會師 1 +會戰 1 +會所 1 +會晤 1 +會章 1 +會見 1 +會計 1 +月色 1 +月薪 1 +有份 1 +有別 1 +有力 1 +有名 1 +有愛 1 +有方 1 +有期 1 +有染 1 +有條 1 +有異 1 +有病 1 +有稱 1 +有花 1 +有點 1 +服刑 1 +朗丹 1 +朗恰 1 +朗杜 1 +朗西 1 +朗豪 1 +朗頓 1 +望族 1 +朝下 1 +朝元 1 +朝政 1 +朝散 1 +朝東 1 +朝聖 1 +朝覲 1 +朝貢 1 +朝陽 1 +期刊 1 +木中 1 +木乃 1 +木刻 1 +木卡 1 +木城 1 +木尼 1 +木屋 1 +木工 1 +木戶 1 +木揚 1 +木斯 1 +木村 1 +木樣 1 +木櫾 1 +木蘭 1 +木造 1 +木鳥 1 +木齊 1 +未入 1 +未敢 1 +未有 1 +未深 1 +未滿 1 +末端 1 +本劇 1 +本名 1 +本城 1 +本始 1 +本季 1 +本島 1 +本市 1 +本德 1 +本書 1 +本營 1 +本目 1 +本省 1 +本社 1 +本縣 1 +本能 1 +本著 1 +本郡 1 +本部 1 +本鄉 1 +本集 1 +本領 1 +札幌 1 +札特 1 +朱里 1 +朴次 1 +朵眼 1 +杉並 1 +李察 1 +杏出 1 +杏子 1 +材官 1 +材質 1 +村旁 1 +杖責 1 +杜乃 1 +杜伊 1 +杜克 1 +杜利 1 +杜成 1 +杜浦 1 +杜甫 1 +杜隆 1 +杜鵑 1 +杯賽 1 +杰仔 1 +東主 1 +東加 1 +東勝 1 +東坡 1 +東姑 1 +東宮 1 +東岸 1 +東巡 1 +東急 1 +東支 1 +東昇 1 +東映 1 +東桑 1 +東條 1 +東武 1 +東涌 1 +東渡 1 +東直 1 +東站 1 +東興 1 +東華 1 +東距 1 +東道 1 +東邊 1 +東郊 1 +東鄉 1 +東鐵 1 +東隧 1 +東風 1 +松下 1 +松坂 1 +松山 1 +松島 1 +松州 1 +松翔 1 +松花 1 +板式 1 +林克 1 +林地 1 +林堡 1 +林場 1 +林威 1 +林布 1 +林斯 1 +林業 1 +林檎 1 +林翼 1 +林胡 1 +林豬 1 +果然 1 +果真 1 +果酒 1 +枝葉 1 +架次 1 +枸杞 1 +柏力 1 +柏加 1 +柏村 1 +柏松 1 +柏臣 1 +染手 1 +染病 1 +柔道 1 +柚木 1 +柝聲 1 
+柢固 1 +查找 1 +查普 1 +查氏 1 +查特 1 +柬埔 1 +柯伊 1 +柯克 1 +柱銘 1 +柳川 1 +柳州 1 +柳德 1 +柳葉 1 +柴電 1 +柿本 1 +栗橋 1 +校呔 1 +校簿 1 +校門 1 +栩如 1 +栩栩 1 +株式 1 +核孔 1 +核實 1 +核工 1 +核彈 1 +核發 1 +核研 1 +核算 1 +根培 1 +根深 1 +根生 1 +根究 1 +根莖 1 +根部 1 +格丁 1 +格仔 1 +格但 1 +格來 1 +格司 1 +格奧 1 +格子 1 +格尼 1 +格拿 1 +格森 1 +格瑪 1 +格莫 1 +格陵 1 +格雷 1 +桂陵 1 +桃子 1 +框架 1 +框線 1 +案例 1 +案達 1 +桐生 1 +桑克 1 +桑德 1 +桑托 1 +桑納 1 +桓子 1 +桓玄 1 +桿菌 1 +梁伐 1 +梁贊 1 +梁龍 1 +梅園 1 +梅帕 1 +梅捷 1 +梅里 1 +梓里 1 +條不 1 +條款 1 +條紋 1 +梧州 1 +梨花 1 +梨香 1 +梭羅 1 +梯隊 1 +梳頜 1 +梵安 1 +棉條 1 +棋局 1 +棋盤 1 +棋聖 1 +棋院 1 +棋類 1 +棒錘 1 +棕色 1 +棕褐 1 +森德 1 +森斯 1 +森納 1 +森費 1 +棲地 1 +棲身 1 +植株 1 +椎名 1 +椰林 1 +楓樹 1 +楚克 1 +楚瑜 1 +楚紅 1 +楠桂 1 +楠溪 1 +業主 1 +業業 1 +業餘 1 +極北 1 +極區 1 +極少 1 +極為 1 +極矮 1 +極長 1 +極闊 1 +極限 1 +楷書 1 +楷模 1 +概要 1 +榆林 1 +榔頭 1 +榕樹 1 +榜羅 1 +榨出 1 +榫眼 1 +榮廷 1 +榮洲 1 +榮茂 1 +榴彈 1 +構思 1 +構造 1 +槍尖 1 +槍尾 1 +槍殺 1 +槍舌 1 +槍術 1 +樂園 1 +樂安 1 +樂官 1 +樂山 1 +樂師 1 +樂手 1 +樂敏 1 +樂樂 1 +樂活 1 +樂環 1 +樂美 1 +樂翠 1 +樂觀 1 +樂趣 1 +樓夢 1 +樓宇 1 +樓層 1 +樓底 1 +樓煩 1 +樓盤 1 +樓面 1 +樓高 1 +標售 1 +標志 1 +標明 1 +標有 1 +標示 1 +標籤 1 +標記 1 +標註 1 +標高 1 +樞密 1 +模一 1 +模里 1 +樣品 1 +樣式 1 +樣貌 1 +樸實 1 +樸歸 1 +樹上 1 +樹幹 1 +樹枝 1 +樹龍 1 +橈腳 1 +橋上 1 +橋樑 1 +橋面 1 +機上 1 +機位 1 +機型 1 +機密 1 +機師 1 +機床 1 +機敏 1 +機械 1 +機理 1 +機種 1 +機能 1 +機製 1 +機遇 1 +橫帶 1 +橫徵 1 +橫渡 1 +橫線 1 +檔案 1 +檔次 1 +檜山 1 +檢驗 1 +檨仔 1 +檳榔 1 +檸七 1 +櫃檯 1 +櫟社 1 +欄目 1 +權氏 1 +權限 1 +次克 1 +次席 1 +次月 1 +次生 1 +次程 1 +次茅 1 +欣快 1 +欲絕 1 +款式 1 +歇根 1 +歌人 1 +歌壇 1 +歌星 1 +歌舞 1 +歌詞 1 +歌頌 1 +歐律 1 +歐盟 1 +歐羅 1 +歐青 1 +歐麥 1 +歡慶 1 +歡樂 1 +正值 1 +正傳 1 +正夫 1 +正子 1 +正宇 1 +正巧 1 +正平 1 +正正 1 +正比 1 +正派 1 +正版 1 +正當 1 +正經 1 +正負 1 +正配 1 +正陽 1 +此事 1 +此地 1 +此夢 1 +此書 1 +此樓 1 +此橋 1 +此片 1 +此處 1 +此語 1 +此起 1 +此路 1 +此陵 1 +此項 1 +此魚 1 +步伐 1 +步蟾 1 +步行 1 +步驟 1 +武克 1 +武力 1 +武威 1 +武帝 1 +武廟 1 +武廠 1 +武德 1 +武打 1 +武王 1 +武略 1 +武皇 1 +武者 1 +武藏 1 +歲月 1 +歷代 1 +歷來 1 +歷屬 1 +歷程 1 +歸來 1 +歸入 1 +歸到 1 +歸功 1 +歸咎 1 +歸案 1 +歸真 1 +歸還 1 +歸附 1 +死不 1 +死刑 1 +死回 1 +死因 1 +死地 1 +死戰 1 +死期 1 +死板 1 +死狀 1 +死而 1 +死黨 1 +殉教 1 +殉爆 1 +殉職 1 +殊榮 1 +殘疾 1 +殘破 1 +殘遺 1 +殘部 1 +殲滅 1 +殺人 1 +殺手 1 +殺機 1 +殼層 1 +殼體 1 +殿堂 1 +毀壞 1 +毀容 1 +毅仁 1 +毅然 1 +母拿 1 +母會 1 +母校 1 +母狼 1 +母猴 1 +母艦 1 +母語 1 
+母貓 1 +毎年 1 +每元 1 +每座 1 +每戶 1 +每所 1 +每枚 1 +每每 1 +每股 1 +每邊 1 +每集 1 +每鼎 1 +毒​ 1 +毒品 1 +毒死 1 +毒癮 1 +毒舌 1 +毓林 1 +毓楓 1 +毓芳 1 +比哈 1 +比喻 1 +比妥 1 +比得 1 +比恩 1 +比斯 1 +比方 1 +比格 1 +比武 1 +比薩 1 +比袍 1 +比褂 1 +比魯 1 +毗闍 1 +毛色 1 +毛豬 1 +毛髮 1 +毫安 1 +毫無 1 +毯子 1 +氈幕 1 +氏亞 1 +氏奇 1 +民事 1 +民俗 1 +民力 1 +民居 1 +民工 1 +民心 1 +民意 1 +民房 1 +民柬 1 +民權 1 +民法 1 +民盟 1 +民答 1 +民航 1 +民英 1 +民謠 1 +民豐 1 +民選 1 +民鐸 1 +民防 1 +氣吞 1 +氣息 1 +氣態 1 +氣憤 1 +氣旋 1 +氣槍 1 +氣死 1 +氣溫 1 +氣燄 1 +氣胸 1 +氣象 1 +氧釩 1 +氨基 1 +氫化 1 +氫氣 1 +氫鍵 1 +氮素 1 +氯乙 1 +氯氧 1 +氯雷 1 +水世 1 +水份 1 +水圈 1 +水壓 1 +水床 1 +水扁 1 +水攻 1 +水晶 1 +水氯 1 +水汽 1 +水流 1 +水火 1 +水球 1 +水產 1 +水療 1 +水翼 1 +水能 1 +水警 1 +水面 1 +水鳥 1 +水鴨 1 +永久 1 +永元 1 +永升 1 +永吉 1 +永和 1 +永壽 1 +永平 1 +永成 1 +永昌 1 +永權 1 +永續 1 +永輝 1 +永逸 1 +永靖 1 +汁液 1 +求偶 1 +求出 1 +求助 1 +求問 1 +求婚 1 +求情 1 +求援 1 +求籤 1 +求醫 1 +汝寧 1 +汞柱 1 +江協 1 +江口 1 +江浙 1 +江海 1 +江源 1 +江漢 1 +江灣 1 +江谷 1 +江都 1 +江閣 1 +江魚 1 +池塘 1 +池田 1 +污損 1 +污點 1 +汲及 1 +決意 1 +決擇 1 +決然 1 +決裂 1 +汽油 1 +汽船 1 +沃奎 1 +沃季 1 +沃州 1 +沃斯 1 +沃爾 1 +沃羅 1 +沈氏 1 +沉水 1 +沉迷 1 +沉重 1 +沉降 1 +沒能 1 +沒落 1 +沒錯 1 +沖之 1 +沖片 1 +沖走 1 +沙丘 1 +沙伯 1 +沙依 1 +沙尼 1 +沙崙 1 +沙巴 1 +沙普 1 +沙梁 1 +沙池 1 +沙洛 1 +沙漠 1 +沙瓦 1 +沙田 1 +沙畹 1 +沙蠶 1 +沙迦 1 +沙邦 1 +沙鄢 1 +沙里 1 +沢駅 1 +河兒 1 +河卡 1 +河圖 1 +河岸 1 +河心 1 +河段 1 +河漫 1 +河西 1 +油煙 1 +油片 1 +油田 1 +油菜 1 +油量 1 +油電 1 +治中 1 +治勲 1 +治勳 1 +治喪 1 +治國 1 +治學 1 +治水 1 +治理 1 +治軍 1 +沽渚 1 +沾解 1 +沿線 1 +沿襲 1 +沿途 1 +泊苷 1 +法令 1 +法凱 1 +法師 1 +法政 1 +法斯 1 +法格 1 +法比 1 +法海 1 +法特 1 +法登 1 +法羅 1 +法老 1 +法西 1 +法輪 1 +法迪 1 +泛濫 1 +波利 1 +波包 1 +波卡 1 +波及 1 +波因 1 +波圖 1 +波城 1 +波形 1 +波恩 1 +波折 1 +波普 1 +波森 1 +波爾 1 +波瓦 1 +波的 1 +波羅 1 +波西 1 +波里 1 +波錠 1 +波門 1 +波黑 1 +泥土 1 +泥潭 1 +注資 1 +泰共 1 +泰勒 1 +泰北 1 +泰始 1 +泰州 1 +泰曾 1 +泰琳 1 +泰米 1 +泰興 1 +泳屋 1 +泳灘 1 +洋介 1 +洋坪 1 +洗劫 1 +洗衣 1 +洛伊 1 +洛伐 1 +洛佩 1 +洛加 1 +洛城 1 +洛塞 1 +洛尼 1 +洛布 1 +洛恩 1 +洛書 1 +洛曼 1 +洛洛 1 +洛特 1 +洛珊 1 +洛琳 1 +洛茲 1 +洛蒙 1 +洛雷 1 +洛頓 1 +洞子 1 +洞穴 1 +洞窟 1 +津貼 1 +洩慾 1 +洩漏 1 +洪堡 1 +洪家 1 +洪橋 1 +洵美 1 +活出 1 +活化 1 +活埋 1 +活水 1 +活潑 1 +活現 1 +活用 1 +活躍 1 +活靈 1 +派對 1 +派往 1 +流下 1 +流亡 1 +流入 1 +流出 1 +流嶼 1 +流放 1 +流星 1 +流標 1 +流民 1 +流水 1 +流浪 1 +流產 1 +流程 1 +流言 1 +流逝 1 +流露 1 +浚稽 1 +浦市 1 +浦那 1 +浦鎮 1 
+浩文 1 +浩特 1 +浪底 1 +浪漫 1 +浪潮 1 +浪費 1 +浪跡 1 +浮動 1 +浴場 1 +海事 1 +海光 1 +海因 1 +海地 1 +海姆 1 +海峰 1 +海布 1 +海平 1 +海廷 1 +海怡 1 +海昌 1 +海景 1 +海淀 1 +海港 1 +海濱 1 +海灘 1 +海爾 1 +海神 1 +海秀 1 +海老 1 +海航 1 +海藍 1 +海螺 1 +海豐 1 +海陸 1 +海風 1 +海鷗 1 +浸染 1 +浸泡 1 +涅夫 1 +涅托 1 +涅日 1 +涅爾 1 +涅米 1 +涇波 1 +涇陽 1 +消極 1 +消耗 1 +消退 1 +消除 1 +涉世 1 +涉嫌 1 +涉足 1 +涪江 1 +涮煮 1 +液化 1 +液壓 1 +涵蓋 1 +淄川 1 +淑妃 1 +淑怡 1 +淘寶 1 +淘金 1 +淡定 1 +淡色 1 +淨土 1 +淪落 1 +淪陷 1 +淫蕩 1 +淮南 1 +淮許 1 +深受 1 +深埋 1 +深層 1 +深度 1 +深感 1 +深有 1 +深柢 1 +深海 1 +深港 1 +深溪 1 +深紅 1 +深綠 1 +深色 1 +深處 1 +深造 1 +淵源 1 +混亂 1 +混凝 1 +混沌 1 +混為 1 +混燃 1 +淹浸 1 +淺水 1 +淺綠 1 +添丁 1 +清償 1 +清凈 1 +清單 1 +清帝 1 +清拆 1 +清教 1 +清文 1 +清明 1 +清潔 1 +清理 1 +清道 1 +清遠 1 +清還 1 +清鄉 1 +減低 1 +減刑 1 +減小 1 +減退 1 +渠子 1 +渣打 1 +渤海 1 +測繪 1 +渭州 1 +港交 1 +港區 1 +港府 1 +渴求 1 +游標 1 +游說 1 +湄洲 1 +湖上 1 +湖人 1 +湖名 1 +湖畔 1 +湘南 1 +湘西 1 +湘陰 1 +湛恩 1 +湧現 1 +湮滅 1 +湯料 1 +源於 1 +源田 1 +準基 1 +準將 1 +準確 1 +溝壑 1 +溝齒 1 +溢漏 1 +溪峪 1 +溪水 1 +溪美 1 +溪鱂 1 +溫克 1 +溫哥 1 +溫坡 1 +溫徹 1 +溫斯 1 +溫柔 1 +溶劑 1 +溶氣 1 +滑板 1 +滑稽 1 +滑鼠 1 +滕氏 1 +滕費 1 +滙業 1 +滬江 1 +滯洪 1 +滲出 1 +滴下 1 +滾動 1 +滾石 1 +滿意 1 +滿清 1 +滿載 1 +漁村 1 +漁梁 1 +漁船 1 +漂浮 1 +漆器 1 +演成 1 +演戲 1 +演技 1 +演繹 1 +演義 1 +演講 1 +漢中 1 +漢姆 1 +漢娜 1 +漢字 1 +漢桓 1 +漫漶 1 +漫長 1 +漱芳 1 +漲幅 1 +漸變 1 +漸趨 1 +潔瑩 1 +潘丘 1 +潘恩 1 +潘迪 1 +潛伏 1 +潛力 1 +潛望 1 +潛水 1 +潛游 1 +潟湖 1 +潢川 1 +潭尾 1 +潭村 1 +潭東 1 +潭陽 1 +潮蟹 1 +潰散 1 +澀谷 1 +澤尻 1 +澤爾 1 +激勵 1 +激發 1 +激素 1 +激進 1 +濃厚 1 +濃煙 1 +濕地 1 +濟世 1 +濟亞 1 +濟科 1 +濟邦 1 +濟鼐 1 +濫用 1 +濱海 1 +濾掉 1 +瀏陽 1 +瀕危 1 +瀘溪 1 +瀝泗 1 +瀟洒 1 +火上 1 +火不 1 +火候 1 +火喉 1 +火山 1 +火心 1 +火掌 1 +火炮 1 +火爆 1 +火鍋 1 +灰棕 1 +灰雲 1 +灰黑 1 +災禍 1 +炎熱 1 +炙手 1 +炭疽 1 +炸彈 1 +炸死 1 +炸毀 1 +炸糕 1 +為一 1 +為二 1 +為力 1 +為時 1 +為然 1 +為零 1 +烈格 1 +烏代 1 +烏來 1 +烏宗 1 +烏干 1 +烏德 1 +烏拉 1 +烏普 1 +烏腳 1 +烏魯 1 +烷基 1 +烹煮 1 +焊接 1 +焗豆 1 +焚屍 1 +焚燒 1 +焜耀 1 +無二 1 +無俚 1 +無分 1 +無危 1 +無厭 1 +無子 1 +無家 1 +無幾 1 +無心 1 +無忌 1 +無所 1 +無暇 1 +無有 1 +無機 1 +無氧 1 +無水 1 +無派 1 +無產 1 +無疑 1 +無盡 1 +無罪 1 +無聊 1 +無能 1 +無與 1 +無色 1 +無處 1 +無視 1 +無誤 1 +無過 1 +無量 1 +無限 1 +無雙 1 +無頭 1 +無點 1 +無齒 1 +焦尼 1 +焦點 1 +然不 1 +煉油 1 +煉金 1 +煙囪 1 +煙槍 1 +煙霧 1 +煜全 1 +煤建 1 +煤氣 1 +照射 1 +煮食 1 +煽動 1 +熄匙 1 +熊族 1 +熊本 1 +熊隊 1 +熏烤 1 +熏陶 1 +熔化 1 
+熔岩 1 +熟知 1 +熟釜 1 +熱值 1 +熱刺 1 +熱力 1 +熱夫 1 +熱心 1 +熱愛 1 +熱羅 1 +熱身 1 +熱量 1 +熱電 1 +熱鬧 1 +熾熱 1 +燃氣 1 +燈謎 1 +燒灼 1 +燒荒 1 +燕窩 1 +營口 1 +營團 1 +營地 1 +營寨 1 +營帳 1 +營火 1 +營盤 1 +營造 1 +營長 1 +營養 1 +燦爛 1 +燭光 1 +爪部 1 +爪龍 1 +爬到 1 +爬山 1 +爬梯 1 +爭冠 1 +爭占 1 +爭吵 1 +爭奪 1 +爭寵 1 +爭得 1 +爭界 1 +爭相 1 +爭端 1 +爭競 1 +爭論 1 +爭鬥 1 +父風 1 +爸爸 1 +爺爺 1 +爽文 1 +爾他 1 +爾南 1 +爾吉 1 +爾地 1 +爾基 1 +爾塔 1 +爾布 1 +爾帕 1 +爾恩 1 +爾恰 1 +爾拉 1 +爾摩 1 +爾斐 1 +爾普 1 +爾格 1 +爾歇 1 +爾比 1 +爾汗 1 +爾法 1 +爾溫 1 +爾炘 1 +爾瑪 1 +爾瓦 1 +爾發 1 +爾皮 1 +爾納 1 +爾紐 1 +爾蒙 1 +爾蘇 1 +爾虎 1 +爾諾 1 +爾貝 1 +爾辛 1 +爾達 1 +爾金 1 +爾頓 1 +爾高 1 +爾默 1 +爾齊 1 +牆上 1 +牆身 1 +牆面 1 +片劑 1 +片尾 1 +片斷 1 +片頭 1 +版主 1 +版畫 1 +牌照 1 +牙喙 1 +牙因 1 +牙籤 1 +牙線 1 +牙薩 1 +牙醫 1 +牛斯 1 +牛池 1 +牛潭 1 +牛石 1 +牛花 1 +牛首 1 +牛鼻 1 +牟利 1 +牟合 1 +牟尼 1 +牡蠣 1 +牧區 1 +牧民 1 +牧谷 1 +物件 1 +物產 1 +物象 1 +物鏡 1 +物阜 1 +牲畜 1 +特伊 1 +特伯 1 +特佛 1 +特備 1 +特優 1 +特凱 1 +特利 1 +特務 1 +特勞 1 +特區 1 +特夸 1 +特奇 1 +特威 1 +特尼 1 +特工 1 +特律 1 +特德 1 +特快 1 +特意 1 +特攝 1 +特普 1 +特曼 1 +特森 1 +特洛 1 +特派 1 +特瓦 1 +特產 1 +特異 1 +特福 1 +特米 1 +特菲 1 +特萊 1 +特重 1 +特隆 1 +特雷 1 +特魯 1 +牽引 1 +牽牛 1 +犧牲 1 +犬科 1 +犬種 1 +犬髖 1 +犯人 1 +狂亂 1 +狄克 1 +狄刻 1 +狄拉 1 +狐庸 1 +狡猾 1 +狹小 1 +狼人 1 +狼堡 1 +狼影 1 +狼群 1 +猜忌 1 +猜想 1 +猝死 1 +猴年 1 +猴群 1 +猶大 1 +獅子 1 +獎牌 1 +獎盃 1 +獨一 1 +獨具 1 +獨唱 1 +獨孤 1 +獨家 1 +獨有 1 +獨眠 1 +獨行 1 +獨資 1 +獲准 1 +獲判 1 +獲勳 1 +獲召 1 +獲悉 1 +獲授 1 +獲獎 1 +獲益 1 +獲薦 1 +獲選 1 +獲頒 1 +獵物 1 +獸人 1 +獸族 1 +獻上 1 +獻堂 1 +獻策 1 +獻議 1 +玄天 1 +玄宗 1 +玄武 1 +玄策 1 +玄貓 1 +玉柴 1 +玉純 1 +玉魔 1 +玉鳳 1 +玉麟 1 +王儲 1 +王冠 1 +王墓 1 +王宮 1 +王座 1 +王爾 1 +王蓮 1 +玩伴 1 +玩弄 1 +玩法 1 +玩笑 1 +玫瑰 1 +玲玲 1 +玷染 1 +珍寶 1 +珠璣 1 +珠鋼 1 +班克 1 +班卓 1 +班子 1 +班布 1 +班機 1 +班次 1 +班禪 1 +班級 1 +班讓 1 +現役 1 +現身 1 +球壇 1 +球差 1 +球星 1 +球根 1 +球狀 1 +球道 1 +球面 1 +理性 1 +理曼 1 +理由 1 +琉古 1 +琉西 1 +琴弓 1 +琺琅 1 +瑙恩 1 +瑜伽 1 +瑜陀 1 +瑞坦 1 +瑞安 1 +瑞拉 1 +瑞普 1 +瑞欽 1 +瑞爾 1 +瑞阿 1 +瑞霖 1 +瑟洛 1 +瑟芬 1 +瑣法 1 +瑪君 1 +瑪哈 1 +瑪喀 1 +瑪莎 1 +瑪諾 1 +環保 1 +環帶 1 +環狀 1 +環節 1 +環繞 1 +瓊斯 1 +瓊珊 1 +瓊茲 1 +瓜里 1 +瓦內 1 +瓦卡 1 +瓦史 1 +瓦坦 1 +瓦多 1 +瓦尼 1 +瓦德 1 +瓦本 1 +瓦桑 1 +瓦涅 1 +瓦瓦 1 +瓦納 1 +瓦蒂 1 +瓦薩 1 +瓦解 1 +瓦里 1 +甄別 1 +甘共 1 +甘斯 1 +甘油 1 +甘草 1 +甘馬 1 +甚厚 1 +甚嚴 1 +甚多 1 +甚小 1 +甚深 1 +甚篤 1 +甜兒 1 +甜度 1 +生主 1 +生出 1 +生動 1 +生天 1 +生子 1 
+生平 1 +生性 1 +生效 1 +生機 1 +生殺 1 +生氣 1 +生火 1 +生肖 1 +生財 1 +生還 1 +產出 1 +產經 1 +甥女 1 +甦醒 1 +用人 1 +用來 1 +用光 1 +用兵 1 +用字 1 +用完 1 +用手 1 +用有 1 +用水 1 +用藥 1 +用計 1 +用詞 1 +田園 1 +田地 1 +田心 1 +田急 1 +田納 1 +田谷 1 +田野 1 +田頭 1 +甲山 1 +甲殼 1 +申辦 1 +男人 1 +男士 1 +男嬰 1 +男方 1 +男童 1 +界定 1 +界限 1 +留傳 1 +留哥 1 +留待 1 +留空 1 +留聲 1 +留良 1 +畜牧 1 +畜養 1 +畢打 1 +畢氏 1 +畢蘭 1 +畢馬 1 +略帶 1 +略有 1 +略為 1 +畫下 1 +畫中 1 +畫分 1 +畫會 1 +畫畫 1 +畫面 1 +異事 1 +異姓 1 +異度 1 +異形 1 +異曲 1 +異母 1 +異端 1 +當上 1 +當下 1 +當值 1 +當勞 1 +當官 1 +當屆 1 +當政 1 +當斯 1 +當晚 1 +當期 1 +當歸 1 +當面 1 +疆域 1 +疏浚 1 +疏遠 1 +疑點 1 +疙瘩 1 +疲勞 1 +疲弱 1 +疼痛 1 +疾首 1 +病原 1 +病患 1 +病情 1 +病歷 1 +病死 1 +病重 1 +症候 1 +症狀 1 +痕跡 1 +痙攣 1 +痛心 1 +痛欲 1 +痢疾 1 +瘧疾 1 +癥狀 1 +登丹 1 +登尼 1 +發佈 1 +發作 1 +發兵 1 +發呆 1 +發奮 1 +發拉 1 +發揚 1 +發改 1 +發放 1 +發洩 1 +發炎 1 +發燒 1 +發牌 1 +發球 1 +發病 1 +發聲 1 +發財 1 +發車 1 +發配 1 +白丁 1 +白井 1 +白公 1 +白利 1 +白化 1 +白堊 1 +白天 1 +白宮 1 +白砂 1 +白蓮 1 +白蛇 1 +白質 1 +白軍 1 +白銅 1 +白陵 1 +白雲 1 +白面 1 +白頸 1 +白鹿 1 +白麗 1 +百事 1 +百五 1 +百代 1 +百億 1 +百兆 1 +百六 1 +百帕 1 +百幾 1 +百廢 1 +百濟 1 +百無 1 +百老 1 +百花 1 +百計 1 +百貨 1 +百鳴 1 +的士 1 +的尼 1 +的斯 1 +的確 1 +的黎 1 +皇位 1 +皇冠 1 +皇城 1 +皇太 1 +皇妃 1 +皇廷 1 +皇權 1 +皇發 1 +皈依 1 +皓若 1 +皮亞 1 +皮克 1 +皮內 1 +皮奇 1 +皮奧 1 +皮杜 1 +皮耶 1 +皮雅 1 +皰疹 1 +盆地 1 +盈盈 1 +益友 1 +益城 1 +益壽 1 +益新 1 +益處 1 +盔甲 1 +盛事 1 +盛大 1 +盛妝 1 +盛揮 1 +盛產 1 +盛行 1 +盜用 1 +盟軍 1 +盡到 1 +盡喪 1 +盡情 1 +盡糧 1 +盡頭 1 +監工 1 +監控 1 +監測 1 +監禁 1 +監聽 1 +盤踞 1 +盧加 1 +盧普 1 +盧溝 1 +盧瓦 1 +盧甘 1 +盧福 1 +目相 1 +目睹 1 +目鏡 1 +直勉 1 +直屬 1 +直覺 1 +直言 1 +直說 1 +直間 1 +相位 1 +相大 1 +相容 1 +相差 1 +相悖 1 +相應 1 +相挺 1 +相異 1 +相看 1 +相稱 1 +相約 1 +相繼 1 +相聲 1 +相若 1 +相處 1 +相見 1 +相較 1 +相通 1 +相速 1 +相鄰 1 +相間 1 +盾座 1 +盾系 1 +省務 1 +省思 1 +省油 1 +眈眈 1 +眉山 1 +眉弄 1 +看中 1 +看出 1 +看台 1 +看得 1 +看看 1 +看管 1 +看見 1 +看透 1 +看重 1 +真光 1 +真北 1 +真名 1 +真好 1 +真希 1 +真木 1 +真核 1 +眯眼 1 +眷村 1 +眼下 1 +眼淚 1 +眼狀 1 +眼球 1 +眼皮 1 +眼神 1 +眾經 1 +眾說 1 +睡眠 1 +睡覺 1 +督撫 1 +睾丁 1 +睿智 1 +瞪羚 1 +瞬時 1 +瞭如 1 +矗立 1 +矢口 1 +知府 1 +知曉 1 +知留 1 +知足 1 +短少 1 +短期 1 +短毛 1 +短草 1 +短裙 1 +短詩 1 +短語 1 +短音 1 +短髮 1 +矮星 1 +石像 1 +石器 1 +石塊 1 +石材 1 +石湖 1 +石灰 1 +石牆 1 +石牌 1 +石華 1 +石頭 1 +砂拉 1 +砂漿 1 +砂紙 1 +砍伐 1 +砒霜 1 +研磨 1 +砝碼 1 +破損 1 +破滅 1 +破舊 1 +破落 1 +硝庫 1 +硝酸 1 
+硫酸 1 +硬幣 1 +碑亭 1 +碑刻 1 +碧嘉 1 +碧波 1 +碧琴 1 +碰撞 1 +碳紙 1 +碳酸 1 +確知 1 +確診 1 +磁性 1 +磐田 1 +磚室 1 +磨坊 1 +磨折 1 +磨槽 1 +磷化 1 +磷素 1 +磷酸 1 +礦場 1 +礦物 1 +礦石 1 +礦藏 1 +示人 1 +示愛 1 +社皮 1 +社論 1 +社長 1 +祁鏞 1 +祈願 1 +祐希 1 +祖上 1 +祖圭 1 +祖宗 1 +祖籍 1 +祖魯 1 +神仙 1 +神偷 1 +神器 1 +神明 1 +神殿 1 +神社 1 +神策 1 +神籤 1 +神魔 1 +祥子 1 +票據 1 +票數 1 +祭司 1 +祭壇 1 +祭師 1 +祭物 1 +祭祀 1 +祭酒 1 +祿勸 1 +祿山 1 +禁煙 1 +禁用 1 +禁藥 1 +禁賽 1 +福克 1 +福安 1 +福康 1 +福德 1 +福慧 1 +福池 1 +福清 1 +福瓦 1 +禪師 1 +禮堂 1 +禮濤 1 +禮炮 1 +禮物 1 +禱文 1 +禽流 1 +秀實 1 +秀康 1 +秀怡 1 +秀珠 1 +私下 1 +私交 1 +私奔 1 +私宅 1 +私家 1 +私立 1 +私財 1 +秉國 1 +秋人 1 +秋季 1 +秋山 1 +秋爽 1 +秋興 1 +秋香 1 +科他 1 +科伊 1 +科多 1 +科屬 1 +科德 1 +科恩 1 +科教 1 +科朗 1 +科目 1 +科維 1 +秘指 1 +秘果 1 +租予 1 +租務 1 +租地 1 +租戶 1 +租用 1 +秦城 1 +秦州 1 +秦晉 1 +秦朝 1 +秦石 1 +秩序 1 +移交 1 +移往 1 +移植 1 +移至 1 +移送 1 +稀釋 1 +稅項 1 +稍為 1 +稗官 1 +種內 1 +種名 1 +種子 1 +種屬 1 +稱海 1 +稱病 1 +稱銜 1 +稻子 1 +稻草 1 +稼祥 1 +穀物 1 +穆宗 1 +穆拉 1 +穆薩 1 +積山 1 +積良 1 +穩固 1 +穩妥 1 +究底 1 +究竟 1 +穹哇 1 +空出 1 +空前 1 +空名 1 +空客 1 +空戰 1 +空隙 1 +空難 1 +穿幫 1 +穿戴 1 +穿甲 1 +穿行 1 +穿過 1 +突尼 1 +突感 1 +突現 1 +窄袖 1 +窗口 1 +窗外 1 +窘境 1 +窟檐 1 +窮苦 1 +窮追 1 +窯洞 1 +竄紅 1 +竊聽 1 +立交 1 +立國 1 +立村 1 +立營 1 +立花 1 +立蒙 1 +立面 1 +立體 1 +站內 1 +站名 1 +站坪 1 +站廳 1 +站點 1 +章回 1 +章斐 1 +童女 1 +童男 1 +端川 1 +競相 1 +竹器 1 +竹治 1 +竹溪 1 +竹片 1 +符桐 1 +第廿 1 +第比 1 +第谷 1 +笳冬 1 +等位 1 +等客 1 +等號 1 +筐仔 1 +筒狀 1 +答應 1 +答那 1 +策軍 1 +算出 1 +算術 1 +管制 1 +管子 1 +箬松 1 +箱型 1 +箴言 1 +節度 1 +節節 1 +範疇 1 +篇累 1 +篡位 1 +篡國 1 +篡地 1 +簡化 1 +簡約 1 +簡訊 1 +簽名 1 +簽定 1 +簽認 1 +簽證 1 +簽賬 1 +簿公 1 +籃筐 1 +籌伯 1 +籌備 1 +籌措 1 +籌款 1 +籌資 1 +籌辦 1 +籍貫 1 +籠式 1 +籠草 1 +米內 1 +米加 1 +米南 1 +米古 1 +米哈 1 +米思 1 +米沙 1 +米洛 1 +米烏 1 +米琳 1 +米線 1 +米酒 1 +米高 1 +粉碎 1 +粉紅 1 +粉絲 1 +粒體 1 +粗壯 1 +粗鱗 1 +粵明 1 +粽子 1 +精力 1 +精子 1 +精密 1 +精心 1 +精湛 1 +精算 1 +精索 1 +精蓄 1 +精裝 1 +糖尿 1 +糖蒜 1 +糟糕 1 +糧儲 1 +糧絕 1 +糧餉 1 +系數 1 +糾正 1 +糾紛 1 +紀元 1 +約尼 1 +約拉 1 +約熱 1 +約長 1 +紅旗 1 +紅日 1 +紅杏 1 +紅樹 1 +紅玉 1 +紅磨 1 +紅茶 1 +紅襪 1 +紅遍 1 +紅酒 1 +紅點 1 +紋路 1 +紋飾 1 +納克 1 +納入 1 +納加 1 +納哥 1 +納塔 1 +納多 1 +納夫 1 +納巴 1 +納波 1 +納澤 1 +納特 1 +納瓦 1 +納蘇 1 +納西 1 +納雷 1 +紐國 1 +紐斯 1 +紐澤 1 +紐芬 1 +紐華 1 +紐黑 1 +純一 1 +純凈 1 +純樸 1 +純陽 1 +紙上 1 +紙條 1 +紙盒 1 +級數 1 +紛紜 1 +素包 1 +素食 1 +素餡 1 +索不 1 
+索倫 1 +索尼 1 +索居 1 +索比 1 +索洛 1 +索溪 1 +索爾 1 +索維 1 +索西 1 +索賠 1 +索頜 1 +紮實 1 +累牘 1 +累計 1 +細岡 1 +細窄 1 +細菌 1 +細部 1 +細長 1 +紳士 1 +紹儀 1 +紹榮 1 +紺三 1 +終審 1 +終身 1 +組件 1 +組像 1 +組別 1 +組口 1 +組態 1 +組隊 1 +結交 1 +結冰 1 +結尾 1 +結雅 1 +絕壁 1 +絕大 1 +絕後 1 +絕版 1 +絕罰 1 +絞刑 1 +絞死 1 +絞痛 1 +給定 1 +給職 1 +給藥 1 +給體 1 +統帥 1 +統籌 1 +絲山 1 +絲帶 1 +綏遠 1 +經國 1 +經意 1 +經文 1 +經昌 1 +經期 1 +經由 1 +經界 1 +綜理 1 +綜錄 1 +綠化 1 +綠帶 1 +綠滙 1 +綠燈 1 +綠社 1 +綠黨 1 +維健 1 +維勒 1 +維匯 1 +維塔 1 +維希 1 +維德 1 +維拿 1 +維斯 1 +維景 1 +維生 1 +維祀 1 +維羅 1 +維茲 1 +維西 1 +維記 1 +維護 1 +綱領 1 +網址 1 +網易 1 +網線 1 +網購 1 +綺塍 1 +綺色 1 +綽號 1 +綿羊 1 +緊張 1 +緊緊 1 +緊要 1 +緊貼 1 +緊逼 1 +緊閉 1 +線上 1 +線前 1 +線度 1 +線條 1 +線索 1 +線道 1 +締造 1 +編上 1 +編導 1 +編程 1 +編篡 1 +編繪 1 +編纂 1 +編者 1 +編腔 1 +編隊 1 +緩衝 1 +緩解 1 +緩鬢 1 +緩龍 1 +緯來 1 +練兵 1 +縣市 1 +縣裡 1 +縫製 1 +縮寫 1 +縮小 1 +縱使 1 +縱觀 1 +縱隊 1 +總區 1 +總和 1 +總局 1 +總站 1 +總行 1 +總裁 1 +總計 1 +總辦 1 +績效 1 +繁多 1 +繁瑣 1 +繁盛 1 +繁雜 1 +繁體 1 +織胺 1 +繞境 1 +繞開 1 +繩架 1 +繳付 1 +繳納 1 +繼業 1 +繼科 1 +繽紛 1 +續航 1 +續部 1 +纏足 1 +纜車 1 +缺口 1 +缺失 1 +缺少 1 +缺氧 1 +缺血 1 +罕有 1 +罪惡 1 +置有 1 +置物 1 +罰則 1 +署理 1 +罵聲 1 +罷免 1 +罷工 1 +罹癌 1 +罹難 1 +羅乞 1 +羅什 1 +羅來 1 +羅先 1 +羅加 1 +羅培 1 +羅姆 1 +羅巴 1 +羅德 1 +羅恩 1 +羅拔 1 +羅提 1 +羅曼 1 +羅柔 1 +羅森 1 +羅洛 1 +羅涅 1 +羅納 1 +羅索 1 +羅維 1 +羅費 1 +羅迪 1 +羅里 1 +羅隆 1 +羊圈 1 +羊犬 1 +美味 1 +美孚 1 +美寶 1 +美幸 1 +美擬 1 +美林 1 +美爾 1 +美特 1 +美琴 1 +美知 1 +美稱 1 +美索 1 +美聯 1 +美聲 1 +美薇 1 +美術 1 +美西 1 +美觀 1 +美譽 1 +美里 1 +美頓 1 +美食 1 +羚羊 1 +羞恥 1 +群峰 1 +群族 1 +群索 1 +群組 1 +群落 1 +群速 1 +群雄 1 +群體 1 +羨慕 1 +義久 1 +義安 1 +義工 1 +義弘 1 +義春 1 +義民 1 +義父 1 +義項 1 +羯羅 1 +羱羊 1 +羽田 1 +羽絨 1 +翌日 1 +習經 1 +翔麟 1 +翠鳥 1 +翰內 1 +翰麥 1 +翻覆 1 +翼手 1 +耀樞 1 +耀武 1 +耀邦 1 +老人 1 +老匯 1 +老名 1 +老大 1 +老套 1 +老婦 1 +老將 1 +老少 1 +老弱 1 +老橋 1 +老漢 1 +考上 1 +考夫 1 +考尼 1 +考柯 1 +考牙 1 +考生 1 +考究 1 +考績 1 +考進 1 +考選 1 +而三 1 +而代 1 +而再 1 +而出 1 +而已 1 +而復 1 +而至 1 +耐受 1 +耐庵 1 +耐玩 1 +耐航 1 +耳光 1 +耳勺 1 +耳孔 1 +耳忒 1 +耳朵 1 +耳珠 1 +耳環 1 +耳癤 1 +耳蝸 1 +耳門 1 +耳骨 1 +耶特 1 +耶索 1 +耶路 1 +耽擱 1 +聆聽 1 +聊賴 1 +聖人 1 +聖保 1 +聖克 1 +聖名 1 +聖彌 1 +聖彼 1 +聖徒 1 +聖拉 1 +聖歌 1 +聖水 1 +聖求 1 +聖潔 1 +聖祖 1 +聖神 1 +聖經 1 +聖訓 1 +聖路 1 +聖體 1 +聘問 1 +聘用 1 +聚氯 1 +聚禮 1 +聚苯 1 +聚變 1 +聚體 1 +聞名 1 +聞言 1 +聯姻 1 +聯播 1 +聯江 1 +聯浦 1 
+聯產 1 +聯美 1 +聯酋 1 +聰敏 1 +聲恆 1 +聲援 1 +聲波 1 +聲谷 1 +聲門 1 +聲音 1 +聶丞 1 +職棒 1 +聽到 1 +聽命 1 +聽從 1 +聽眾 1 +聽聞 1 +聾人 1 +肅宗 1 +肆意 1 +肉夾 1 +肉湯 1 +肉瘤 1 +肉緊 1 +肌肉 1 +肖嚴 1 +肚臍 1 +肚餓 1 +股市 1 +股本 1 +肥牛 1 +肥田 1 +肥胖 1 +肯亞 1 +肯特 1 +育有 1 +育樂 1 +育空 1 +肺病 1 +胃石 1 +背上 1 +背依 1 +背包 1 +背叛 1 +背後 1 +背靠 1 +背面 1 +背鰭 1 +胚胎 1 +胞弟 1 +胡德 1 +胡特 1 +胡禮 1 +胡蜂 1 +胡馬 1 +胸痛 1 +胸管 1 +胸部 1 +胸鰭 1 +能人 1 +能否 1 +能幹 1 +能為 1 +脊椎 1 +脫疽 1 +脫落 1 +脫隊 1 +脫離 1 +脱口 1 +脾氣 1 +腐敗 1 +腐蝕 1 +腓力 1 +腔蛇 1 +腫瘤 1 +腳掌 1 +腳本 1 +腳點 1 +腸胃 1 +腸道 1 +腸骨 1 +腿部 1 +膝傷 1 +膝頭 1 +膠州 1 +膠東 1 +膠澳 1 +膠體 1 +膨脹 1 +膽酸 1 +臉頰 1 +臉龐 1 +臥兒 1 +臥龍 1 +臨榆 1 +臨終 1 +臨高 1 +自作 1 +自保 1 +自信 1 +自卑 1 +自受 1 +自在 1 +自學 1 +自帶 1 +自強 1 +自從 1 +自成 1 +自用 1 +自發 1 +自禁 1 +自製 1 +自訂 1 +自負 1 +自賞 1 +自辦 1 +至上 1 +至善 1 +至是 1 +至柔 1 +至正 1 +至死 1 +致使 1 +致函 1 +致恐 1 +致病 1 +致瘋 1 +致癌 1 +臺大 1 +舀出 1 +舅父 1 +與倫 1 +與姆 1 +與願 1 +興國 1 +興學 1 +興業 1 +興海 1 +興祖 1 +舉世 1 +舉例 1 +舉國 1 +舉止 1 +舉薦 1 +舉起 1 +舊友 1 +舊屋 1 +舊時 1 +舊稱 1 +舊部 1 +舊金 1 +舌劍 1 +舌頭 1 +舍爾 1 +舍訥 1 +舒查 1 +舒爾 1 +舜初 1 +舞劇 1 +舞陽 1 +航天 1 +航站 1 +般若 1 +船塢 1 +船山 1 +船業 1 +船體 1 +艦身 1 +良師 1 +良心 1 +良性 1 +良新 1 +良田 1 +良知 1 +艱巨 1 +色佳 1 +色布 1 +色帶 1 +色情 1 +色目 1 +色調 1 +色龍 1 +艷姬 1 +艷麗 1 +艾伍 1 +艾倫 1 +艾利 1 +艾因 1 +艾夏 1 +艾崔 1 +艾巴 1 +艾度 1 +艾琳 1 +艾瑞 1 +艾瑪 1 +艾登 1 +艾美 1 +艾蓮 1 +艾薩 1 +艾迴 1 +艾雲 1 +艾麗 1 +芬妮 1 +芬華 1 +芬迪 1 +芭蕉 1 +芭黎 1 +花上 1 +花俏 1 +花坮 1 +花城 1 +花店 1 +花旗 1 +花月 1 +花果 1 +花枝 1 +花瓶 1 +花甲 1 +花蜜 1 +花鞋 1 +花齊 1 +芳自 1 +苗栗 1 +苗穗 1 +苟且 1 +若愚 1 +若羌 1 +若英 1 +苦力 1 +苦悶 1 +苦情 1 +苦苣 1 +苦讀 1 +苯並 1 +英一 1 +英乙 1 +英倫 1 +英傑 1 +英勇 1 +英吋 1 +英寸 1 +英年 1 +英廷 1 +英男 1 +英額 1 +英麗 1 +英龍 1 +茂名 1 +范恩 1 +茄南 1 +茄芮 1 +茅家 1 +茨卡 1 +茨海 1 +茨科 1 +茨門 1 +茲堡 1 +茲海 1 +茲羅 1 +茲與 1 +茲薇 1 +茲貝 1 +茵蘭 1 +茶樓 1 +茶湯 1 +茶館 1 +茸切 1 +茸穹 1 +荃灣 1 +荃麟 1 +草原 1 +草地 1 +草坪 1 +草席 1 +草稿 1 +荊州 1 +荒地 1 +荒蕪 1 +荒誕 1 +荔灣 1 +荷林 1 +荷爾 1 +荷銀 1 +莉亞 1 +莉安 1 +莊嚴 1 +莊王 1 +莎尼 1 +莎樂 1 +莫吉 1 +莫埃 1 +莫尼 1 +莫扎 1 +莫札 1 +莫桑 1 +莫瑙 1 +莫瓦 1 +莫納 1 +莫臥 1 +莫過 1 +莫里 1 +莫鱷 1 +莽山 1 +菊花 1 +華倫 1 +華克 1 +華少 1 +華新 1 +華族 1 +華林 1 +華爾 1 +華界 1 +華石 1 +華秀 1 +華納 1 +華絲 1 +華西 1 +華頓 1 +菲亞 1 +菲力 1 +菲國 1 +菲萊 1 +菲詩 1 +菸害 1 +萊利 1 +萊博 1 +萊因 1 +萊夫 1 +萊希 1 +萊德 1 +萊斯 1 +萊明 1 +萊曼 1 
+萊蕪 1 +萊采 1 +萊默 1 +萌芽 1 +萎縮 1 +萬一 1 +萬三 1 +萬丹 1 +萬億 1 +萬多 1 +萬貴 1 +落下 1 +落千 1 +落實 1 +落敗 1 +落葉 1 +葆玖 1 +葉利 1 +葉士 1 +葉序 1 +葉綠 1 +葉魚 1 +著手 1 +著有 1 +著譯 1 +葛力 1 +葛朱 1 +葛浩 1 +葛羅 1 +葛蕾 1 +葛量 1 +葡超 1 +葫蘆 1 +葬禮 1 +葵青 1 +蒂利 1 +蒂娜 1 +蒂洛 1 +蒂爾 1 +蒂迦 1 +蒙丹 1 +蒙卡 1 +蒙塔 1 +蒙巴 1 +蒙得 1 +蒙羞 1 +蒙面 1 +蒙馬 1 +蒲飛 1 +蒸氣 1 +蒸發 1 +蒼白 1 +蓄水 1 +蓄銳 1 +蓋兒 1 +蓋因 1 +蓋多 1 +蓋曼 1 +蓋朗 1 +蓋爾 1 +蓋頂 1 +蓓天 1 +蓬塔 1 +蓬拉 1 +蓬皮 1 +蓮娜 1 +蓮安 1 +蓮花 1 +蔑稱 1 +蔡斯 1 +蔣公 1 +蔥蝸 1 +蕙嫻 1 +蕨類 1 +蕩漾 1 +蕾妮 1 +蕾絲 1 +薄弱 1 +薄扶 1 +薛慶 1 +薩凡 1 +薩卡 1 +薩平 1 +薩德 1 +薩瑞 1 +薩諸 1 +薩諾 1 +薩迪 1 +薩馬 1 +薪俸 1 +薪嘗 1 +藉助 1 +藉此 1 +藍儂 1 +藍寶 1 +藍尼 1 +藍本 1 +藍欽 1 +藍潟 1 +藍灰 1 +藍田 1 +藍白 1 +藍背 1 +藍邊 1 +藍領 1 +藍黨 1 +藏之 1 +藏寶 1 +藏有 1 +藝名 1 +藝能 1 +藝謀 1 +藝電 1 +藤原 1 +藤木 1 +藤本 1 +藤村 1 +藤枝 1 +藤藝 1 +藥品 1 +藥師 1 +藥材 1 +藥水 1 +藥石 1 +藩主 1 +藩士 1 +藩市 1 +藩西 1 +蘇利 1 +蘇北 1 +蘇尋 1 +蘇斯 1 +蘇木 1 +蘇美 1 +蘇萊 1 +蘇達 1 +蘇醒 1 +蘇魯 1 +蘊藏 1 +蘭利 1 +蘭堡 1 +蘭多 1 +蘭大 1 +蘭封 1 +蘭尼 1 +蘭弗 1 +蘭登 1 +虎式 1 +虎棒 1 +虎翼 1 +虎視 1 +虔信 1 +處之 1 +處女 1 +處決 1 +處置 1 +處長 1 +虛弱 1 +虛榮 1 +虛無 1 +號吾 1 +號子 1 +號稱 1 +號誌 1 +虢國 1 +虹橋 1 +蚊類 1 +蚩尤 1 +蛇油 1 +蛇種 1 +蛇魔 1 +蜂擁 1 +蜂蜜 1 +蜆殼 1 +蜚聲 1 +蜥蜴 1 +蜿蜒 1 +蝴蝶 1 +蝸牛 1 +融入 1 +融化 1 +融和 1 +融雪 1 +螞蟻 1 +螢幕 1 +蟬聯 1 +蟲洞 1 +蠟浸 1 +蠶院 1 +蠻子 1 +血型 1 +血液 1 +血竭 1 +血管 1 +血腥 1 +行人 1 +行使 1 +行列 1 +行各 1 +行將 1 +行用 1 +行禮 1 +行長 1 +行騙 1 +街上 1 +街名 1 +街小 1 +街市 1 +街路 1 +街頭 1 +衛理 1 +衝動 1 +衝鋒 1 +衡量 1 +衢山 1 +衣冠 1 +衣物 1 +衣索 1 +表型 1 +表妹 1 +表姐 1 +表徵 1 +表情 1 +表態 1 +表揚 1 +表格 1 +表決 1 +表白 1 +表述 1 +衰敗 1 +衰落 1 +袖手 1 +袖箭 1 +被告 1 +被子 1 +裁決 1 +裁減 1 +裂縫 1 +裂變 1 +裋褐 1 +裕智 1 +裕軍 1 +裙子 1 +補償 1 +補天 1 +補教 1 +補時 1 +補褂 1 +裝修 1 +裝備 1 +裝嵌 1 +裝有 1 +裝瓶 1 +裝葯 1 +裝設 1 +裝載 1 +裡斯 1 +裴林 1 +裸子 1 +裸照 1 +製備 1 +製得 1 +複數 1 +褐色 1 +褪色 1 +褲子 1 +褲袋 1 +襄助 1 +襄王 1 +襄陽 1 +襟見 1 +襲封 1 +西京 1 +西佗 1 +西利 1 +西卡 1 +西向 1 +西周 1 +西哈 1 +西坑 1 +西域 1 +西夏 1 +西奇 1 +西宮 1 +西尼 1 +西岸 1 +西島 1 +西廠 1 +西式 1 +西弗 1 +西拉 1 +西晉 1 +西段 1 +西比 1 +西河 1 +西漢 1 +西爾 1 +西甌 1 +西米 1 +西絲 1 +西線 1 +西美 1 +西翼 1 +西蒙 1 +西薩 1 +西距 1 +西鄉 1 +西醫 1 +西里 1 +要是 1 +要脅 1 +要衝 1 +要道 1 +見人 1 +見稱 1 +見聞 1 +見肘 1 +見解 1 +見識 1 +見鍾 1 +見長 1 +規例 1 +覓食 1 +視乎 1 +視作 1 +視圖 1 +視眈 1 +視角 1 +親人 1 +親信 1 +親政 1 +親朋 1 +親筆 1 
+親臨 1 +親身 1 +覺察 1 +觀光 1 +觀察 1 +觀念 1 +觀戰 1 +觀望 1 +觀看 1 +觀者 1 +角膜 1 +解僱 1 +解夢 1 +解析 1 +解答 1 +解職 1 +解脫 1 +解說 1 +觸怒 1 +觸手 1 +觸覺 1 +觸診 1 +言官 1 +言語 1 +言辭 1 +言閒 1 +訂位 1 +訃告 1 +訄書 1 +訇開 1 +計其 1 +計委 1 +計謀 1 +討逆 1 +託泊 1 +記念 1 +記述 1 +記集 1 +訥費 1 +設站 1 +許昌 1 +許願 1 +訴求 1 +訴諸 1 +註明 1 +註銷 1 +詐死 1 +詔書 1 +評出 1 +評判 1 +評鑑 1 +詛咒 1 +詞幹 1 +詞義 1 +詢問 1 +試劑 1 +試播 1 +試種 1 +試製 1 +試音 1 +試飛 1 +詩文 1 +該事 1 +該人 1 +該墓 1 +該島 1 +該年 1 +該批 1 +該族 1 +該會 1 +該條 1 +該段 1 +該科 1 +該系 1 +該處 1 +該路 1 +該黨 1 +詳情 1 +詳細 1 +詼諧 1 +誇德 1 +誇祖 1 +誌家 1 +認一 1 +認同 1 +認定 1 +認罪 1 +認證 1 +認輔 1 +誓言 1 +誕下 1 +誕不 1 +誘因 1 +語文 1 +語法 1 +語流 1 +語訓 1 +語調 1 +語速 1 +語音 1 +誠意 1 +誤信 1 +誤差 1 +誤會 1 +誤槍 1 +誤譯 1 +誥命 1 +說出 1 +說客 1 +說成 1 +說紛 1 +說話 1 +說謊 1 +說道 1 +課本 1 +誹謗 1 +調值 1 +調停 1 +調入 1 +調和 1 +調控 1 +調水 1 +調沙 1 +調研 1 +調節 1 +調職 1 +調解 1 +諂媚 1 +談判 1 +談妥 1 +談論 1 +請來 1 +請辭 1 +請願 1 +論事 1 +諜海 1 +諧波 1 +諸如 1 +諸暨 1 +諸河 1 +諺言 1 +諾丁 1 +諾域 1 +諾娃 1 +諾曼 1 +諾爾 1 +諾瓦 1 +謀取 1 +謀士 1 +謀求 1 +謀職 1 +謁者 1 +謊言 1 +謙卑 1 +講完 1 +講究 1 +講談 1 +講道 1 +謝世 1 +謝列 1 +謝爾 1 +謝瓦 1 +謝蓋 1 +謹慎 1 +譜代 1 +警務 1 +警句 1 +警告 1 +警員 1 +警戒 1 +警衛 1 +警覺 1 +警鐘 1 +譯作 1 +譯員 1 +譯場 1 +譯本 1 +議席 1 +譴責 1 +護佑 1 +護城 1 +護墊 1 +護送 1 +讀取 1 +讀法 1 +變動 1 +變差 1 +變色 1 +變調 1 +變身 1 +變遷 1 +變革 1 +讓步 1 +讓開 1 +讚喻 1 +讚揚 1 +讚美 1 +讚譽 1 +谷山 1 +谷氨 1 +豆瓣 1 +豎立 1 +豎起 1 +豐久 1 +豐厚 1 +豐城 1 +豐臣 1 +豐隆 1 +象數 1 +象晉 1 +豢養 1 +豪宅 1 +豪門 1 +豫南 1 +豬圈 1 +豬油 1 +豬籠 1 +豬肉 1 +貓咪 1 +貓囒 1 +貓科 1 +貝加 1 +貝南 1 +貝斯 1 +貝格 1 +貝碧 1 +貝納 1 +貝都 1 +貝類 1 +貞昌 1 +貞潔 1 +貞觀 1 +負擔 1 +負粒 1 +負芻 1 +負荷 1 +負面 1 +負額 1 +財之 1 +財經 1 +財落 1 +貢品 1 +貢哥 1 +貢嘎 1 +貢巴 1 +貧乏 1 +貧窮 1 +貧鈾 1 +貨品 1 +貨機 1 +販賣 1 +貪圖 1 +貪婪 1 +貪心 1 +貪瀆 1 +貫徹 1 +貫穿 1 +貫通 1 +責怪 1 +責難 1 +貴子 1 +貴築 1 +貴賓 1 +貴陽 1 +貴霜 1 +貶意 1 +買入 1 +買賣 1 +費曼 1 +費用 1 +費盡 1 +費羅 1 +費雷 1 +貼身 1 +賀特 1 +賀立 1 +賄選 1 +資政 1 +資陽 1 +賈亞 1 +賈克 1 +賈多 1 +賈氏 1 +賓客 1 +賓尼 1 +賓州 1 +賓登 1 +賞識 1 +賠禮 1 +賡臣 1 +賢思 1 +賣出 1 +賣到 1 +賣地 1 +賣家 1 +賣掉 1 +賣空 1 +賤女 1 +賤民 1 +質詢 1 +賭徒 1 +賭檔 1 +賴宣 1 +賴滕 1 +賺取 1 +賺錢 1 +購得 1 +購置 1 +賽亞 1 +賽場 1 +賽拉 1 +賽普 1 +賽爾 1 +賽車 1 +賽道 1 +贈送 1 +贊博 1 +贊成 1 +贊比 1 +贊諾 1 +贏家 1 +贖回 1 +赤坂 1 +赤壁 1 +赤樹 1 +赤狐 1 +赤鱲 1 +赫伯 1 +赫塔 1 +赫姆 1 +赫斯 1 +赫曼 1 +赫比 1 
+赫盧 1 +赫莫 1 +赫雷 1 +赫魯 1 +走上 1 +走到 1 +走勢 1 +走漏 1 +走私 1 +起事 1 +起伏 1 +起初 1 +起名 1 +起因 1 +起始 1 +起建 1 +起彼 1 +起止 1 +起死 1 +起碼 1 +起端 1 +起舞 1 +起落 1 +起訖 1 +起降 1 +起點 1 +超出 1 +超導 1 +超強 1 +超我 1 +超時 1 +超武 1 +超然 1 +超重 1 +超齡 1 +越亮 1 +越共 1 +越前 1 +越好 1 +越弱 1 +越戰 1 +越早 1 +越暗 1 +越牆 1 +越發 1 +越近 1 +越過 1 +趕往 1 +趙氏 1 +趣事 1 +趨勢 1 +趨於 1 +足不 1 +足夠 1 +足見 1 +足跡 1 +趾爪 1 +趾骨 1 +跋扈 1 +跑壘 1 +跑步 1 +跑車 1 +跑馬 1 +跟操 1 +跟班 1 +跟蹤 1 +跟進 1 +跟隨 1 +跨國 1 +跨度 1 +跨步 1 +跨足 1 +跨過 1 +路士 1 +路撒 1 +路支 1 +路政 1 +路殊 1 +路濟 1 +路綫 1 +路網 1 +路透 1 +路過 1 +路障 1 +路面 1 +跳動 1 +跳槽 1 +跳過 1 +跳遠 1 +跳高 1 +踏上 1 +踏入 1 +踢進 1 +躁動 1 +躍升 1 +身受 1 +身型 1 +身大 1 +身旁 1 +身為 1 +身無 1 +身而 1 +身著 1 +身軀 1 +身高 1 +躬耕 1 +躲到 1 +車上 1 +車仁 1 +車型 1 +車士 1 +車外 1 +車尾 1 +車市 1 +車廠 1 +車手 1 +車票 1 +車程 1 +車窗 1 +車系 1 +車號 1 +車費 1 +車路 1 +車迷 1 +車頭 1 +軋箏 1 +軌跡 1 +軍中 1 +軍備 1 +軍功 1 +軍務 1 +軍委 1 +軍師 1 +軍援 1 +軍方 1 +軍服 1 +軍營 1 +軍艦 1 +軍裝 1 +軍階 1 +軍需 1 +軒轅 1 +軟化 1 +軟硬 1 +軟骨 1 +軸心 1 +較低 1 +較佳 1 +較厚 1 +較快 1 +較深 1 +載人 1 +載淳 1 +輔佐 1 +輕微 1 +輕易 1 +輕軌 1 +輕鐵 1 +輕髻 1 +輕鬆 1 +輝彥 1 +輪周 1 +輪廓 1 +輪流 1 +輪船 1 +輪迴 1 +輯錄 1 +輸掉 1 +輸精 1 +輸血 1 +輸送 1 +輻轍 1 +輾轉 1 +轉交 1 +轉任 1 +轉動 1 +轉化 1 +轉向 1 +轉型 1 +轉差 1 +轉往 1 +轉念 1 +轉播 1 +轉會 1 +轉正 1 +轉角 1 +轉賣 1 +轉赴 1 +辛勞 1 +辛哈 1 +辛基 1 +辛奈 1 +辛納 1 +辛辛 1 +辛那 1 +辟邪 1 +辦學 1 +辦有 1 +辨別 1 +辨明 1 +辨識 1 +辭典 1 +辭官 1 +辭歲 1 +辭辛 1 +辯證 1 +辰國 1 +辰男 1 +農事 1 +農墾 1 +農書 1 +農林 1 +農舍 1 +迅即 1 +迅猛 1 +迎神 1 +迎賓 1 +迎送 1 +迎面 1 +近似 1 +近侍 1 +近平 1 +近日 1 +近東 1 +近海 1 +近現 1 +近親 1 +近鄰 1 +返樸 1 +迢迢 1 +迦南 1 +迦牟 1 +迦罕 1 +迪士 1 +迪尼 1 +迪恩 1 +迪文 1 +迪歐 1 +迪比 1 +迪沙 1 +迪特 1 +迪生 1 +迪米 1 +迪納 1 +迫切 1 +迴流 1 +迷你 1 +迷唐 1 +迷路 1 +追兇 1 +追回 1 +追封 1 +追尋 1 +追尾 1 +追思 1 +追憶 1 +追查 1 +追根 1 +追殺 1 +追求 1 +追究 1 +追討 1 +追述 1 +退位 1 +退回 1 +退夷 1 +退居 1 +退敵 1 +退隱 1 +送來 1 +送到 1 +送回 1 +送殯 1 +送給 1 +送院 1 +逃亡 1 +逃奔 1 +逃至 1 +逃跑 1 +逆戟 1 +逍遙 1 +透徹 1 +透支 1 +透水 1 +透視 1 +透鏡 1 +逐客 1 +途中 1 +途人 1 +途經 1 +這兒 1 +這時 1 +通俗 1 +通商 1 +通天 1 +通宏 1 +通州 1 +通渭 1 +通貨 1 +通通 1 +通運 1 +通靈 1 +通風 1 +逛街 1 +速往 1 +速銷 1 +造價 1 +造反 1 +造就 1 +造幣 1 +造福 1 +造血 1 +造訪 1 +造謠 1 +逢吉 1 +連串 1 +連克 1 +連坐 1 +連年 1 +連座 1 +連德 1 +連成 1 +連拍 1 +連筆 1 +連篇 1 +連結 1 +連絡 1 +連通 1 +連進 1 +連餓 1 +週末 1 +週邊 1 +進位 1 
+進來 1 +進動 1 +進犯 1 +逼使 1 +逼停 1 +逼到 1 +逾期 1 +遂起 1 +遇上 1 +遇刺 1 +遇有 1 +遇陛 1 +遇難 1 +遊憩 1 +遊擊 1 +遊歷 1 +遊艇 1 +遊覽 1 +遊說 1 +遊離 1 +運回 1 +運往 1 +運煤 1 +運算 1 +運糧 1 +運補 1 +運載 1 +遍布 1 +過冷 1 +過剩 1 +過多 1 +過往 1 +過敏 1 +過橋 1 +過濾 1 +過甚 1 +過繼 1 +過苛 1 +過路 1 +過頭 1 +道世 1 +道中 1 +道具 1 +道刺 1 +道墟 1 +道士 1 +道學 1 +道宇 1 +道安 1 +道格 1 +道歉 1 +道綽 1 +道羅 1 +道義 1 +道靜 1 +達上 1 +達人 1 +達信 1 +達倉 1 +達克 1 +達加 1 +達古 1 +達多 1 +達恩 1 +達拉 1 +達拏 1 +達拖 1 +達智 1 +達母 1 +達濠 1 +達科 1 +達章 1 +達米 1 +達羅 1 +達華 1 +達賴 1 +達農 1 +違背 1 +遙陽 1 +遜位 1 +遞交 1 +遞增 1 +遠呂 1 +遠嫁 1 +遠揚 1 +遠日 1 +遠洋 1 +遠處 1 +遠遠 1 +遠離 1 +遣返 1 +適之 1 +適用 1 +遭殃 1 +遮天 1 +遮蔭 1 +遮陰 1 +遲遲 1 +遷出 1 +遷居 1 +遷校 1 +選上 1 +選修 1 +選定 1 +選用 1 +選美 1 +選訓 1 +選調 1 +選進 1 +選題 1 +遺物 1 +遺留 1 +遺腹 1 +遺迹 1 +遺骸 1 +遼西 1 +遼闊 1 +避禍 1 +避開 1 +邁克 1 +邁向 1 +邁阿 1 +還擊 1 +還有 1 +邊區 1 +邗江 1 +那修 1 +那峨 1 +那提 1 +那時 1 +那普 1 +那曲 1 +那瑞 1 +那瓦 1 +那罕 1 +那順 1 +邦國 1 +邦德 1 +邦蒂 1 +邦達 1 +邪惡 1 +邪神 1 +邪馬 1 +邱家 1 +邳縣 1 +邵伯 1 +邵氏 1 +郊狼 1 +郡區 1 +郡縣 1 +郡艾 1 +部位 1 +部字 1 +部將 1 +部首 1 +郪江 1 +郫縣 1 +郭家 1 +郵報 1 +郵輪 1 +都因 1 +都城 1 +都察 1 +都尉 1 +都斯 1 +都會 1 +都有 1 +都督 1 +都靈 1 +鄂倫 1 +鄂溫 1 +鄂霍 1 +鄉內 1 +鄉團 1 +鄉村 1 +鄉長 1 +鄰域 1 +鄰居 1 +鄰里 1 +酃縣 1 +酊大 1 +配上 1 +配件 1 +配備 1 +配器 1 +配有 1 +配角 1 +酒家 1 +酒杯 1 +酒樓 1 +酒鬼 1 +酩酊 1 +酵母 1 +酷似 1 +酷刑 1 +酸根 1 +酸甘 1 +酸銨 1 +酸鎂 1 +醉醺 1 +醋酸 1 +醫書 1 +醫科 1 +醫術 1 +醬貨 1 +醴陵 1 +醺醺 1 +釀成 1 +釀造 1 +采巴 1 +釉色 1 +釋出 1 +里先 1 +里內 1 +里利 1 +里南 1 +里卡 1 +里士 1 +里多 1 +里夫 1 +里姆 1 +里希 1 +里德 1 +里拉 1 +里施 1 +里森 1 +里波 1 +里港 1 +里納 1 +里維 1 +里茨 1 +里西 1 +里賽 1 +里迢 1 +里達 1 +里馬 1 +重創 1 +重力 1 +重回 1 +重復 1 +重心 1 +重情 1 +重播 1 +重核 1 +重物 1 +重獲 1 +重現 1 +重生 1 +重用 1 +重疊 1 +重禮 1 +重組 1 +重義 1 +重考 1 +重製 1 +重複 1 +重見 1 +重讀 1 +重鎮 1 +重開 1 +重陽 1 +重音 1 +重鳳 1 +野史 1 +野外 1 +野心 1 +野戰 1 +野木 1 +野球 1 +野菜 1 +量壽 1 +量度 1 +量洪 1 +金剛 1 +金寶 1 +金帶 1 +金幣 1 +金平 1 +金德 1 +金斯 1 +金森 1 +金氏 1 +金泉 1 +金浦 1 +金湖 1 +金牛 1 +金獎 1 +金箔 1 +金羅 1 +金美 1 +金華 1 +金質 1 +金邊 1 +金銀 1 +金錢 1 +金門 1 +金靴 1 +金頂 1 +金魚 1 +金鵰 1 +釜山 1 +針劑 1 +釧路 1 +鈺源 1 +鉑金 1 +銀杏 1 +銀熊 1 +銀牌 1 +銀白 1 +銀紅 1 +銀色 1 +銅仁 1 +銅像 1 +銅削 1 +銅斧 1 +銅柄 1 +銅臿 1 +銅製 1 +銅銎 1 +銅錛 1 +銅錢 1 +銘皖 1 +銘銘 1 +銜稱 1 +銳利 1 +銷毀 1 +銷量 1 +鋪成 1 +鋪有 1 +鋸齒 1 +鋼板 1 +錄影 1 +錄得 1 
+錄放 1 +錘樹 1 +錢上 1 +錦俊 1 +錦承 1 +錦江 1 +錦田 1 +錫伯 1 +錫勇 1 +錫昌 1 +錯視 1 +錯覺 1 +錳礦 1 +鍊金 1 +鍋中 1 +鍋內 1 +鍋爐 1 +鍛鍊 1 +鍾情 1 +鎖妖 1 +鎖閉 1 +鎮守 1 +鎮岳 1 +鎮朔 1 +鎮賚 1 +鎮里 1 +鎮靜 1 +鎳銀 1 +鏡波 1 +鏡湖 1 +鐵削 1 +鐵匾 1 +鐵木 1 +鐵棍 1 +鐵民 1 +鐵爐 1 +鐵管 1 +鐵釘 1 +鐵銹 1 +鐵錛 1 +鑑別 1 +鑑定 1 +鑑泉 1 +鑑證 1 +鑒定 1 +鑫新 1 +鑽入 1 +鑽出 1 +鑽探 1 +鑿出 1 +長凳 1 +長史 1 +長婁 1 +長孫 1 +長尾 1 +長岡 1 +長崎 1 +長廊 1 +長廷 1 +長方 1 +長榮 1 +長毛 1 +長治 1 +長溝 1 +長滿 1 +長瑪 1 +長盛 1 +長笛 1 +長篇 1 +長編 1 +長跑 1 +長頸 1 +長髮 1 +門修 1 +門坎 1 +門廳 1 +門式 1 +閃米 1 +閃長 1 +閃電 1 +閉日 1 +開價 1 +開光 1 +開啟 1 +開場 1 +開墾 1 +開學 1 +開工 1 +開往 1 +開戰 1 +開拓 1 +開挖 1 +開支 1 +開教 1 +開業 1 +開槍 1 +開球 1 +開瑞 1 +開票 1 +開車 1 +開辦 1 +開錄 1 +閑聊 1 +閑談 1 +閒言 1 +閒語 1 +間斷 1 +間碟 1 +間距 1 +閘口 1 +閘機 1 +閩侯 1 +閩南 1 +闖進 1 +關中 1 +關斷 1 +關緊 1 +關連 1 +關重 1 +闡述 1 +阡陌 1 +阪神 1 +防凍 1 +防止 1 +防盜 1 +防護 1 +阻塞 1 +阻撓 1 +阻隔 1 +阿一 1 +阿仙 1 +阿信 1 +阿修 1 +阿內 1 +阿勒 1 +阿勝 1 +阿勞 1 +阿基 1 +阿堯 1 +阿奇 1 +阿宋 1 +阿密 1 +阿寧 1 +阿尼 1 +阿布 1 +阿斗 1 +阿普 1 +阿曼 1 +阿東 1 +阿比 1 +阿波 1 +阿猴 1 +阿瑜 1 +阿穆 1 +阿納 1 +阿羅 1 +阿耳 1 +阿聯 1 +阿育 1 +阿茲 1 +阿諾 1 +阿賈 1 +阿赫 1 +阿連 1 +阿道 1 +阿達 1 +阿里 1 +阿隆 1 +陀斯 1 +陀耶 1 +附上 1 +附加 1 +附蟲 1 +附表 1 +附身 1 +降將 1 +降格 1 +降水 1 +降班 1 +降臨 1 +降魔 1 +限定 1 +限時 1 +陡壁 1 +院士 1 +院子 1 +院落 1 +除冰 1 +除夕 1 +除此 1 +除非 1 +陪葬 1 +陪都 1 +陰天 1 +陰暗 1 +陰陽 1 +陳國 1 +陳屍 1 +陳相 1 +陳述 1 +陵園 1 +陵蘭 1 +陶恩 1 +陷落 1 +陸仔 1 +陸域 1 +陸行 1 +陽安 1 +陽明 1 +隆亨 1 +隆坡 1 +隆坦 1 +隆基 1 +隆拿 1 +隆納 1 +隆索 1 +隆赫 1 +隊列 1 +隊名 1 +隔日 1 +隔開 1 +隕星 1 +隕鐵 1 +際春 1 +隠居 1 +隨丁 1 +隨便 1 +隨同 1 +隨往 1 +隨時 1 +隨軍 1 +隨隊 1 +險些 1 +險要 1 +隱含 1 +隱姓 1 +隱居 1 +隱性 1 +隱私 1 +隻身 1 +雄師 1 +雄獅 1 +雅克 1 +雅加 1 +雅可 1 +雅各 1 +雅君 1 +雅福 1 +集寧 1 +集結 1 +集聚 1 +雌性 1 +雌獸 1 +雌鯨 1 +雙十 1 +雙子 1 +雙收 1 +雙江 1 +雜姓 1 +雜糧 1 +雜處 1 +雜食 1 +雞腿 1 +雞頭 1 +離別 1 +離域 1 +離場 1 +離子 1 +離島 1 +離群 1 +離職 1 +難吃 1 +難得 1 +難攻 1 +難過 1 +雨季 1 +雨後 1 +雪上 1 +雪佛 1 +雪兒 1 +雪崩 1 +雪弟 1 +雪梅 1 +雲中 1 +雲亭 1 +雲岩 1 +雲松 1 +雲里 1 +零件 1 +零部 1 +零食 1 +雷他 1 +雷切 1 +雷利 1 +雷姆 1 +雷定 1 +雷托 1 +雷斯 1 +雷昂 1 +雷曼 1 +雷格 1 +雷特 1 +雷王 1 +雷羅 1 +雷蒂 1 +雷西 1 +雷雨 1 +電信 1 +電器 1 +電極 1 +電氣 1 +電瓶 1 +電線 1 +電通 1 +電邀 1 +需時 1 +霆鋒 1 +震寰 1 +震波 1 +震災 1 +霍亂 1 +霍伊 1 +霍夫 1 +霍姆 1 +霍巴 1 +霍斯 1 +霍次 1 +露出 1 +露比 1 +露臉 1 +露西 1 +霸佔 1 +霸權 1 
+靈前 1 +靈力 1 +靈性 1 +靈感 1 +靈柩 1 +靈異 1 +靈籤 1 +靈長 1 +靈魂 1 +青梅 1 +青森 1 +青睞 1 +青訓 1 +青金 1 +靖雯 1 +靜安 1 +靜岡 1 +靜華 1 +非鯽 1 +靠右 1 +靠左 1 +面具 1 +面向 1 +面貌 1 +革除 1 +鞦韆 1 +韃靼 1 +韋塔 1 +韋契 1 +韋德 1 +韋拉 1 +韋拿 1 +韋比 1 +韋科 1 +韓氏 1 +韓浜 1 +音律 1 +音色 1 +音量 1 +音高 1 +韶之 1 +響號 1 +頂上 1 +頂尖 1 +頂峰 1 +頂端 1 +頂級 1 +項鏈 1 +順宗 1 +順岸 1 +順德 1 +順應 1 +順懷 1 +順治 1 +順滑 1 +順陽 1 +頌平 1 +頌揚 1 +預估 1 +預告 1 +預知 1 +預示 1 +預約 1 +頑石 1 +頒給 1 +頗多 1 +頗大 1 +頗有 1 +頗盛 1 +頗豐 1 +領事 1 +領取 1 +領奏 1 +領航 1 +領軍 1 +領隊 1 +頜形 1 +頜翼 1 +頜腔 1 +頜鯉 1 +頭上 1 +頭前 1 +頭型 1 +頭士 1 +頭尾 1 +頭槌 1 +頭版 1 +頭盔 1 +頭紗 1 +頭門 1 +頭髮 1 +頸部 1 +頸長 1 +頸鹿 1 +頹垣 1 +頻寬 1 +頻散 1 +頻繁 1 +頻頻 1 +題獻 1 +題記 1 +額外 1 +額度 1 +願違 1 +類別 1 +類固 1 +顯光 1 +顯徑 1 +顯現 1 +顯靈 1 +風化 1 +風尚 1 +風波 1 +風行 1 +風間 1 +風雨 1 +風雪 1 +飛往 1 +飛抵 1 +飛毛 1 +飛沫 1 +飛碟 1 +飛鏢 1 +飛靶 1 +飛鳥 1 +飛龍 1 +食人 1 +食肆 1 +食肉 1 +食蟲 1 +食鹽 1 +飲茶 1 +飼料 1 +飼草 1 +飽和 1 +飽經 1 +飾曲 1 +飾物 1 +餃子 1 +養份 1 +養大 1 +養女 1 +養母 1 +養父 1 +養精 1 +養育 1 +養菊 1 +養蠶 1 +餐車 1 +餘熱 1 +餘眾 1 +餘萬 1 +館前 1 +館名 1 +館址 1 +饑餓 1 +饒平 1 +饕餮 1 +首仗 1 +首個 1 +首名 1 +首場 1 +首屈 1 +首席 1 +首戰 1 +首批 1 +首日 1 +首映 1 +首條 1 +首艦 1 +首讀 1 +香世 1 +香亭 1 +香儂 1 +香吉 1 +香味 1 +香坊 1 +香塍 1 +香水 1 +香洲 1 +香火 1 +香織 1 +馬上 1 +馬修 1 +馬內 1 +馬六 1 +馬匹 1 +馬台 1 +馬喇 1 +馬圈 1 +馬塔 1 +馬奇 1 +馬威 1 +馬托 1 +馬提 1 +馬特 1 +馬球 1 +馬粦 1 +馬約 1 +馬莎 1 +馬賽 1 +馬赫 1 +馬路 1 +馬雅 1 +馬雍 1 +馬鞍 1 +馬黑 1 +馳名 1 +馴化 1 +駐任 1 +駐地 1 +駐防 1 +駕崩 1 +駙馬 1 +駛入 1 +駛過 1 +駿業 1 +騁遠 1 +騎馬 1 +騏一 1 +騙徒 1 +騰出 1 +騰訊 1 +騷擾 1 +驗屍 1 +驗票 1 +驗證 1 +驗電 1 +驚人 1 +驚動 1 +驚喜 1 +驚嘆 1 +驚訝 1 +驚醒 1 +驟減 1 +驟逝 1 +驢肉 1 +骨幹 1 +骯髒 1 +骷髏 1 +體側 1 +體外 1 +體委 1 +體工 1 +體教 1 +體會 1 +體溫 1 +髖骨 1 +高下 1 +高出 1 +高升 1 +高在 1 +高地 1 +高大 1 +高峰 1 +高座 1 +高手 1 +高效 1 +高新 1 +高杉 1 +高梅 1 +高檔 1 +高清 1 +高漲 1 +高潮 1 +高熱 1 +高燥 1 +高琦 1 +高盧 1 +高聳 1 +高處 1 +高買 1 +高質 1 +高超 1 +高雄 1 +高高 1 +髮生 1 +髮辮 1 +鬆髻 1 +鬚鯨 1 +鬥雞 1 +鬧出 1 +鬼影 1 +鬼怪 1 +鬼道 1 +魁智 1 +魅惑 1 +魏國 1 +魏救 1 +魏斯 1 +魏氏 1 +魏澤 1 +魔力 1 +魔界 1 +魔石 1 +魔鬼 1 +魚尾 1 +魚腹 1 +魚苗 1 +魚類 1 +魯伯 1 +魯克 1 +魯國 1 +魯敉 1 +魯曉 1 +魯木 1 +魯特 1 +魯瓊 1 +魯登 1 +魯良 1 +魯茨 1 +魯西 1 +魯道 1 +鮑亞 1 +鮑克 1 +鮑爾 1 +鮑維 1 +鮑里 1 +鮑魚 1 +鮮有 1 +鮮用 1 +鮮虞 1 +鯉齒 1 +鰓蓋 1 +鰭條 1 +鰺沢 1 +鱗甲 1 +鱗蟒 1 +鱗骨 1 +鳥獸 1 +鳥種 1 +鳳彬 1 +鳳花 1 
+鳴叫 1 +鳴放 1 +鳴道 1 +鴛鴦 1 +鴻南 1 +鴻基 1 +鴻章 1 +鴻績 1 +鴻華 1 +鴻超 1 +鴻逵 1 +鴻銘 1 +鹽城 1 +鹽州 1 +鹽酸 1 +鹿鼎 1 +麗卡 1 +麗晶 1 +麗泰 1 +麗特 1 +麗珍 1 +麗金 1 +麗閣 1 +麗雨 1 +麗魚 1 +麥加 1 +麥卡 1 +麥拉 1 +麥格 1 +麥當 1 +麥芽 1 +麥迪 1 +麩氨 1 +麵團 1 +麵皮 1 +麻呂 1 +麻城 1 +麻塞 1 +麻將 1 +麻布 1 +麻木 1 +麻痹 1 +黃岡 1 +黃巾 1 +黃昏 1 +黃沙 1 +黃河 1 +黃蜂 1 +黎家 1 +黎明 1 +黎波 1 +黎筍 1 +黎絲 1 +黑奴 1 +黑帶 1 +黑手 1 +黑文 1 +黑暗 1 +黑木 1 +黑板 1 +黑死 1 +黑海 1 +黑衫 1 +黑錢 1 +黑鐵 1 +黑雲 1 +黑髮 1 +黑麻 1 +默古 1 +默史 1 +默比 1 +默生 1 +默默 1 +黛安 1 +黛絲 1 +點陣 1 +點頭 1 +點點 1 +黨團 1 +黨委 1 +黨校 1 +黨歌 1 +黨衛 1 +黨部 1 +黨魁 1 +鼎灶 1 +鼎芬 1 +鼎金 1 +鼓手 1 +鼬鼠 1 +鼻栓 1 +齊國 1 +齊放 1 +齊蒂 1 +齊蓋 1 +齒狀 1 +齒輪 1 +齒鼩 1 +齲齒 1 +龍台 1 +龍女 1 +龍文 1 +龍眼 1 +龍耳 1 +龍華 1 +龍頭 1 +龐特 1 +龐貝 1 +龜茲 1 diff --git a/syntaxnet/dragnn/conll2017/sample/zh-segmenter-resource/label-map b/syntaxnet/dragnn/conll2017/sample/zh-segmenter-resource/label-map new file mode 100644 index 0000000000000000000000000000000000000000..d46b2d1bcbc014e63b3447e64d68cd8becbdd739 --- /dev/null +++ b/syntaxnet/dragnn/conll2017/sample/zh-segmenter-resource/label-map @@ -0,0 +1,43 @@ +42 +punct 12965 +nmod 11147 +nsubj 7134 +obj 6016 +nummod 4732 +case:suff 4179 +acl 4163 +root 3797 +mark 3445 +det 3434 +advmod 2962 +case 2739 +case:dec 2517 +conj 2421 +obl 2000 +dep 1998 +mark:relcl 1833 +clf 1722 +ccomp 1655 +amod 1525 +xcomp 1382 +acl:relcl 1356 +cop 1349 +cc 1334 +nmod:tmod 1199 +appos 1089 +case:aspect 718 +aux 675 +case:pref 569 +aux:pass 324 +csubj 280 +flat:foreign 250 +nsubj:pass 211 +discourse 151 +aux:caus 149 +advcl 125 +mark:advb 79 +iobj 61 +dislocated 45 +mark:comp 17 +csubj:pass 5 +vocative 1 diff --git a/syntaxnet/dragnn/conll2017/sample/zh-segmenter-resource/lcword-map b/syntaxnet/dragnn/conll2017/sample/zh-segmenter-resource/lcword-map new file mode 100644 index 0000000000000000000000000000000000000000..4315174293f17157ddb0dfacedef874877c8bd28 --- /dev/null +++ b/syntaxnet/dragnn/conll2017/sample/zh-segmenter-resource/lcword-map @@ -0,0 +1,16263 @@ +16262 +, 5851 +的 4289 +. 
3759 +在 1273 +年 1165 +9999 1070 +、 952 +是 918 +為 887 +一 863 +於 680 +99 647 +和 639 +9 617 +了 614 +人 467 +個 466 +月 456 +有 453 +他 439 +( 429 +) 429 +與 380 +中 376 +日 356 +」 321 +「 320 +被 315 +這 300 +會 258 +並 255 +以 253 +而 245 +也 244 +上 228 +中國 218 +由 215 +《 213 +》 213 +之 211 +兩 203 +後 202 +及 191 +時 188 +位 186 +· 183 +999 178 +等 175 +到 172 +但 162 +對 158 +大 157 +此 157 +不 156 +其 155 +所 150 +種 143 +或 140 +將 139 +次 132 +美國 131 +成 130 +者 127 +至 125 +該 123 +區 118 +開始 118 +部 117 +三 116 +家 116 +可以 115 +她 115 +都 114 +來 113 +因 113 +國 109 +人口 108 +軍 107 +市 104 +使用 102 +省 102 +從 101 +名 98 +著 97 +則 95 +多 94 +用 94 +日本 93 +沒有 93 +地 92 +曾 92 +第一 92 +他們 91 +州 90 +公司 88 +就 88 +性 88 +由於 88 +其中 87 +地區 87 +新 87 +稱 87 +國家 86 +政府 86 +: 84 +已 84 +主要 83 +小 82 +; 81 +世界 81 +可 81 +大學 81 +下 80 +不同 79 +自 79 +香港 79 +縣 77 +自己 77 +前 76 +因為 76 +研究 76 +總 76 +最 75 +面積 75 +李 74 +還 73 +向 72 +王 72 +進行 72 +它 71 +包括 69 +站 69 +四 68 +號 67 +當時 66 +這些 66 +部分 66 +工作 65 +米 65 +認為 65 +也是 64 +以及 64 +學 64 +村 64 +發現 64 +說 64 +作 63 +又 62 +屬 62 +平方公里 62 +中華 61 +同時 60 +學院 60 +條 60 +成立 59 +第二 59 +二 58 +五 58 +亦 58 +代表 58 +發展 58 +發生 58 +美 58 +能 58 +之後 57 +使 57 +社會 57 +要 57 +一些 56 +人民 56 +內 56 +其他 56 +約 56 +世紀 54 +元 54 +場 54 +過 54 +建築 53 +為了 53 +線 53 +只 52 +張 52 +把 52 +獲得 52 +目前 52 +台 51 +文化 51 +英國 51 +重要 51 +中心 50 +但是 50 +局 50 +更 50 +許多 50 +之間 49 +可能 49 +如 49 +歷史 49 +遊戲 49 +公里 48 +共 48 +帝國 48 +期間 48 +歲 48 +處 48 +音樂 48 +黨 48 +一般 47 +年代 47 +根據 47 +行星 47 +隊 47 +電影 47 +政治 46 +鐵路 46 +城市 45 +故事 45 +組織 45 +便 44 +學校 44 +所有 44 +科學 44 +英 44 +- 43 +任 43 +作品 43 +指 43 +最後 43 +機 43 +語 43 +通過 43 +間 43 +關係 43 +已經 42 +建立 42 +時間 42 +當 42 +電視 42 +共和 41 +後來 41 +比 41 +管理 41 +表示 41 +讓 41 +通常 41 +高 41 +出現 40 +影響 40 +成功 40 +戰爭 40 +提供 40 +系統 40 +動物 39 +地方 39 +就是 39 +座 39 +設計 39 +負責 39 +鎮 39 +長 39 +館 39 +卻 38 +國際 38 +德國 38 +技術 38 +方面 38 +最終 38 +父親 38 +車站 38 +上海 37 +人物 37 +出 37 +分 37 +台灣 37 +各 37 +層 37 +山 37 +方 37 +河 37 +即 36 +參加 36 +擔任 36 +時期 36 +服務 36 +正式 36 +生活 36 +給 36 +要求 36 +路 36 +運動 36 +9,999 35 +一直 35 +再 35 +單位 35 +委員 35 +很 35 +書 35 +段 35 +民國 
35 +法國 35 +理論 35 +人類 34 +均 34 +女 34 +才 34 +教 34 +文 34 +歐洲 34 +決定 34 +漢 34 +現在 34 +第三 34 +航空 34 +行政 34 +足球 34 +雖然 34 +八 33 +問題 33 +小說 33 +我 33 +教育 33 +製作 33 +不是 32 +保護 32 +全國 32 +北 32 +印度 32 +員 32 +形成 32 +很多 32 +得到 32 +活動 32 +節目 32 +西班牙 32 +主義 31 +寺 31 +屆 31 +島 31 +市鎮 31 +方式 31 +時代 31 +最高 31 +生 31 +街 31 +起 31 +需要 31 +99% 30 +中央 30 +另 30 +另外 30 +器 30 +天 30 +得 30 +控制 30 +擁有 30 +每 30 +產生 30 +經濟 30 +羅馬 30 +進入 30 +隨 30 +仍 29 +公園 29 +具有 29 +去 29 +大陸 29 +式 29 +接受 29 +東 29 +球隊 29 +當地 29 +院 29 +雙 29 +9.99 28 +並且 28 +北京 28 +受到 28 +同 28 +如果 28 +學生 28 +工程 28 +時候 28 +港 28 +物 28 +級 28 +計劃 28 +超過 28 +道 28 +電腦 28 +存在 27 +室 27 +對於 27 +情況 27 +戰鬥 27 +方法 27 +林 27 +機場 27 +比賽 27 +總統 27 +義大利 27 +都是 27 +非 27 +非常 27 +點 27 +人員 26 +做 26 +原因 26 +國民 26 +支持 26 +數 26 +法 26 +派 26 +然而 26 +獨立 26 +甚至 26 +生物 26 +聯合 26 +項 26 +主 25 +兒子 25 +出版 25 +劉 25 +南 25 +巴士 25 +幾 25 +我們 25 +權 25 +海拔 25 +第99 25 +經過 25 +議會 25 +賽 25 +99.99 24 +交通 24 +例如 24 +分布 24 +加入 24 +化 24 +同年 24 +城 24 +大量 24 +於是 24 +族 24 +最大 24 +未 24 +海 24 +湖 24 +生產 24 +皇帝 24 +科 24 +第9 24 +系列 24 +高度 24 +9.9 23 +事件 23 +們 23 +內容 23 +命名 23 +型 23 +宣布 23 +導致 23 +帶 23 +必須 23 +成員 23 +本 23 +正 23 +清朝 23 +演出 23 +無 23 +直接 23 +行為 23 +裡 23 +西 23 +距離 23 +軍事 23 +部隊 23 +鄉 23 +銀行 23 +集團 23 +99,999 22 +一樣 22 +不少 22 +不過 22 +傳統 22 +僅 22 +副 22 +反對 22 +單 22 +增加 22 +它們 22 +思想 22 +有關 22 +業 22 +此外 22 +母親 22 +水 22 +灣 22 +版 22 +紐約 22 +組成 22 +結構 22 +聯盟 22 +聯賽 22 +能力 22 +華 22 +設 22 +語言 22 +附近 22 +除 22 +一起 21 +作用 21 +出生 21 +制 21 +力 21 +受 21 +古 21 +只有 21 +唯一 21 +地位 21 +府 21 +廣泛 21 +植物 21 +海軍 21 +無法 21 +獲 21 +率 21 +球 21 +環境 21 +紀念 21 +結束 21 +舉行 21 +角色 21 +議員 21 +選舉 21 +里 21 +量 21 +韓 21 +體 21 +主席 20 +仍然 20 +六 20 +冠軍 20 +出任 20 +分子 20 +原子 20 +參與 20 +地下 20 +城鎮 20 +天津 20 +工業 20 +希臘 20 +度 20 +引起 20 +採用 20 +攻擊 20 +整個 20 +文學 20 +文物 20 +朝鮮 20 +東北 20 +核 20 +機構 20 +比較 20 +清 20 +猶太 20 +現代 20 +管轄 20 +範圍 20 +細胞 20 +經常 20 +胡 20 +自治 20 +自由 20 +角 20 +逐漸 20 +重新 20 +類型 20 +不久 19 +不能 19 +代 19 +以上 19 +佔領 19 +全 19 +分別 19 +原 19 +台北 19 +唐 19 +多數 19 +天文 19 +字 19 +巴黎 19 +最早 19 +會議 19 +有些 19 +民族 19 +洋 19 
+結果 19 +繼續 19 +能夠 19 +趙 19 +造成 19 +達 19 +達到 19 +部份 19 +鄭 19 +風格 19 +不會 18 +亞 18 +令 18 +任何 18 +企業 18 +先後 18 +列車 18 +功能 18 +半 18 +取得 18 +合併 18 +外交 18 +子 18 +廣州 18 +戰役 18 +所以 18 +明朝 18 +期 18 +每年 18 +毛 18 +治療 18 +法院 18 +畢業 18 +疾病 18 +相當 18 +節 18 +艦隊 18 +身體 18 +軍隊 18 +進 18 +陳 18 +離開 18 +領導 18 +體育 18 +99.9 17 +七 17 +你 17 +再次 17 +十 17 +名字 17 +大戰 17 +宗教 17 +家族 17 +希望 17 +廣場 17 +想 17 +戰 17 +採取 17 +提出 17 +改 17 +教堂 17 +新聞 17 +星 17 +曲 17 +最初 17 +歐 17 +漫畫 17 +片 17 +物理 17 +特別 17 +發行 17 +經 17 +總部 17 +自然 17 +蘇聯 17 +行動 17 +製造 17 +西北 17 +資料 17 +選擇 17 +那 17 +金 17 +領域 17 +顆 17 +類 17 +飛機 17 +九龍 16 +低 16 +像 16 +共同 16 +利用 16 +制度 16 +前往 16 +創作 16 +勢力 16 +區域 16 +協助 16 +各種 16 +大樓 16 +家庭 16 +實驗 16 +居民 16 +山東 16 +心理 16 +或者 16 +拒絕 16 +步 16 +武器 16 +民主 16 +法律 16 +爆發 16 +狀態 16 +而且 16 +藝術 16 +表現 16 +記者 16 +設有 16 +設立 16 +資源 16 +軌道 16 +過程 16 +道路 16 +還是 16 +革命 16 +首次 16 +高速 16 +下轄 15 +中共 15 +主角 15 +作戰 15 +初 15 +則是 15 +化石 15 +十分 15 +南京 15 +南部 15 +商 15 +噸 15 +回到 15 +國內 15 +國王 15 +地球 15 +基督 15 +大廈 15 +大約 15 +太陽 15 +女兒 15 +女性 15 +如此 15 +學習 15 +完全 15 +實際 15 +常 15 +常見 15 +幾乎 15 +應用 15 +承認 15 +投資 15 +指出 15 +指揮 15 +普查 15 +未來 15 +東南 15 +橋 15 +此後 15 +火星 15 +版本 15 +牠們 15 +發表 15 +白 15 +直到 15 +碼頭 15 +科技 15 +立法 15 +組 15 +統治 15 +老 15 +職業 15 +著名 15 +蒙古 15 +西部 15 +調查 15 +跟 15 +路線 15 +車輛 15 +農業 15 +這樣 15 +酒 15 +鐵道 15 +集 15 +/ 14 +99999 14 +999萬 14 +一定 14 +交易 14 +人們 14 +今 14 +以來 14 +位置 14 +使得 14 +俄羅斯 14 +俱樂部 14 +傳播 14 +兒童 14 +公主 14 +劇 14 +北部 14 +博物 14 +合作 14 +基本 14 +境內 14 +外 14 +太平 14 +失去 14 +完成 14 +容易 14 +密度 14 +專業 14 +市場 14 +幫助 14 +建造 14 +抗 14 +擊敗 14 +旗 14 +曾經 14 +有限 14 +架 14 +案 14 +棲息 14 +波蘭 14 +澳門 14 +營運 14 +特色 14 +獎 14 +男 14 +相同 14 +看到 14 +簡稱 14 +系 14 +統計 14 +網路 14 +聯邦 14 +色 14 +董事 14 +規模 14 +視 14 +解決 14 +言 14 +起來 14 +車 14 +這裡 14 +進攻 14 +開發 14 +限制 14 +顯示 14 +黃 14 +99萬 13 +九 13 +倫敦 13 +全部 13 +公路 13 +公開 13 +其後 13 +初期 13 +加上 13 +博士 13 +司令 13 +同意 13 +因而 13 +圖書 13 +土地 13 +埃及 13 +基礎 13 +堂 13 +墨西哥 13 +天主 13 +妻子 13 +娛樂 13 +建 13 +建設 13 +形 13 +形式 13 +從事 13 +手 13 +打 13 +改變 13 +故 13 +教會 13 +數學 13 +數據 13 +數量 13 +早期 13 +更多 13 
+東京 13 +梁 13 +樂團 13 +樓 13 +模式 13 +死 13 +死亡 13 +每個 13 +水平 13 +流域 13 +準備 13 +物種 13 +物質 13 +王國 13 +玩家 13 +男性 13 +當選 13 +病 13 +目 13 +目標 13 +相關 13 +知識 13 +社 13 +第四 13 +紀錄 13 +統一 13 +舊 13 +街道 13 +設定 13 +身份 13 +較 13 +辦公 13 +速度 13 +運輸 13 +郡 13 +項目 13 +食物 13 +馬 13 +---- 12 +一帶 12 +上帝 12 +且 12 +中學 12 +中部 12 +之前 12 +京 12 +人數 12 +什麼 12 +以下 12 +份 12 +保留 12 +個人 12 +價值 12 +元素 12 +內部 12 +公元 12 +具 12 +半島 12 +原本 12 +反應 12 +反映 12 +可是 12 +商業 12 +嚴重 12 +基地 12 +大型 12 +女子 12 +孫 12 +將軍 12 +尤其 12 +居住 12 +師 12 +帶來 12 +平均 12 +建議 12 +很大 12 +律師 12 +恆星 12 +恐怖 12 +應 12 +據 12 +改革 12 +政策 12 +新加坡 12 +月台 12 +有時 12 +東部 12 +楊 12 +標準 12 +機關 12 +歌手 12 +決賽 12 +汽車 12 +減少 12 +潛艇 12 +熱帶 12 +瑞典 12 +生命 12 +產品 12 +產業 12 +盃 12 +相對 12 +眾 12 +眾多 12 +知道 12 +神 12 +精神 12 +經營 12 +船 12 +該國 12 +變成 12 +賽事 12 +近 12 +透過 12 +遭到 12 +遺址 12 +避免 12 +郭 12 +醫院 12 +重建 12 +重慶 12 +門 12 +電子 12 +? 11 +主張 11 +主持 11 +主教 11 +之中 11 +亨利 11 +人士 11 +以前 11 +以色列 11 +件 11 +伊斯蘭 11 +佔 11 +作者 11 +保持 11 +信仰 11 +先生 11 +全球 11 +出身 11 +創立 11 +創辦 11 +力量 11 +去世 11 +反 11 +取代 11 +召開 11 +周 11 +園 11 +團 11 +大會 11 +奧地利 11 +威脅 11 +季 11 +安全 11 +專輯 11 +帝 11 +平方米 11 +強烈 11 +接近 11 +推出 11 +描述 11 +播放 11 +文字 11 +普遍 11 +末 11 +朱 11 +業務 11 +殖民 11 +江 11 +江蘇 11 +涉及 11 +現時 11 +界 11 +留下 11 +目的 11 +相信 11 +看 11 +社區 11 +福建 11 +管 11 +給予 11 +網站 11 +線路 11 +繼承 11 +英格蘭 11 +見 11 +試圖 11 +資訊 11 +超 11 +邊 11 +部門 11 +隻 11 +面 11 +首 11 +99.9% 10 +99.99% 10 +並非 10 +事 10 +事業 10 +交流 10 +以後 10 +來往 10 +供 10 +俄 10 +儘管 10 +勞動 10 +包含 10 +化學 10 +協會 10 +君主 10 +和平 10 +唱片 10 +圈 10 +國旗 10 +國會 10 +報 10 +報告 10 +威廉 10 +學位 10 +寬 10 +廠 10 +徐 10 +復興 10 +感到 10 +手術 10 +投入 10 +接 10 +推動 10 +播出 10 +支 10 +改名 10 +文明 10 +文藝 10 +明顯 10 +有效 10 +杭州 10 +東方 10 +條件 10 +模型 10 +殺 10 +河流 10 +法庭 10 +波 10 +洲 10 +派遣 10 +演員 10 +演唱 10 +火車 10 +爭議 10 +特定 10 +特徵 10 +特殊 10 +獨特 10 +生長 10 +當中 10 +症 10 +發動 10 +發射 10 +確定 10 +神話 10 +移民 10 +空間 10 +立 10 +篇 10 +終於 10 +結婚 10 +綫 10 +維持 10 +總理 10 +群 10 +若 10 +華盛頓 10 +葡萄 10 +蔡 10 +藏 10 +蘇 10 +衝突 10 +西藏 10 +規定 10 +訓練 10 +記 10 +記載 10 +記錄 10 +話 10 +該市 10 +警察 10 +變化 10 +責任 10 +起源 10 +逝世 10 +運行 10 +醫 
10 +錦標 10 +關於 10 +陸軍 10 +雜誌 10 +需 10 +類似 10 +飛行 10 +首都 10 +駐 10 +'' 9 +一切 9 +一致 9 +上陣 9 +下降 9 +不斷 9 +不滿 9 +中山 9 +丹麥 9 +之外 9 +事務 9 +互相 9 +介紹 9 +來到 9 +健康 9 +光 9 +內閣 9 +全長 9 +公布 9 +其實 9 +再度 9 +出來 9 +出售 9 +分支 9 +到達 9 +動畫 9 +南方 9 +危險 9 +古代 9 +古典 9 +叫 9 +吃 9 +各類 9 +品 9 +國務 9 +團體 9 +地點 9 +執行 9 +塔 9 +士兵 9 +奪得 9 +好 9 +媒體 9 +字母 9 +孩子 9 +學者 9 +寫 9 +對手 9 +就讀 9 +工人 9 +帶領 9 +廟 9 +引擎 9 +強 9 +強大 9 +後期 9 +快速 9 +恢復 9 +意外 9 +戰略 9 +打擊 9 +批評 9 +拍攝 9 +接觸 9 +攻入 9 +放棄 9 +政權 9 +教學 9 +星期 9 +普通 9 +朋友 9 +未能 9 +本人 9 +本身 9 +枚 9 +柏林 9 +核心 9 +森林 9 +標誌 9 +機會 9 +機車 9 +權利 9 +此時 9 +殿 9 +民間 9 +沿海 9 +浙江 9 +湖泊 9 +滿洲 9 +爆炸 9 +特大 9 +狀況 9 +現 9 +瑞士 9 +當局 9 +發布 9 +皇后 9 +皇家 9 +相互 9 +相似 9 +石 9 +破壞 9 +穩定 9 +空中 9 +第五 9 +絕對 9 +經歷 9 +經理 9 +綜合 9 +總督 9 +老師 9 +而是 9 +聯繫 9 +職務 9 +肉 9 +自行 9 +芬蘭 9 +花園 9 +菲律賓 9 +處理 9 +觀眾 9 +解放 9 +評 9 +貢獻 9 +資格 9 +進士 9 +運作 9 +遭 9 +那麼 9 +酒店 9 +金屬 9 +階段 9 +隧道 9 +隨後 9 +集中 9 +電話 9 +青年 9 +頻道 9 +顏色 9 +高等 9 +-- 8 +the 8 +上升 8 +下來 8 +中環 8 +主題 8 +亞洲 8 +人工 8 +以外 8 +佔地 8 +何 8 +依據 8 +俄國 8 +保守 8 +信息 8 +傅 8 +價格 8 +儒 8 +光棍 8 +內地 8 +內戰 8 +公分 8 +分鐘 8 +利益 8 +劇情 8 +劑 8 +加 8 +加拿大 8 +十一 8 +即使 8 +原來 8 +口 8 +古老 8 +同樣 8 +命令 8 +喜歡 8 +因素 8 +圖 8 +圖案 8 +地鐵 8 +報道 8 +增長 8 +大多 8 +大小 8 +大道 8 +始 8 +官 8 +家人 8 +專門 8 +小型 8 +小時 8 +尚 8 +局長 8 +山脈 8 +山西 8 +工藝 8 +工資 8 +左右 8 +巨大 8 +平方千米 8 +幻想 8 +廣播 8 +廣東 8 +廳 8 +往往 8 +從此 8 +德 8 +意義 8 +意見 8 +或是 8 +房屋 8 +批 8 +按 8 +提升 8 +提高 8 +攝影 8 +政 8 +效果 8 +教授 8 +文章 8 +方案 8 +旅遊 8 +早 8 +明確 8 +書記 8 +書院 8 +曹 8 +材料 8 +武 8 +武漢 8 +比如 8 +污染 8 +注意 8 +測試 8 +澳大利亞 8 +澳洲 8 +瀋陽 8 +燃料 8 +爵士 8 +父母 8 +現存 8 +男子 8 +病逝 8 +發明 8 +白色 8 +的話 8 +皆 8 +監督 8 +真正 8 +知 8 +知名 8 +秘書 8 +秦 8 +程度 8 +立方米 8 +符號 8 +等等 8 +粒子 8 +紅 8 +維也納 8 +編碼 8 +編輯 8 +署 8 +羽毛 8 +翻譯 8 +考慮 8 +聚集 8 +股份 8 +臨時 8 +良好 8 +芝加哥 8 +葉 8 +表達 8 +複雜 8 +襲擊 8 +西南 8 +解釋 8 +討論 8 +許 8 +詞 8 +變 8 +貓 8 +賽季 8 +贏得 8 +軟體 8 +轉 8 +通 8 +過去 8 +邨 8 +部長 8 +鄧 8 +重 8 +重大 8 +銀河 8 +鏡 8 +長度 8 +隨即 8 +雄性 8 +靠 8 +餐廳 8 +首府 8 +高中 8 +a 7 +a999 7 +~ 7 +下午 7 +不可 7 +主人 7 +之下 7 +事實 7 +事情 7 +二世 7 +二戰 7 +交換 7 +任命 7 +伊麗莎白 7 +住宅 7 +佛教 7 +保險 7 +倍 7 +傳說 7 +入侵 7 +公共 7 +公務 7 +公爵 7 +共產 7 +典型 7 +分析 7 +列 7 
+前身 7 +創造 7 +匈奴 7 +北角 7 +十字 7 +卡 7 +原著 7 +右 7 +各地 7 +名稱 7 +名義 7 +吳 7 +吸引 7 +命 7 +員工 7 +哲學 7 +唐朝 7 +喬治 7 +回 7 +在此 7 +城堡 7 +城門 7 +基金 7 +場所 7 +大使 7 +天星 7 +天然 7 +失敗 7 +套 7 +奴隸 7 +學術 7 +安排 7 +宋 7 +實現 7 +實行 7 +專科 7 +尋找 7 +尋求 7 +小組 7 +島嶼 7 +左 7 +差異 7 +市區 7 +市民 7 +常常 7 +幣 7 +平原 7 +年級 7 +年輕 7 +店 7 +建國 7 +弗吉尼亞 7 +強調 7 +形象 7 +很少 7 +想像 7 +意識 7 +愛爾蘭 7 +戲 7 +找到 7 +持有 7 +指導 7 +探測 7 +支援 7 +收斂 7 +放 7 +教師 7 +施 7 +旗下 7 +明 7 +最多 7 +本地 7 +某些 7 +校園 7 +核糖 7 +條約 7 +榮譽 7 +樂隊 7 +檢查 7 +款 7 +母音 7 +氏 7 +氣候 7 +水庫 7 +沒 7 +海岸 7 +海洋 7 +混合 7 +清真 7 +港島 7 +湖南 7 +湯姆 7 +滿 7 +激烈 7 +無綫 7 +然後 7 +熊貓 7 +熱 7 +特有 7 +班 7 +現有 7 +現象 7 +球員 7 +球季 7 +理工 7 +甘肅 7 +生態 7 +申請 7 +真實 7 +石油 7 +礁 7 +秘密 7 +移動 7 +空軍 7 +突破 7 +策略 7 +簽訂 7 +約翰 7 +結合 7 +維新 7 +綱 7 +網 7 +翌年 7 +臺灣 7 +興建 7 +興趣 7 +舉辦 7 +航班 7 +航線 7 +艦 7 +茶 7 +著作 7 +衛生 7 +表演 7 +表面 7 +裔 7 +西方 7 +規劃 7 +覺得 7 +觀測 7 +觀點 7 +計算 7 +訪問 7 +設施 7 +評論 7 +調整 7 +講述 7 +議院 7 +讀 7 +貴族 7 +貿易 7 +較小 7 +較為 7 +輛 7 +轟炸 7 +迅速 7 +近年 7 +連接 7 +道德 7 +達成 7 +適合 7 +選出 7 +邏輯 7 +醫學 7 +重點 7 +錄製 7 +鏡頭 7 +長期 7 +長達 7 +關 7 +降低 7 +雖 7 +需求 7 +面對 7 +韓國 7 +領先 7 +領袖 7 +題材 7 +風暴 7 +食用 7 +駐守 7 +體現 7 +體系 7 +高級 7 +高達 7 +魔法 7 +魚 7 +999999 6 +999億 6 +999多 6 +jr 6 +丈夫 6 +上市 6 +上映 6 +乘 6 +事物 6 +二十 6 +亦是 6 +享受 6 +亮 6 +代理 6 +任務 6 +但丁 6 +住 6 +作出 6 +來源 6 +依然 6 +依靠 6 +促進 6 +信號 6 +個體 6 +做法 6 +側 6 +傳 6 +優勢 6 +元朗 6 +全家 6 +公民 6 +公眾 6 +兼 6 +出土 6 +判決 6 +剛 6 +劃分 6 +加工 6 +助理 6 +努力 6 +動力 6 +十八 6 +協議 6 +卡爾 6 +原始 6 +反射 6 +取消 6 +口號 6 +司 6 +司法 6 +含有 6 +吸收 6 +呂 6 +呼吸 6 +咖啡 6 +商品 6 +商店 6 +嘗試 6 +四川 6 +困難 6 +國歌 6 +地產 6 +基 6 +基因 6 +壓力 6 +外國 6 +多樣 6 +大大 6 +大獎 6 +大眾 6 +太空 6 +夫人 6 +奧運 6 +她們 6 +好友 6 +如同 6 +始建 6 +嬴 6 +季節 6 +官方 6 +定居 6 +定義 6 +客運 6 +宣佈 6 +宮 6 +家中 6 +密碼 6 +封 6 +對應 6 +對抗 6 +對象 6 +導演 6 +展覽 6 +島上 6 +師範 6 +席 6 +平等 6 +平面 6 +底 6 +廣告 6 +延伸 6 +強度 6 +形容 6 +形態 6 +形狀 6 +影片 6 +彼此 6 +徒 6 +情感 6 +意 6 +意味 6 +愛 6 +感 6 +懷孕 6 +戀 6 +成熟 6 +成績 6 +成長 6 +手法 6 +打算 6 +批准 6 +投票 6 +授予 6 +提名 6 +搖滾 6 +搜索 6 +操作 6 +擴展 6 +改編 6 +效力 6 +敘利亞 6 +教導 6 +新城 6 +方向 6 +方形 6 +日報 6 +日耳曼 6 +時任 6 +時常 6 +普魯士 6 +更名 6 +最近 6 +朝廷 6 +杯 6 +校區 6 +校長 6 +楚 6 +樹 6 +歌曲 6 +止 6 +死後 6 +民眾 6 +池 6 
+河道 6 +流行 6 +海盜 6 +消費 6 +深入 6 +深圳 6 +滅亡 6 +火 6 +無論 6 +版權 6 +牙齒 6 +王朝 6 +玻璃 6 +生存 6 +男友 6 +町 6 +畫 6 +畫家 6 +病毒 6 +發出 6 +發起 6 +發達 6 +短 6 +碑 6 +確認 6 +神奇 6 +神經 6 +禁止 6 +私人 6 +秦國 6 +穆斯林 6 +立刻 6 +立場 6 +童年 6 +端 6 +第七 6 +籃球 6 +米蘭 6 +經典 6 +經驗 6 +緬甸 6 +繪畫 6 +缺乏 6 +羅 6 +美麗 6 +習俗 6 +翡翠 6 +職 6 +能量 6 +色彩 6 +蔣 6 +蕭 6 +藉由 6 +虛擬 6 +血統 6 +行 6 +行走 6 +表明 6 +袁 6 +製成 6 +覆蓋 6 +規則 6 +設置 6 +試驗 6 +詩 6 +詩人 6 +詩歌 6 +該片 6 +說服 6 +說法 6 +論 6 +諮詢 6 +證明 6 +豐富 6 +走 6 +超人 6 +越來越 6 +跑道 6 +路易斯 6 +車展 6 +輿論 6 +近代 6 +返回 6 +退役 6 +通往 6 +通訊 6 +造 6 +進步 6 +過來 6 +選區 6 +遺傳 6 +邀請 6 +邊緣 6 +邱 6 +酒精 6 +醫生 6 +醫療 6 +金融 6 +銷售 6 +開展 6 +開放 6 +阻止 6 +陷入 6 +隊員 6 +階級 6 +隨機 6 +雕刻 6 +離 6 +雲南 6 +電池 6 +非洲 6 +須 6 +顧問 6 +首先 6 +騎兵 6 +黎 6 +9,999,999 5 +99.9萬 5 +999,999 5 +99億 5 +9千億 5 +『 5 +上述 5 +不僅 5 +不好 5 +中立 5 +中間 5 +主流 5 +事故 5 +亞歷山大 5 +亞馬遜 5 +人均 5 +今天 5 +今日 5 +介入 5 +以北 5 +任期 5 +佔據 5 +作家 5 +依舊 5 +侵略 5 +保存 5 +信 5 +信任 5 +信奉 5 +信託 5 +修正 5 +停止 5 +傑出 5 +傳承 5 +傷害 5 +像是 5 +儀式 5 +先 5 +免費 5 +入 5 +公交 5 +公會 5 +兵 5 +其它 5 +其餘 5 +冷卻 5 +分配 5 +分類 5 +列入 5 +別墅 5 +刺激 5 +創建 5 +加熱 5 +加盟 5 +動作 5 +勞工 5 +化合 5 +北海 5 +十二 5 +千 5 +升級 5 +南北 5 +南極 5 +印第安那 5 +參謀 5 +參議 5 +受傷 5 +叫做 5 +史 5 +司馬 5 +各個 5 +合 5 +合法 5 +合理 5 +同盟 5 +名單 5 +否認 5 +呈 5 +呈現 5 +周圍 5 +品牌 5 +哈定 5 +啟超 5 +善 5 +喇嘛 5 +固定 5 +固體 5 +圍 5 +圖像 5 +土耳其 5 +在內 5 +地圖 5 +城區 5 +執政 5 +培養 5 +堅持 5 +堡 5 +場地 5 +壁畫 5 +壘 5 +外科 5 +大氣 5 +大西 5 +如下 5 +如今 5 +妻 5 +始終 5 +孔 5 +學名 5 +學會 5 +學科 5 +宇宙 5 +安裝 5 +官吏 5 +客戶 5 +客體 5 +宮廷 5 +家長 5 +容納 5 +宿舍 5 +察覺 5 +寫作 5 +專利 5 +專家 5 +對外 5 +對此 5 +少數 5 +展出 5 +展開 5 +岸 5 +工 5 +工具 5 +巴西 5 +市政 5 +席位 5 +年度 5 +底部 5 +廈門 5 +廖 5 +廣 5 +廣西 5 +建成 5 +引發 5 +弟弟 5 +得知 5 +微博 5 +心 5 +意思 5 +愛情 5 +感情 5 +感覺 5 +慈善 5 +態度 5 +慶祝 5 +成年 5 +成本 5 +成都 5 +戰國 5 +戰後 5 +房間 5 +手中 5 +手段 5 +托勒密 5 +找 5 +技能 5 +抗議 5 +抵抗 5 +抵達 5 +拜占庭 5 +持續 5 +指定 5 +指示 5 +掌握 5 +排名 5 +接管 5 +推進 5 +措施 5 +提到 5 +換 5 +撤銷 5 +收入 5 +收藏 5 +政務 5 +故宮 5 +教皇 5 +教習 5 +敵人 5 +文忠 5 +文獻 5 +斯 5 +新型 5 +新華 5 +新鮮 5 +方便 5 +方言 5 +施工 5 +旅行 5 +日期 5 +早年 5 +明治 5 +更加 5 +書中 5 +有的 5 +朝 5 +本片 5 +杜 5 +東海 5 +東西 5 +架構 5 +某種 5 +查爾斯 5 +查理 5 +柯林頓 5 +棉花 5 +棒球 5 +極 5 +榜 5 +構成 5 +樓梯 5 +機制 5 
+機器 5 +次年 5 +欣賞 5 +歡迎 5 +正常 5 +正確 5 +武裝 5 +歸 5 +殺害 5 +每天 5 +民 5 +民兵 5 +氣體 5 +水果 5 +水系 5 +汞 5 +江西 5 +決策 5 +河北 5 +河南 5 +波音 5 +泥塑 5 +泰安 5 +泳兒 5 +洛桑 5 +洪 5 +海峽 5 +海底 5 +消息 5 +游擊 5 +湖北 5 +溫 5 +溫度 5 +溫泉 5 +滅絕 5 +演化 5 +演奏 5 +漢朝 5 +潘 5 +澤東 5 +澳 5 +濃度 5 +炎 5 +無關 5 +牌 5 +物品 5 +物業 5 +物體 5 +狗 5 +狩獵 5 +王子 5 +珊瑚 5 +現場 5 +現實 5 +甘 5 +生下 5 +生涯 5 +用作 5 +發送 5 +百 5 +直徑 5 +直至 5 +相 5 +真理 5 +眼 5 +督 5 +祖先 5 +神秘 5 +神聖 5 +秋 5 +移居 5 +程 5 +程序 5 +種植 5 +種類 5 +稱作 5 +空氣 5 +穿 5 +突變 5 +競賽 5 +符合 5 +第六 5 +簡單 5 +粵 5 +紅軍 5 +紐西蘭 5 +級別 5 +素 5 +細節 5 +組合 5 +結局 5 +編號 5 +練習 5 +總署 5 +繞 5 +美洲 5 +群島 5 +群眾 5 +耕地 5 +聯絡 5 +聲明 5 +肯定 5 +臺 5 +興奮 5 +興起 5 +般 5 +船上 5 +艘 5 +花 5 +華視 5 +落後 5 +藝人 5 +藥物 5 +蘇格蘭 5 +虎丘 5 +虛構 5 +融合 5 +血壓 5 +行業 5 +裝甲 5 +裝置 5 +裡面 5 +西遊 5 +觀 5 +解散 5 +設備 5 +診斷 5 +該地 5 +該屬 5 +認可 5 +認知 5 +認識 5 +誕生 5 +請 5 +象徵 5 +貝多芬 5 +財產 5 +貨車 5 +質量 5 +赤道 5 +赴 5 +超級 5 +越南 5 +趙國 5 +路易 5 +身亡 5 +軍團 5 +輪 5 +轉移 5 +轉變 5 +辭去 5 +辭職 5 +退出 5 +通車 5 +通道 5 +連 5 +連任 5 +連續 5 +進而 5 +進軍 5 +遠 5 +適當 5 +遭遇 5 +那裡 5 +邦 5 +郗 5 +郵政 5 +鄉鎮 5 +鄰近 5 +醒亞 5 +醫師 5 +鎊 5 +鎮壓 5 +鐵 5 +鑒 5 +長大 5 +長官 5 +長沙 5 +開設 5 +防禦 5 +陝西 5 +院長 5 +陸 5 +階層 5 +障礙 5 +隸屬 5 +難 5 +電梯 5 +電車 5 +青海 5 +預測 5 +預算 5 +預防 5 +領土 5 +頻率 5 +食品 5 +飲料 5 +飾 5 +首相 5 +馬來西亞 5 +馬達 5 +馮 5 +騎士 5 +體積 5 +體色 5 +黑人 5 +龐大 5 +9-9 4 +9.99億 4 +9.9億 4 +9.9萬 4 +b 4 +casey 4 +county 4 +google 4 +john 4 +m9 4 +nba 4 +of 4 +to 4 +you 4 +』 4 +一世 4 +一半 4 +一旦 4 +三世 4 +上演 4 +上訴 4 +下令 4 +下頜 4 +不及 4 +不得 4 +不應 4 +不等 4 +不足 4 +世凱 4 +中東 4 +中止 4 +中華龍鳥 4 +中視 4 +丹羽 4 +主演 4 +乃 4 +久 4 +之上 4 +乘坐 4 +乘客 4 +乾燥 4 +乾隆 4 +了解 4 +予 4 +事變 4 +于 4 +五世 4 +亞軍 4 +交給 4 +交配 4 +交響 4 +亦為 4 +享年 4 +人事 4 +代言 4 +以西 4 +任職 4 +企圖 4 +伊拉克 4 +伺服 4 +供應 4 +依法 4 +侵蝕 4 +保加利亞 4 +保障 4 +信徒 4 +修復 4 +倫理 4 +做出 4 +停留 4 +價 4 +優惠 4 +優秀 4 +兄弟 4 +充電 4 +先進 4 +克拉克 4 +入口 4 +入選 4 +全面 4 +公 4 +公安 4 +公式 4 +共振 4 +其間 4 +具體 4 +冬天 4 +出場 4 +出戰 4 +出發 4 +出租 4 +出色 4 +刀 4 +分佈 4 +分成 4 +分期 4 +列表 4 +則天 4 +則為 4 +前期 4 +前線 4 +前進 4 +劇集 4 +劍 4 +加州 4 +加強 4 +勝利 4 +包 4 +包裝 4 +匈牙利 4 +區劃 4 +十五 4 +協定 4 +協調 4 +南側 4 +南延 4 +印第安 4 +危機 4 +原有 4 +原理 4 +參考 4 +參賽 4 +古物 4 +句 4 +只要 4 +各國 4 +各界 4 +合成 4 +合眾 4 
+合金 4 +吉 4 +吉他 4 +同事 4 +同治 4 +名將 4 +名詞 4 +呎 4 +呢 4 +周年 4 +命運 4 +哈爾濱 4 +哥倫比亞 4 +商人 4 +啟用 4 +喬治亞 4 +單車 4 +嘲諷 4 +回來 4 +國防 4 +圓形 4 +地底 4 +地形 4 +地面 4 +坊 4 +基辛格 4 +堅決 4 +墓 4 +夏 4 +外星 4 +夜 4 +夢 4 +大同 4 +大帝 4 +大臣 4 +天皇 4 +夫婦 4 +失望 4 +妹妹 4 +姐姐 4 +姓氏 4 +委任 4 +婚姻 4 +婦女 4 +媽媽 4 +學堂 4 +官員 4 +定律 4 +宣稱 4 +實業 4 +實體 4 +寶貝 4 +小學 4 +少女 4 +尼龍 4 +局部 4 +展示 4 +屯門 4 +山區 4 +山頂 4 +岩 4 +島式 4 +島津 4 +嶺 4 +巡迴 4 +帶給 4 +常用 4 +幅度 4 +幫 4 +平方英里 4 +年齡 4 +幽默 4 +度假 4 +庫 4 +廚房 4 +廢除 4 +廷 4 +影像 4 +影業 4 +往 4 +很快 4 +很難 4 +後者 4 +得分 4 +得名 4 +得寵 4 +循環 4 +微 4 +徵召 4 +志願 4 +快 4 +怎麼 4 +性別 4 +性質 4 +恐龍 4 +患者 4 +情報 4 +情形 4 +情節 4 +情緒 4 +慕尼黑 4 +應該 4 +戀愛 4 +成人 4 +成分 4 +戰敗 4 +戰死 4 +扮演 4 +批判 4 +技巧 4 +抒情 4 +拓展 4 +招募 4 +指數 4 +按照 4 +挪威 4 +排列 4 +排水 4 +排行 4 +接收 4 +接替 4 +推薦 4 +推行 4 +揚州 4 +擔心 4 +擴大 4 +擴建 4 +擴張 4 +收到 4 +收錄 4 +改造 4 +攻克 4 +敖 4 +教區 4 +教宗 4 +教練 4 +整理 4 +數千 4 +數字 4 +文泰 4 +新疆 4 +新竹 4 +旅客 4 +既 4 +日常 4 +昆明 4 +明基 4 +星球 4 +星等 4 +春秋 4 +時段 4 +晉國 4 +晚間 4 +暗示 4 +暴力 4 +更換 4 +曼德拉 4 +最低 4 +最好 4 +最長 4 +有用 4 +服裝 4 +望遠 4 +木材 4 +本作 4 +本土 4 +本科 4 +本線 4 +本魚 4 +東區 4 +某 4 +校舍 4 +格 4 +案件 4 +楚國 4 +樂 4 +樂器 4 +標本 4 +樞紐 4 +模仿 4 +橄欖 4 +檔 4 +檢測 4 +欲 4 +歌 4 +正月 4 +此前 4 +此次 4 +步兵 4 +武術 4 +歷任 4 +死神 4 +殺死 4 +毀 4 +母 4 +每秒 4 +比例 4 +毫克 4 +毫米 4 +水深 4 +永江 4 +污泥 4 +沈 4 +沉澱 4 +沙灘 4 +河川 4 +油價 4 +治 4 +法案 4 +法蘭克 4 +法規 4 +波希米亞 4 +波斯 4 +注入 4 +洛杉磯 4 +洛陽 4 +流經 4 +浦 4 +海域 4 +海外 4 +海德堡 4 +海戰 4 +海水 4 +海灣 4 +海面 4 +液態 4 +液體 4 +深 4 +測量 4 +港鐵 4 +湯 4 +源 4 +滬 4 +滿貫 4 +潮濕 4 +濟南 4 +灣仔 4 +火災 4 +炸藥 4 +烏克蘭 4 +無意 4 +無線 4 +無錫 4 +照片 4 +營 4 +營業 4 +父 4 +爽 4 +牛奶 4 +牧場 4 +特遣 4 +特點 4 +犯罪 4 +狀元 4 +狂 4 +狙擊 4 +獎勵 4 +王后 4 +珍珠 4 +現今 4 +現金 4 +球會 4 +理事 4 +理想 4 +琉球 4 +瑪麗 4 +瓷器 4 +甘珠爾 4 +生化 4 +生意 4 +產地 4 +產量 4 +留 4 +畝 4 +當天 4 +當年 4 +當日 4 +當然 4 +疫苗 4 +癌症 4 +發育 4 +發言 4 +發酵 4 +皮膚 4 +監獄 4 +直 4 +直升 4 +直隸 4 +相比 4 +相近 4 +省份 4 +省委 4 +省級 4 +真相 4 +督察 4 +矩陣 4 +短暫 4 +短篇 4 +研發 4 +社團 4 +神廟 4 +神達 4 +票房 4 +租界 4 +種族 4 +稱號 4 +空調 4 +突然 4 +立即 4 +童 4 +競爭 4 +等級 4 +節日 4 +簽約 4 +粒 4 +精確 4 +紋理 4 +納粹 4 +純 4 +終止 4 +終結 4 +維吾爾 4 +網球 4 +緊密 4 +總量 4 +總長 4 +繼 4 +繼任 4 +罕見 4 +罪名 4 +置 4 +羅伯特 4 +羅馬尼亞 4 +義務 4 +習慣 4 +老闆 4 +考察 4 
+考試 4 +聖 4 +聖母 4 +聲稱 4 +聲譽 4 +背景 4 +胡佛 4 +自動 4 +船隻 4 +艙 4 +艱難 4 +苦艾 4 +草本 4 +荷蘭 4 +莊 4 +莊園 4 +莫斯科 4 +華航 4 +落成 4 +著重 4 +董 4 +蒙扎 4 +蓉蓉 4 +薩摩 4 +蘇家 4 +蘋果 4 +蛋白 4 +蜘蛛 4 +血栓 4 +行省 4 +術語 4 +衛星 4 +製 4 +西側 4 +西曼 4 +親 4 +親王 4 +評價 4 +評定 4 +詞語 4 +試 4 +該劇 4 +該區 4 +詹姆斯 4 +誰 4 +課程 4 +談話 4 +請求 4 +論文 4 +識字 4 +警署 4 +議長 4 +讀者 4 +負 4 +財富 4 +財政 4 +貨幣 4 +貨物 4 +貨運 4 +費 4 +賀 4 +資本 4 +資深 4 +資金 4 +賈 4 +質 4 +購買 4 +贊助 4 +起義 4 +足 4 +身上 4 +身分 4 +躲避 4 +車序 4 +軍人 4 +軍力 4 +軍官 4 +軍閥 4 +較多 4 +較少 4 +較高 4 +輸入 4 +輻射 4 +輻鰭魚 4 +轄下 4 +轉換 4 +辦事 4 +辦法 4 +辦理 4 +農民 4 +逃往 4 +這麼 4 +週期 4 +進口 4 +進球 4 +進程 4 +遂 4 +遊行 4 +過度 4 +過枝 4 +遷移 4 +遼寧 4 +邊境 4 +邵 4 +部落 4 +郵票 4 +重視 4 +野生 4 +量子 4 +金字 4 +針對 4 +銅鑼 4 +鋼琴 4 +錯誤 4 +鏡片 4 +鏡面 4 +長子 4 +長江 4 +門診 4 +開 4 +開幕 4 +開闢 4 +關心 4 +防守 4 +阿拉伯 4 +院校 4 +陽光 4 +隊伍 4 +階 4 +隔離 4 +雕塑 4 +雨水 4 +電 4 +電力 4 +電台 4 +電磁 4 +電訊 4 +靜態 4 +靜脈 4 +非法 4 +靠近 4 +音 4 +順 4 +順位 4 +預期 4 +頭 4 +題 4 +願意 4 +風險 4 +颱風 4 +飛 4 +飼養 4 +餐 4 +餘下 4 +首領 4 +體內 4 +體長 4 +高架 4 +高溫 4 +鬥爭 4 +鳥類 4 +黃埔 4 +黑 4 +黑色 4 +黨籍 4 +鼓勵 4 +! 3 +9.9% 3 +9.999 3 +9999萬 3 +99多 3 +99餘 3 +center 3 +close 3 +game 3 +gdp 3 +h9n9 3 +iii 3 +james 3 +mappy 3 +new 3 +psp 3 +°c 3 +─ 3 +・ 3 +一同 3 +丁 3 +三十 3 +三江 3 +上游 3 +上環 3 +上表 3 +上課 3 +上面 3 +下列 3 +下台 3 +下場 3 +下級 3 +不但 3 +不想 3 +不敵 3 +不明 3 +不遠 3 +不韋 3 +世 3 +世宗 3 +丟失 3 +中古 3 +中子 3 +中將 3 +中期 3 +中西 3 +中轉 3 +中風 3 +丹佛 3 +丹尼士 3 +主任 3 +主動 3 +主唱 3 +主機 3 +主編 3 +主辦 3 +主體 3 +之內 3 +之時 3 +也好 3 +互聯 3 +五角 3 +些 3 +亞當 3 +亞目 3 +亞視 3 +交往 3 +京都 3 +亮度 3 +人性 3 +人次 3 +人生 3 +人身 3 +他人 3 +付出 3 +以往 3 +以為 3 +以致 3 +任內 3 +任教 3 +份子 3 +企鵝 3 +伊恩 3 +伊賀 3 +休息 3 +估計 3 +伸出 3 +似 3 +伽利略 3 +住戶 3 +住院 3 +佔有 3 +佛 3 +佛像 3 +佛學 3 +佛羅倫薩 3 +作霖 3 +併 3 +來自 3 +例子 3 +供奉 3 +供給 3 +依 3 +依賴 3 +俄亥俄 3 +俘虜 3 +保安 3 +保育 3 +保證 3 +信德 3 +修建 3 +修道 3 +個別 3 +個性 3 +倖存 3 +候選 3 +借用 3 +倪 3 +值 3 +值得 3 +偉大 3 +偏 3 +停 3 +停車 3 +備受 3 +傳奇 3 +傳教 3 +傳染 3 +傾向 3 +優異 3 +允許 3 +元洪 3 +光源 3 +光緒 3 +克里米亞 3 +兒女 3 +內陸 3 +全市 3 +全縣 3 +全體 3 +公國 3 +公尺 3 +公轉 3 +六十 3 +共計 3 +兵力 3 +兼任 3 +冊封 3 +冷 3 +凱撒 3 +出使 3 +出口 3 +出獄 3 +函數 3 +分散 3 +分行 3 +分裂 3 +分解 3 +切斷 3 +刊物 3 +列為 3 +利 3 +制定 3 +前後 3 +前鋒 3 +前面 3 +剛好 3 
+創意 3 +劇團 3 +劇本 3 +劇目 3 +劇院 3 +劍橋 3 +力學 3 +加利福尼亞 3 +勞倫斯 3 +匈 3 +化工 3 +北冕 3 +北洋 3 +區別 3 +十七 3 +十多 3 +升 3 +升任 3 +升格 3 +南昌 3 +南海 3 +占 3 +印象 3 +即將 3 +卻是 3 +厘米 3 +原則 3 +原告 3 +原料 3 +友誼 3 +取 3 +受損 3 +叛亂 3 +口徑 3 +古城 3 +可惜 3 +台中 3 +史上 3 +各州 3 +各省 3 +同名 3 +同性 3 +同情 3 +名譽 3 +告訴 3 +周邊 3 +呼聲 3 +和也 3 +和約 3 +品種 3 +哥哥 3 +哲 3 +哺乳 3 +唱 3 +喜愛 3 +單一 3 +嘉賓 3 +器官 3 +噴泉 3 +嚴格 3 +四世 3 +回應 3 +回歸 3 +國泰 3 +國籍 3 +國軍 3 +圍繞 3 +園區 3 +土 3 +土壤 3 +在任 3 +在場 3 +地中 3 +地勢 3 +地獄 3 +地理 3 +坡 3 +報導 3 +場合 3 +塊 3 +塑造 3 +塘 3 +塞爾維亞 3 +填充 3 +填海 3 +填補 3 +境地 3 +墜毀 3 +士官 3 +壯 3 +壯觀 3 +夏天 3 +外來 3 +外界 3 +外部 3 +多達 3 +大夫 3 +大家 3 +大師 3 +大橋 3 +大權 3 +大致 3 +大賽 3 +大選 3 +天國 3 +天王 3 +天空 3 +太 3 +太小 3 +失業 3 +奇異 3 +奈米 3 +契約 3 +奧 3 +奪取 3 +女巫 3 +女王 3 +女神 3 +好評 3 +如何 3 +妃 3 +妨礙 3 +委派 3 +委託 3 +威力 3 +威尼斯 3 +威爾士 3 +威爾斯 3 +娃娃 3 +娶 3 +嫁給 3 +嫌疑 3 +嬌嬌 3 +子女 3 +孟席斯 3 +孫子 3 +學府 3 +宇 3 +安 3 +安德烈 3 +安徽 3 +安置 3 +宋朝 3 +完備 3 +完善 3 +完工 3 +完美 3 +宏 3 +宗 3 +官僚 3 +定 3 +宣傳 3 +宣告 3 +室內 3 +宰相 3 +家寶 3 +家裡 3 +家鄉 3 +富有 3 +富江 3 +寒冷 3 +實在 3 +實施 3 +封閉 3 +射入 3 +射擊 3 +專用 3 +尊 3 +對方 3 +對比 3 +導航 3 +小吃 3 +小堂 3 +小孩 3 +小平 3 +少 3 +尖 3 +就算 3 +尼山 3 +局面 3 +屈 3 +屋邨 3 +屠 3 +屯 3 +州長 3 +已婚 3 +已知 3 +巴哈伊 3 +巴斯 3 +布庫 3 +布袋 3 +師傅 3 +帶到 3 +帶走 3 +帶頭 3 +帽 3 +帽子 3 +平台 3 +平方呎 3 +平方英尺 3 +平民 3 +平衡 3 +年間 3 +幸福 3 +幹線 3 +幼 3 +幾何 3 +序列 3 +度過 3 +康 3 +庾 3 +延續 3 +延長 3 +建業 3 +弓毛 3 +引入 3 +引力 3 +引用 3 +弟子 3 +弱小 3 +強制 3 +強壯 3 +彈 3 +彈簧 3 +彭 3 +彰化 3 +影視 3 +往來 3 +往後 3 +征服 3 +待 3 +待遇 3 +很好 3 +很高 3 +後人 3 +後方 3 +後衛 3 +後面 3 +徑 3 +徒步 3 +復 3 +復工 3 +復辟 3 +徵收 3 +德克薩斯 3 +德川 3 +德意志 3 +德綱 3 +徹底 3 +心情 3 +必然 3 +必要 3 +忽略 3 +思潮 3 +怡和 3 +急速 3 +性格 3 +怪物 3 +怪獸 3 +恩來 3 +悠久 3 +情書 3 +想到 3 +想法 3 +愛上 3 +愛國 3 +愛達荷 3 +感應 3 +慢慢 3 +憑藉 3 +憤怒 3 +懷疑 3 +懸崖 3 +成份 3 +成千上萬 3 +成果 3 +戒毒 3 +截止 3 +戰俘 3 +戰時 3 +戰艦 3 +戲劇 3 +戶 3 +房地產 3 +房子 3 +手下 3 +扎維耶 3 +扭曲 3 +扶手 3 +承受 3 +承擔 3 +投手 3 +抗戰 3 +抵擋 3 +拆除 3 +拉丁 3 +拯救 3 +持 3 +指令 3 +挑戰 3 +挺 3 +捐助 3 +捕捉 3 +捷克 3 +授 3 +排出 3 +探討 3 +接任 3 +接唱 3 +控 3 +提議 3 +換乘 3 +損失 3 +損害 3 +搬到 3 +撞擊 3 +播映 3 +撰寫 3 +擔當 3 +據說 3 +擴充 3 +支付 3 +支撐 3 +支流 3 +收回 3 +收拾 3 +收購 3 +改制 3 +改善 3 +改稱 3 +改進 3 +攻打 3 +放射 3 +故障 3 +救 3 +敘述 3 
+教養 3 +文人 3 +文件 3 +料理 3 +斯里蘭卡 3 +新增 3 +新建 3 +新教 3 +新村 3 +新羅 3 +旁遮普 3 +族群 3 +日內瓦 3 +日後 3 +日間 3 +旨 3 +明星 3 +明珠 3 +明納努 3 +昏迷 3 +易 3 +星光 3 +星際 3 +星雲 3 +映射 3 +昭和 3 +是否 3 +時機 3 +時空 3 +晚 3 +晚年 3 +晨興 3 +普選 3 +景德 3 +景點 3 +晶體 3 +暗 3 +暨 3 +暫時 3 +暴動 3 +更為 3 +書店 3 +曼聯 3 +替 3 +替代 3 +替換 3 +最佳 3 +最為 3 +會堂 3 +月氏 3 +月球 3 +有利 3 +有機 3 +有權 3 +有趣 3 +服役 3 +服用 3 +朝日 3 +期望 3 +木板 3 +本來 3 +村民 3 +杜蘭戈 3 +杰 3 +東側 3 +東港 3 +東面 3 +板塊 3 +柏立基 3 +某個 3 +栃木 3 +校名 3 +核電 3 +栽培 3 +栽種 3 +桃 3 +桃園 3 +桃浦 3 +梅 3 +梅妃 3 +梅莉迪絲 3 +條例 3 +極度 3 +極端 3 +概念 3 +概率 3 +榮 3 +榮聲 3 +槍 3 +槍手 3 +樂曲 3 +樂章 3 +樊 3 +模擬 3 +機率 3 +檢察 3 +檸檬 3 +權力 3 +權勢 3 +權益 3 +次子 3 +次日 3 +歌劇 3 +正義 3 +正選 3 +步槍 3 +步道 3 +死傷 3 +死去 3 +毀滅 3 +比起 3 +民進 3 +氣壓 3 +氣泡 3 +氧化 3 +氧氣 3 +水上 3 +水域 3 +水塔 3 +水族 3 +水溝 3 +水稻 3 +永遠 3 +求救 3 +江南 3 +江孜 3 +污水 3 +決議 3 +沒收 3 +沖 3 +沙烏地阿拉伯 3 +油脂 3 +沼澤 3 +沿著 3 +法人 3 +法學 3 +法官 3 +波動 3 +波士頓 3 +波長 3 +泰國 3 +洋房 3 +洋行 3 +洗浴 3 +活佛 3 +活力 3 +流動 3 +流感 3 +流量 3 +海上 3 +海珊 3 +消滅 3 +淋巴 3 +淘汰 3 +淡水 3 +清代 3 +渝 3 +港口 3 +湖水 3 +湯瑪斯 3 +準則 3 +溥儀 3 +溫帶 3 +溶解 3 +滉 3 +滑冰 3 +漂亮 3 +漢城 3 +漳州 3 +潛入 3 +火箭 3 +災難 3 +為期 3 +無數 3 +煙草 3 +照相 3 +煩惱 3 +熱庫 3 +熱能 3 +爬行 3 +爭 3 +爲 3 +牆壁 3 +牛 3 +牛津 3 +物資 3 +特化 3 +狹窄 3 +獎學 3 +獎項 3 +獲利 3 +獲取 3 +獵食 3 +獻給 3 +率領 3 +王室 3 +珠海 3 +班納蒂克 3 +現任 3 +球場 3 +理解 3 +瑞草 3 +環 3 +生前 3 +生成 3 +生殖 3 +產下 3 +用品 3 +用戶 3 +用法 3 +用途 3 +田 3 +男女 3 +男孩 3 +畢 3 +畫作 3 +異常 3 +當今 3 +當作 3 +當初 3 +疑問 3 +病故 3 +瘋狂 3 +登上 3 +登基 3 +登場 3 +登陸 3 +發揮 3 +發源 3 +白人 3 +百科 3 +皇 3 +皇室 3 +盟友 3 +盟旗 3 +監管 3 +目錄 3 +直系 3 +直線 3 +直選 3 +相反 3 +相機 3 +相遇 3 +真人 3 +真武 3 +眼睛 3 +睡蓮 3 +瞭解 3 +知情 3 +短尾貓 3 +短短 3 +破曉 3 +破產 3 +碎片 3 +碳 3 +碳化 3 +確保 3 +確立 3 +社群 3 +祖父 3 +神父 3 +票價 3 +福 3 +福島 3 +福斯 3 +禮 3 +禮儀 3 +禮拜 3 +秀 3 +科系 3 +科隆 3 +租借 3 +租賃 3 +種種 3 +積極 3 +窯瓷 3 +立陶宛 3 +章 3 +童話 3 +競選 3 +竹子 3 +第八 3 +第十 3 +第十三 3 +筆 3 +算 3 +管道 3 +箱 3 +節慶 3 +節省 3 +籍 3 +精通 3 +精選 3 +糧食 3 +紅磡 3 +紅色 3 +紛爭 3 +素貞 3 +紡織 3 +索馬利亞 3 +細小 3 +終點 3 +組建 3 +組裝 3 +結成 3 +維京 3 +維吉爾 3 +維基 3 +維多利亞 3 +編劇 3 +總共 3 +總數 3 +總結 3 +總體 3 +繪製 3 +繼位 3 +纖維 3 +缺席 3 +缺點 3 +置富 3 +羊肉 3 +美利堅 3 +翁 3 +老鼠 3 +考 3 +考古 3 +考驗 3 +而非 3 +耶穌 3 +聖誕 3 +聖靈 3 +聘請 3 +聚會 3 +聯手 3 
+聯軍 3 +聲勢 3 +職位 3 +股價 3 +股票 3 +胡安 3 +膜 3 +自傳 3 +自我 3 +自稱 3 +自身 3 +自願 3 +至少 3 +致力 3 +致命 3 +臺南 3 +臼齒 3 +舞蹈 3 +航海 3 +航程 3 +船員 3 +艾女 3 +艾滋 3 +芭比 3 +英九 3 +范 3 +茶葉 3 +草食 3 +莽 3 +華格納 3 +菲利普斯 3 +萊姆 3 +萊茵 3 +著稱 3 +蒙大拿 3 +蒙特內哥羅 3 +蒸汽 3 +蓬勃 3 +薩魯曼 3 +藍 3 +藍色 3 +藤 3 +藥 3 +藩 3 +蘇州 3 +虎鯨 3 +蜀 3 +衍生 3 +衙門 3 +衛 3 +衛視 3 +衝擊 3 +衣服 3 +表 3 +裁判 3 +裏 3 +補給 3 +裝 3 +裝飾 3 +複合 3 +複製 3 +西安 3 +西洋 3 +西湖 3 +西納 3 +西門 3 +西關 3 +西面 3 +見到 3 +規格 3 +視頻 3 +親自 3 +計畫 3 +記憶 3 +評估 3 +評審 3 +該寺 3 +該書 3 +該校 3 +該站 3 +該鎮 3 +誠實 3 +誤認 3 +說明 3 +課室 3 +諷刺 3 +諸多 3 +謀殺 3 +謂 3 +謝 3 +證實 3 +識別 3 +護照 3 +譽 3 +讀書 3 +變形 3 +變數 3 +變體 3 +讚賞 3 +象 3 +貝爾 3 +貴妃 3 +貴州 3 +買 3 +買家 3 +費德勒 3 +費雪 3 +資助 3 +賓夕法尼亞 3 +賢 3 +賦予 3 +走廊 3 +起訴 3 +越 3 +足協 3 +距 3 +路徑 3 +跳 3 +踢 3 +身邊 3 +車體 3 +較大 3 +較長 3 +輔助 3 +輔導 3 +輔政 3 +輸出 3 +轄 3 +轄區 3 +轉乘 3 +轉到 3 +轉投 3 +轉讓 3 +農 3 +近期 3 +迫 3 +迫使 3 +追逐 3 +退休 3 +逃離 3 +逐步 3 +通信 3 +通用 3 +通行 3 +速食 3 +逢 3 +連環 3 +連線 3 +逮捕 3 +週 3 +逾 3 +遇見 3 +遊樂 3 +運營 3 +過渡 3 +道光 3 +達也 3 +違法 3 +遠航 3 +適應 3 +遷徙 3 +選 3 +選手 3 +選秀 3 +遺體 3 +邊界 3 +那裏 3 +邦聯 3 +都市 3 +鄉議 3 +配 3 +配樂 3 +配置 3 +酗酒 3 +酸 3 +釀酒 3 +釋放 3 +重傷 3 +重整 3 +金庫 3 +金庸 3 +金鐘 3 +銅 3 +鋼 3 +錄 3 +錄音 3 +鍵 3 +鎳 3 +鐵人 3 +長安 3 +長州 3 +長相 3 +長遠 3 +開播 3 +關節 3 +阿兒法 3 +阿拉斯加 3 +阿根廷 3 +阿爾卑斯 3 +附屬 3 +降 3 +降落 3 +陣營 3 +除外 3 +陵 3 +陸地 3 +陸續 3 +際 3 +隱藏 3 +隱語 3 +雅典 3 +雌雄 3 +雙立 3 +雜技 3 +難度 3 +雪梨 3 +雪莉 3 +雲 3 +零 3 +零售 3 +雷睦斯 3 +靈 3 +青島 3 +鞏固 3 +音頻 3 +頂層 3 +順利 3 +預先 3 +頭銜 3 +頻譜 3 +題寫 3 +額 3 +風 3 +食 3 +飲食 3 +飾演 3 +首任 3 +首演 3 +馬來 3 +馬來亞 3 +馬德里 3 +驅動 3 +驅逐 3 +體操 3 +高低槓 3 +高原 3 +高層 3 +高山 3 +高麗 3 +鳳山 3 +麥田 3 +黃金 3 +黑子 3 +黑斑 3 +黑洞 3 +黛比 3 +黨員 3 +龍 3 +龍馬 3 +$ 2 +' 2 +... 2 +...... 
2 +9.9999999 2 +99%-99% 2 +99.9億 2 +999.9 2 +999.99999 2 +999.9999999999999 2 +999.99億 2 +9999.9 2 +9999/99 2 +9999多 2 +9999餘 2 +99:99 2 +99a 2 +99° 2 +9:99 2 +9d 2 +9億9千9百萬 2 +9百萬 2 +9萬 2 +aac 2 +abc 2 +ai 2 +aldridge 2 +and 2 +arts 2 +bbc 2 +before 2 +boy 2 +c 2 +dc-99 2 +de 2 +dj 2 +dna 2 +e 2 +e9 2 +e99 2 +europipe 2 +eve 2 +f-99a 2 +fc 2 +finn 2 +gcmg 2 +gravion 2 +hall 2 +ii 2 +ipod 2 +jason 2 +jean 2 +k 2 +karin 2 +km/h 2 +l 2 +la 2 +lee 2 +live 2 +m9999 2 +n999 2 +nasa 2 +nds 2 +net 2 +nicea 2 +orochi 2 +p 2 +phillips 2 +pvc 2 +rivers 2 +robert 2 +s 2 +silver 2 +station 2 +strait 2 +tvb 2 +u99 2 +ua 2 +v 2 +winston 2 +wyclef 2 +x 2 +xii 2 +‧ 2 +〈 2 +〉 2 +一中 2 +一共 2 +一千 2 +一向 2 +一度 2 +一手 2 +一提 2 +一貫 2 +一面 2 +七喜 2 +三棟屋 2 +三氯化金 2 +三藏 2 +上下車 2 +上任 2 +上佳 2 +上午 2 +上吊 2 +上將 2 +上層 2 +上方 2 +上校 2 +上街 2 +下去 2 +下層 2 +下屬 2 +下旬 2 +下水 2 +下海 2 +下游 2 +下野 2 +不一 2 +不停 2 +不再 2 +不列顛 2 +不受 2 +不夠 2 +不如 2 +不宜 2 +不已 2 +不幸 2 +不法 2 +不清 2 +不用 2 +不管 2 +不良 2 +不論 2 +不變 2 +不錯 2 +不需 2 +不願 2 +丐幫 2 +世俗 2 +世博 2 +世卿 2 +世襲 2 +世錦 2 +丘陵 2 +中區 2 +中午 2 +中天 2 +中巴 2 +中正 2 +中途 2 +中道 2 +中遠 2 +主上 2 +主力 2 +主因 2 +主場 2 +主權 2 +主管 2 +主線 2 +乘船 2 +乘車 2 +乙級 2 +九一八 2 +九州 2 +九巴 2 +也有 2 +也許 2 +乳酪 2 +事後 2 +二十六 2 +二甘醇 2 +互動 2 +五四 2 +五峰 2 +五百 2 +井 2 +亞冠 2 +亞利桑那 2 +亞得里亞 2 +交 2 +交互 2 +交到 2 +交匯 2 +交好 2 +交情 2 +交戰 2 +交手 2 +交趾 2 +人力 2 +人心 2 +人才 2 +人文 2 +人格 2 +人熙 2 +人群 2 +人間 2 +人魚 2 +仁慈 2 +仁記 2 +今年 2 +介乎 2 +介石 2 +仍舊 2 +付款 2 +仙 2 +仙劍 2 +仙女 2 +以南 2 +以東 2 +以至 2 +任城 2 +任天堂 2 +任意 2 +份額 2 +仿 2 +伊比利亞 2 +伏威 2 +休閒 2 +伯公 2 +伯爵 2 +伯靈頓 2 +伴隨 2 +似乎 2 +低地 2 +低廉 2 +低溫 2 +住房 2 +佐土原 2 +佐藤 2 +佛山 2 +佛朗明哥 2 +佛殿 2 +作好 2 +作業 2 +作物 2 +佩劍 2 +併入 2 +使命 2 +使者 2 +使館 2 +來訪 2 +例 2 +例外 2 +供暖 2 +供熱 2 +供職 2 +侵 2 +侵入 2 +侵犯 2 +便宜 2 +促使 2 +促成 2 +俗稱 2 +保有 2 +保級 2 +保羅 2 +保衛 2 +信心 2 +信義 2 +信長 2 +信雄 2 +修士 2 +修理 2 +修習 2 +修訂 2 +修鍊 2 +個案 2 +倒台 2 +倒掛 2 +候鳥 2 +倡導 2 +倫 2 +假如 2 +假期 2 +假髮 2 +偏差 2 +停戰 2 +停滯 2 +偶然 2 +偶爾 2 +偽造 2 +傑作 2 +備 2 +催化 2 +傳入 2 +傳到 2 +傳動 2 +傳媒 2 +傳導 2 +傳授 2 +傳聞 2 +傳言 2 +傳送 2 +傳達 2 +債務 2 +傷 2 +傾聽 2 +僅僅 2 +僱員 2 +儀錶 2 +儒家 2 +優先 2 +儲備 2 +元代 2 +元件 2 +元帥 2 
+元年 2 +元洲 2 +元璋 2 +元甲 2 +元首 2 +充斥 2 +充當 2 +兆帕 2 +先知 2 +先行 2 +先驅 2 +光線 2 +光譜 2 +光軸 2 +克基拉 2 +克用 2 +克隆 2 +免職 2 +入伍 2 +入圍 2 +入學 2 +入獄 2 +入讀 2 +入門 2 +內務 2 +內外 2 +內心 2 +內流 2 +全新 2 +全日 2 +全校 2 +全權 2 +全能 2 +全身 2 +八一 2 +八百餘 2 +公學 2 +公寓 2 +公署 2 +公認 2 +兵營 2 +其父 2 +具備 2 +典禮 2 +再造 2 +冬季 2 +冰兄 2 +冰峰 2 +冰川 2 +冰雪 2 +凡 2 +凱特 2 +凱瑞 2 +出入 2 +出入口 2 +出家 2 +出席 2 +出演 2 +出產 2 +出賽 2 +出道 2 +分享 2 +分化 2 +分區 2 +分手 2 +分擔 2 +分歧 2 +分隊 2 +刊載 2 +列傳 2 +列出 2 +初學 2 +初年 2 +初稿 2 +初級 2 +初賽 2 +判斷 2 +判處 2 +別 2 +別列佐夫斯基 2 +利比亞 2 +利物浦 2 +利特維年科 2 +到來 2 +到底 2 +制止 2 +制裁 2 +制訂 2 +刺 2 +刺客 2 +刺死 2 +刻 2 +刻有 2 +削弱 2 +前任 2 +前來 2 +前妻 2 +前途 2 +剝奪 2 +剩下 2 +副本 2 +創 2 +創始 2 +創新 2 +創業 2 +劃入 2 +劃給 2 +劇烈 2 +劍術 2 +劍齒虎 2 +功 2 +功率 2 +加之 2 +加勒比 2 +加堆 2 +加重 2 +劣勢 2 +助戰 2 +勒格里 2 +勒沃 2 +動機 2 +動脈 2 +動車 2 +勝出 2 +勳 2 +勳章 2 +勳銜 2 +勾引 2 +包圍 2 +包廂 2 +包衣 2 +匕首 2 +化纖 2 +化身 2 +北宋 2 +北平 2 +北方 2 +北端 2 +北約 2 +北道 2 +北齊 2 +匯率 2 +區分 2 +十一世 2 +十三 2 +十六 2 +升學 2 +半山 2 +半球 2 +協商 2 +協奏 2 +協約 2 +南下 2 +南山 2 +南斯拉夫 2 +南遣 2 +南邊 2 +南陽 2 +南非 2 +南面 2 +南韓 2 +博弈 2 +博彩 2 +博恩 2 +占卜 2 +卡梅隆 2 +卡片 2 +印 2 +印加 2 +印尼 2 +印製 2 +即位 2 +即時 2 +即興 2 +卷 2 +卿 2 +卿雲 2 +厄運 2 +原名 2 +原址 2 +原聲 2 +去除 2 +參觀 2 +參選 2 +又是 2 +又稱 2 +及格 2 +友好 2 +反叛 2 +反抗 2 +反擊 2 +叔叔 2 +取決 2 +受審 2 +受益 2 +受體 2 +口中 2 +口述 2 +古巴 2 +古柯鹼 2 +古蹟 2 +召喚 2 +可汗 2 +史學 2 +史密斯 2 +史提夫 2 +史蒂芬 2 +右岸 2 +司機 2 +司長 2 +司鼓 2 +吃肉 2 +吃飯 2 +各式 2 +各式各樣 2 +各級 2 +各自 2 +各部 2 +合同 2 +合川 2 +合稱 2 +合葬 2 +吉布斯 2 +吉林 2 +吉里巴斯 2 +同人 2 +同居 2 +同體 2 +名人 2 +名利 2 +名古屋 2 +名縉 2 +名鎮 2 +向量 2 +君 2 +君王 2 +吞併 2 +否則 2 +否定 2 +告別 2 +告知 2 +告終 2 +周歲 2 +味 2 +呼叫 2 +呼籲 2 +和解 2 +和談 2 +咬金 2 +品行 2 +哈里發 2 +哥斯大黎加 2 +哥本哈根 2 +哥特 2 +哪裡 2 +售賣 2 +唯有 2 +唯美 2 +問 2 +啟動 2 +啟睿 2 +啟航 2 +啟蒙 2 +善化 2 +善意 2 +喉嚨 2 +喜劇 2 +喝 2 +喪生 2 +喬伊斯 2 +喬艾爾 2 +單元 2 +單曲 2 +喻 2 +嘉慶 2 +嘉玲 2 +器物 2 +噪音 2 +噴氣 2 +嚴密 2 +囚禁 2 +四分之一 2 +四十 2 +回國 2 +回想 2 +回憶 2 +回收 2 +國代 2 +國外 2 +國寶 2 +國徽 2 +國璋 2 +國語 2 +國鋒 2 +圍攻 2 +園藝 2 +圓頂 2 +圖樣 2 +圖畫 2 +團結 2 +團聚 2 +團長 2 +在位 2 +在來 2 +地外 2 +地帶 2 +坐診 2 +型態 2 +埃 2 +埃米莉 2 +城中 2 +城子 2 +域名 2 +執導 2 +執掌 2 +執教 2 +執法 2 +基底 2 +堂區 2 +堅 2 +堅固 2 +堅強 2 +報紙 2 +場場 2 +塞普勒斯 2 +境外 2 +墓地 2 +墓室 2 +增多 2 
+增建 2 +增強 2 +增設 2 +墮胎 2 +壓倒 2 +壓強 2 +壓迫 2 +士 2 +壯大 2 +壯年 2 +夏伊 2 +夏季 2 +外傳 2 +外圍 2 +外在 2 +外援 2 +外觀 2 +外資 2 +多倫多 2 +多半 2 +多少 2 +夜晚 2 +夠 2 +夥伴 2 +大亂 2 +大佛 2 +大公 2 +大力 2 +大勝 2 +大半 2 +大堂 2 +大妃 2 +大將 2 +大屋 2 +大廳 2 +大批 2 +大敗 2 +大槍 2 +大火 2 +大碟 2 +大笨 2 +大街 2 +大衛 2 +大連 2 +大阪 2 +天地 2 +天子 2 +天師 2 +天敵 2 +天氣 2 +天衣 2 +天雷 2 +太古 2 +太多 2 +太大 2 +太子 2 +太守 2 +太祖 2 +夸脫 2 +奉天 2 +契合 2 +奢侈 2 +奧布賴恩 2 +奧斯曼 2 +奧朗則布 2 +奧林匹克 2 +奪冠 2 +女士 2 +女孩 2 +女皇 2 +妖精 2 +妖魔 2 +妥善 2 +姊妹 2 +始皇 2 +姐妹 2 +姐弟 2 +姑家 2 +姓 2 +姓名 2 +姚 2 +姜 2 +姿態 2 +威斯康辛 2 +威爾遜 2 +娘舅 2 +婆婆 2 +嫉妒 2 +子夜 2 +子珍 2 +孔子 2 +字元 2 +字型 2 +字體 2 +存有 2 +存活 2 +孟能 2 +季前 2 +季軍 2 +孤僻 2 +孤獨 2 +孵化 2 +學制 2 +學問 2 +學士 2 +學年 2 +學期 2 +學童 2 +學系 2 +學費 2 +宇一郎 2 +守衛 2 +安修 2 +安息 2 +安打 2 +安東尼 2 +安菲特裡忒 2 +安邑 2 +完 2 +完整 2 +宏觀 2 +宗室 2 +官職 2 +定下 2 +定名 2 +定型 2 +定期 2 +客 2 +客串 2 +客人 2 +客室 2 +客機 2 +客車 2 +宣戰 2 +害怕 2 +家久 2 +家境 2 +家屬 2 +家產 2 +家衛 2 +家貓 2 +寄宿 2 +密切 2 +密蘇里 2 +富 2 +富人 2 +富特 2 +實力 2 +實務 2 +實用 2 +實習 2 +審 2 +審判 2 +審查 2 +寫道 2 +寬廣 2 +寬頻 2 +寬鬆 2 +寶石 2 +寺廟 2 +寺院 2 +封神 2 +封面 2 +射殺 2 +專區 2 +專員 2 +專有 2 +專題 2 +尉 2 +尊嚴 2 +尊重 2 +尋常 2 +對峙 2 +對待 2 +對陣 2 +小兒 2 +小姐 2 +小心 2 +小桃 2 +小梅 2 +小鎮 2 +小閻 2 +小青 2 +就任 2 +就業 2 +尺 2 +尼克森 2 +尼羅 2 +尼西亞 2 +尼采 2 +尾部 2 +局限 2 +居 2 +居委 2 +居里 2 +屋大維 2 +屋苑 2 +展 2 +展館 2 +履仁 2 +屬名 2 +山丘 2 +山坡 2 +山海 2 +岩石 2 +岳母 2 +岳父 2 +崇拜 2 +崔西 2 +崖 2 +嵌 2 +嶺南 2 +嶽麓 2 +川 2 +工兵 2 +工商 2 +工農 2 +工黨 2 +左上 2 +左側 2 +巧眉 2 +巧言 2 +差距 2 +差點 2 +巴克特里亞 2 +巴勒斯坦 2 +巴哈歐拉 2 +巴格曼 2 +巴格達 2 +巴洛克 2 +巴爾幹 2 +巴納德 2 +巷 2 +市值 2 +市內 2 +市商 2 +市郊 2 +市長 2 +布卡 2 +布拉格 2 +布朗 2 +布爾薩 2 +布魯明頓 2 +希羅 2 +帕洛馬 2 +帛琉 2 +帶去 2 +帶有 2 +常務 2 +常年 2 +常德 2 +常春藤葉 2 +常規 2 +幫忙 2 +干擾 2 +干涉 2 +干預 2 +平 2 +平安 2 +平息 2 +平成 2 +平方尺 2 +平時 2 +平頂 2 +年初 2 +年紀 2 +年譜 2 +幼體 2 +床墊 2 +序數 2 +底層 2 +店鋪 2 +度母 2 +座堂 2 +庫夫 2 +庫容 2 +庭 2 +康乃爾 2 +康復 2 +廉租 2 +廠房 2 +廢 2 +廢墟 2 +廢止 2 +廣安 2 +廣義 2 +延任 2 +延遲 2 +建有 2 +建銘 2 +引種 2 +引退 2 +引進 2 +弗朗索瓦 2 +強風 2 +彈奏 2 +彈性 2 +彌迦 2 +彙集 2 +彩色 2 +影展 2 +往返 2 +征 2 +征戰 2 +很近 2 +後端 2 +後裔 2 +徒刑 2 +得票 2 +得道 2 +從小 2 +從而 2 +從軍 2 +御苑 2 +微山 2 +德州 2 +德瑞克 2 +德輔 2 +心中 2 +必 2 +志剛 2 +快樂 2 +忽必烈 2 +思念 2 +思明 2 +思科 2 +性交 2 +恆鳳 2 +恐慌 2 
+恥辱 2 +恩 2 +恩寵 2 +恩賜 2 +悅強 2 +悲觀 2 +情意 2 +惠 2 +惠山 2 +愈 2 +愉景 2 +意志 2 +意願 2 +愛因斯坦 2 +愛德華 2 +愛惜 2 +感動 2 +感受 2 +感染 2 +慈幼 2 +慈鯛 2 +態 2 +慘敗 2 +慣例 2 +慶尚 2 +慶豐 2 +慾望 2 +憎恨 2 +憑 2 +應對 2 +懊惱 2 +懷俄明 2 +懷舊 2 +懸浮 2 +懸索 2 +成仙 2 +成傑 2 +成因 2 +成型 2 +成就 2 +成群 2 +成貓 2 +戰亂 2 +戰列 2 +戰場 2 +戰士 2 +戰線 2 +戰術 2 +戴 2 +戴麟趾 2 +房 2 +手冊 2 +手動 2 +手機 2 +手裡 2 +才能 2 +打工 2 +打敗 2 +打破 2 +打開 2 +托爾斯 2 +托爾斯泰 2 +扶植 2 +找出 2 +找回 2 +找尋 2 +承諾 2 +抄襲 2 +抓 2 +抓住 2 +投影 2 +投降 2 +抗擊 2 +抽取 2 +拆穿 2 +拆解 2 +拉斐爾 2 +拉格 2 +拔出 2 +拖 2 +拖延 2 +招商 2 +招股 2 +拷貝 2 +拼音 2 +拿 2 +拿到 2 +拿破崙 2 +拿走 2 +指引 2 +指控 2 +指涉 2 +按鍵 2 +挖角 2 +挽救 2 +捐贈 2 +捕 2 +捕獲 2 +捕食 2 +捷運 2 +掉 2 +排 2 +排放 2 +排氣 2 +排演 2 +掛架 2 +掠過 2 +掠食 2 +採訪 2 +接待 2 +接掌 2 +接種 2 +接駁 2 +控球 2 +推廣 2 +推翻 2 +推選 2 +描寫 2 +提倡 2 +提及 2 +提示 2 +插圖 2 +揚聲 2 +換入 2 +換股 2 +損傷 2 +損毀 2 +搞笑 2 +搭檔 2 +搶險 2 +摩根 2 +摩爾 2 +撤出 2 +撤軍 2 +播 2 +播客 2 +擅長 2 +擊 2 +擊退 2 +擒抱 2 +擔負 2 +據守 2 +擺脫 2 +擾動 2 +支出 2 +支柱 2 +收 2 +收復 2 +收發 2 +收穫 2 +收視 2 +收集 2 +改回 2 +改寫 2 +改建 2 +改版 2 +改良 2 +改裝 2 +攻佔 2 +攻陷 2 +放映 2 +放置 2 +政協 2 +政變 2 +政黨 2 +故意 2 +故此 2 +故鄉 2 +效忠 2 +效率 2 +敏 2 +敏感 2 +敗給 2 +教友 2 +教員 2 +教徒 2 +教派 2 +教科文 2 +整修 2 +整套 2 +整體 2 +敵對 2 +數位 2 +數十 2 +數理 2 +數目 2 +文元 2 +文官 2 +文帝 2 +文康 2 +文英 2 +文華 2 +文革 2 +斐濟 2 +斥資 2 +斯圖爾特 2 +斯大林 2 +斯氏星蟒 2 +斯洛維尼亞 2 +斯特勒謝尼 2 +斯理 2 +新宿 2 +新岩 2 +新曲 2 +新澤西 2 +新田 2 +新興 2 +斷裂 2 +方位 2 +方針 2 +施行 2 +旁 2 +旁邊 2 +旅鴿 2 +旋律 2 +日喀則 2 +日本龍 2 +日益 2 +日航 2 +日行 2 +早上 2 +旺山 2 +旺盛 2 +昆士蘭 2 +昌 2 +明帝 2 +易名 2 +昔日 2 +星形 2 +春天 2 +春日 2 +是否是 2 +時尚 2 +時速 2 +晉升 2 +晚上 2 +晚會 2 +普 2 +普及 2 +普陀 2 +景 2 +景帝 2 +景觀 2 +景象 2 +智慧 2 +暑假 2 +暗殺 2 +暢銷 2 +暫停 2 +暫緩 2 +暴露 2 +曝氣 2 +更好 2 +更改 2 +更深 2 +更高 2 +書信 2 +書寫 2 +書房 2 +書法 2 +最久 2 +最小 2 +最少 2 +最新 2 +最遊 2 +會員 2 +會場 2 +會社 2 +會談 2 +會長 2 +月刊 2 +有助 2 +有意 2 +有毒 2 +有罪 2 +服 2 +服從 2 +朔日 2 +朝代 2 +木星 2 +木管 2 +末年 2 +末期 2 +本區 2 +本屆 2 +本班 2 +本站 2 +本質 2 +本願 2 +朴 2 +村落 2 +村頭 2 +束縛 2 +杭 2 +東亞 2 +東吳 2 +東山 2 +東征 2 +東晉 2 +東正 2 +東視 2 +松潘 2 +松鼠猴 2 +林庄 2 +果實 2 +果汁 2 +架設 2 +柏油 2 +染色 2 +柔佛 2 +柔弱 2 +查 2 +查德 2 +柯 2 +柱 2 +柳 2 +柳江 2 +柴油 2 +柴灣 2 +校內 2 +校隊 2 +核能 2 +根本 2 +格式 2 +格林 2 +格林維爾 2 +格格 2 +格檔 2 +格里高利 2 +桃太洛斯 2 +桌面 2 +桑葚 2 
+棕熊 2 +棣 2 +植被 2 +楊樹 2 +業者 2 +極大 2 +極性 2 +極高 2 +榮獲 2 +樁 2 +樂農 2 +標語 2 +標題 2 +樞機 2 +樟湖 2 +模具 2 +樣本 2 +樹木 2 +橙 2 +機動 2 +機員 2 +機槍 2 +橡膠 2 +橫山 2 +橫濱 2 +橫跨 2 +檢索 2 +檢討 2 +權威 2 +權貴 2 +次數 2 +次級 2 +次要 2 +次郎 2 +次長 2 +欺騙 2 +歌仔 2 +歌唱 2 +歌聲 2 +歌迷 2 +正是 2 +正直 2 +正統 2 +正面 2 +此人 2 +此案 2 +此物 2 +此種 2 +此線 2 +此舉 2 +此類 2 +步態 2 +武大 2 +武昌 2 +武松 2 +歧視 2 +歸類 2 +死靈 2 +殘存 2 +殘忍 2 +殘酷 2 +殯葬 2 +殺傷 2 +殺掉 2 +每位 2 +每周 2 +每層 2 +每日 2 +每次 2 +毒性 2 +毒殺 2 +毒藥 2 +毗鄰 2 +毛利 2 +民不聊生 2 +民調 2 +民都洛水牛 2 +氣 2 +氣田 2 +氧 2 +氫彈 2 +氯金酸 2 +水孔 2 +水手 2 +水準 2 +水溫 2 +水滸 2 +水質 2 +水道 2 +水餃 2 +永嘉 2 +永寧 2 +汗位 2 +汝霖 2 +江北 2 +江戶 2 +池尻 2 +決心 2 +決戰 2 +沃夫 2 +沖繩 2 +沙 2 +沙咀 2 +沙柏 2 +沙河 2 +沙龍 2 +河水 2 +泉州 2 +法蘭西 2 +法醫 2 +泡沫 2 +波塞摩斯 2 +注射 2 +注重 2 +泰坦 2 +洗 2 +洗手 2 +洛辛堡 2 +洞 2 +活 2 +活性 2 +派出 2 +派別 2 +派駐 2 +流傳 2 +流失 2 +流求 2 +流派 2 +流通 2 +浙東 2 +浩劫 2 +浮冰 2 +海南 2 +海涌 2 +海豹 2 +海邊 2 +海關 2 +消化 2 +消失 2 +涌 2 +涮 2 +淄博 2 +淮 2 +淮河 2 +深厚 2 +深得 2 +深愛 2 +深遠 2 +淹沒 2 +添加 2 +清晨 2 +清楚 2 +清華 2 +清鍾 2 +減輕 2 +游牧 2 +湖州 2 +湘 2 +湯興 2 +溝通 2 +溪流 2 +溫和 2 +溫州 2 +溫暖 2 +滄州 2 +滅口 2 +滙豐 2 +滬東 2 +滿足 2 +漁業 2 +漂流 2 +演說 2 +漢佛瑞 2 +漢口 2 +漸漸 2 +潔 2 +潭西 2 +潮州 2 +澤普 2 +澳底 2 +激光 2 +激戰 2 +激起 2 +濃縮 2 +濕原 2 +濕度 2 +濟寧 2 +濱松 2 +濱湖 2 +瀏覽 2 +灌木 2 +灘 2 +火藥 2 +灰狼 2 +灰色 2 +災害 2 +炮台 2 +為數 2 +烏孫 2 +無力 2 +無效 2 +無界 2 +無緣 2 +無辜 2 +無黨 2 +焦耳 2 +然 2 +煙熏 2 +照料 2 +照顧 2 +煮制 2 +熊 2 +熊隻 2 +熱比婭 2 +熱衷 2 +燃燒 2 +燒毀 2 +燒餅 2 +燕山 2 +爪 2 +爪獸 2 +爭取 2 +爭執 2 +爭辯 2 +爭霸 2 +父子 2 +爾後 2 +牆體 2 +片段 2 +牙買加 2 +牛仔 2 +牛肉 2 +牧師 2 +牧養 2 +物價 2 +特使 2 +特性 2 +特權 2 +特種 2 +犬隻 2 +犬齒 2 +犯 2 +狀 2 +狐狸 2 +猛烈 2 +猛虎 2 +猶他 2 +猶豫 2 +獅 2 +獎章 2 +獎金 2 +獨居 2 +獨自 2 +獵奇 2 +獵殺 2 +玄 2 +玄機 2 +率軍 2 +玉帶 2 +玉門 2 +王位 2 +王妃 2 +玩 2 +玩具 2 +珀西 2 +珍 2 +珍品 2 +現址 2 +現狀 2 +球迷 2 +理念 2 +琪 2 +琴 2 +琴行 2 +瑪利亞 2 +瑪納斯 2 +瑪莉 2 +瑪麗亞 2 +環島 2 +環形 2 +環球 2 +環礁 2 +瓊璘 2 +瓜分 2 +瓦爾那 2 +甚 2 +甚少 2 +甚麼 2 +甜甜 2 +生日 2 +生母 2 +生病 2 +產值 2 +產區 2 +產物 2 +用地 2 +用電 2 +由來 2 +由衷 2 +甲 2 +甲板 2 +甲醇 2 +申花 2 +男爵 2 +町村 2 +留存 2 +留學 2 +留意 2 +留香 2 +畜 2 +番 2 +畫上 2 +畫報 2 +異性 2 +當事 2 +當代 2 +當前 2 +當場 2 +當成 2 +疫情 2 +病人 2 +病理 2 +痕迹 2 +登記 2 +登輝 2 +發售 2 +發回 2 +發掘 2 +發覺 2 +發音 2 +白紙 2 +白金漢 2 +白馬 2 +百度 2 +皇子 2 
+皇宮 2 +皮 2 +皮埃蒙特 2 +盆子 2 +益世 2 +盟校 2 +監察 2 +監製 2 +監視 2 +直人 2 +直布羅陀 2 +直轄 2 +直通 2 +相戀 2 +相等 2 +相識 2 +相連 2 +省立 2 +看似 2 +看法 2 +真宗 2 +真情 2 +真的 2 +眼鏡 2 +眾人 2 +睡衣 2 +矚目 2 +矛盾 2 +知節 2 +短面熊 2 +矮人 2 +石化 2 +石原 2 +石家 2 +砍柴 2 +研製 2 +研討 2 +砲 2 +硅 2 +硫磺 2 +硬 2 +硬體 2 +碎石 2 +碘 2 +碧翠絲 2 +碩士 2 +確實 2 +磅 2 +磨損 2 +礦 2 +礦業 2 +示 2 +示威 2 +社交 2 +祂 2 +祕教 2 +神代 2 +票 2 +祺瑞 2 +福來 2 +福利 2 +福部 2 +福音 2 +禮節 2 +禽龍 2 +秀全 2 +秀吉 2 +秋天 2 +科幻 2 +科爾多瓦 2 +科羅拉多 2 +科赫 2 +科雷馬 2 +秘魯 2 +租客 2 +移除 2 +稀有 2 +稅 2 +程式 2 +種姓 2 +稱呼 2 +稱臣 2 +稱讚 2 +稻盛 2 +穆罕默德 2 +積分 2 +空缺 2 +穿耳 2 +突出 2 +突厥 2 +突擊 2 +窟 2 +立下 2 +立憲 2 +立熙 2 +站台 2 +竟然 2 +竣工 2 +童星 2 +競技 2 +競馬 2 +笑話 2 +笨 2 +第九 2 +第十一 2 +筆下 2 +等到 2 +等待 2 +策 2 +策劃 2 +管弦 2 +管治 2 +節奏 2 +簡 2 +簡易 2 +簽署 2 +籃壇 2 +籃子 2 +籌建 2 +籤 2 +米利特 2 +米格 2 +米爾扎 2 +精度 2 +精武 2 +精液 2 +精緻 2 +精美 2 +精采 2 +糖 2 +糖份 2 +紀 2 +約克 2 +約定俗成 2 +約會 2 +約瑟夫 2 +紅木 2 +紅麴 2 +紋 2 +紐卡斯爾 2 +紓緩 2 +純淨 2 +純粹 2 +紙 2 +紙幣 2 +紛紛 2 +素質 2 +索引 2 +細緻 2 +終 2 +組長 2 +結 2 +結晶 2 +結識 2 +絕望 2 +統稱 2 +絲綢 2 +經紀 2 +經費 2 +綠 2 +維 2 +維修 2 +維吉尼亞 2 +維鈞 2 +網上 2 +網友 2 +網民 2 +緊鄰 2 +線粒 2 +線西 2 +編入 2 +編寫 2 +編製 2 +緩存 2 +緩慢 2 +緬因 2 +縣城 2 +縣治 2 +縣長 2 +縱橫 2 +縱貫 2 +總值 2 +總會 2 +總監 2 +總管 2 +總額 2 +繁忙 2 +繁榮 2 +繁殖 2 +繚 2 +繞城 2 +繪圖 2 +續篇 2 +續約 2 +罪 2 +罪案 2 +罪行 2 +署名 2 +署長 2 +罷黜 2 +罹患 2 +羅丹 2 +羅塞塔 2 +羅斯 2 +羅斯基勒 2 +羅斯提 2 +羅漢 2 +羅素 2 +羅貝爾 2 +羊 2 +羊曲 2 +羊毛 2 +羌 2 +美女 2 +義 2 +義和 2 +習 2 +習性 2 +翦 2 +翻新 2 +翻越 2 +翼 2 +老年 2 +老式 2 +老舍 2 +考場 2 +考證 2 +耕作 2 +耕種 2 +耳道 2 +耶和華 2 +耶律 2 +耶魯 2 +聖三 2 +聖地 2 +聘任 2 +聚 2 +聚合 2 +聚居 2 +聯 2 +聯名 2 +聰明 2 +聲名 2 +聲望 2 +聲道 2 +聽 2 +肉糕 2 +肉食 2 +肖 2 +肖像 2 +肖金 2 +肝臟 2 +股 2 +股權 2 +肢 2 +肯尼迪 2 +育才 2 +育種 2 +肺炎 2 +胎兒 2 +胖子 2 +能源 2 +能級 2 +腎 2 +腓特烈 2 +腳 2 +腳趾 2 +腹面 2 +腺葉木犀欖 2 +膝蓋 2 +膠質 2 +臘汁 2 +臣民 2 +臨床 2 +臨淄 2 +臨近 2 +臨邑 2 +自主 2 +自助 2 +自家 2 +自殺 2 +自衛 2 +自轉 2 +臭氧 2 +至於 2 +致 2 +致死 2 +臺中 2 +臺北 2 +興化 2 +舉 2 +舉人 2 +舉動 2 +舊址 2 +舒服 2 +舒適 2 +舞台 2 +船尾 2 +船廠 2 +船艦 2 +船長 2 +艇 2 +艦艇 2 +艱苦 2 +色度 2 +色素 2 +花卉 2 +花崗 2 +花樣 2 +花費 2 +苗 2 +若干 2 +若是 2 +苦惱 2 +英俊 2 +英超 2 +英雄 2 +茨威格 2 +荷花 2 +荷蘭豬 2 +莆田 2 +莉拉 2 +莎拉 2 +莎莉 2 +莫名 2 +莫泊桑 2 +莫爾庫斯 2 +莫雷爾 2 +莫高 2 +菁英 2 +菌 2 +菩薩 2 +華夏 2 +華隆 2 
+華麗 2 +菲 2 +萊特 2 +萬宜 2 +萬春 2 +萬萬 2 +落入 2 +落差 2 +葉子 2 +葉海亞 2 +葉片 2 +著想 2 +著迷 2 +葛馮 2 +葡萄牙 2 +葵盛 2 +蒂羅爾 2 +蒐集 2 +蒙 2 +蒙山 2 +蒙蔽 2 +蒸餾 2 +蓄電 2 +蓮屬 2 +蔬菜 2 +蔭權 2 +薩 2 +薩達姆 2 +薪資 2 +藉 2 +藉口 2 +藉著 2 +藍調 2 +藍鯨 2 +藏在 2 +藝員 2 +蘇丹 2 +蘇爾曼 2 +蘇維埃 2 +蘇黎世 2 +蘭 2 +虎豹 2 +虐待 2 +虔誠 2 +處境 2 +蛇 2 +蛇夫 2 +蛇類 2 +螺旋 2 +蠟燭 2 +蠻族 2 +血清 2 +血緣 2 +行李 2 +行程 2 +行車 2 +行駛 2 +術士 2 +街區 2 +衛冕 2 +衛戍 2 +表皮 2 +袋中 2 +裁定 2 +補充 2 +補助 2 +裝病 2 +製冷 2 +製片 2 +西元 2 +西區 2 +西沙 2 +西甲 2 +西站 2 +西鄰 2 +西鐵 2 +西門子 2 +西雅圖 2 +要素 2 +要職 2 +見義勇為 2 +見證 2 +規範 2 +視覺 2 +親密 2 +親屬 2 +親情 2 +親戚 2 +親緣 2 +親近 2 +觀世音 2 +觀塘 2 +觀賞 2 +角宿 2 +角逐 2 +解 2 +解鎖 2 +解體 2 +言論 2 +訂婚 2 +訂購 2 +計 2 +討伐 2 +記號 2 +許可 2 +訴說 2 +註冊 2 +評議 2 +評選 2 +詞彙 2 +詩篇 2 +詮釋 2 +話語 2 +該廟 2 +該車 2 +該館 2 +誕辰 2 +誘發 2 +語堂 2 +誤導 2 +說唱 2 +課 2 +課題 2 +調動 2 +調料 2 +調景 2 +論壇 2 +諸侯 2 +諸葛 2 +諾貝爾 2 +謎 2 +謙虛 2 +講 2 +謠言 2 +證件 2 +證券 2 +證據 2 +譜 2 +譜寫 2 +警報 2 +警官 2 +警方 2 +警長 2 +譯 2 +譯名 2 +譯法 2 +護士 2 +護法 2 +變得 2 +變換 2 +變更 2 +變異 2 +谷 2 +象棋 2 +豪華 2 +豫 2 +貓頭鷹 2 +貝爾尼納 2 +財務 2 +財困 2 +財團 2 +財物 2 +貨櫃 2 +販子 2 +貪污 2 +貴人 2 +買下 2 +買來 2 +賀氏 2 +資方 2 +資產 2 +賠償 2 +賢妃 2 +質子 2 +質疑 2 +質素 2 +購入 2 +購物 2 +賽馬 2 +赤川 2 +走出 2 +走路 2 +起飛 2 +趁機 2 +超越 2 +越低 2 +越獄 2 +越遠 2 +越高 2 +趕出 2 +趨 2 +趨同 2 +路上 2 +路口 2 +身 2 +身長 2 +躲過 2 +車中 2 +車廂 2 +車資 2 +車隊 2 +軍區 2 +軍校 2 +軍法 2 +載重 2 +輔音 2 +輕 2 +輕傷 2 +輕型 2 +輕視 2 +輟學 2 +轄境 2 +轄有 2 +轉介 2 +轉車 2 +轎車 2 +轟動一時 2 +辛亥 2 +辣妹 2 +辦 2 +辭退 2 +辯護 2 +農地 2 +農場 2 +農曆 2 +農田 2 +農藥 2 +近藤 2 +近衛 2 +迫害 2 +迴避 2 +迷 2 +迷信 2 +迷幻 2 +追溯 2 +退化 2 +送 2 +送入 2 +逃 2 +逃出 2 +逃避 2 +透明 2 +逐鹿 2 +通報 2 +通婚 2 +通知 2 +通稱 2 +通航 2 +速寫 2 +速率 2 +造出 2 +造船 2 +連同 2 +連帶 2 +連鎖 2 +週年 2 +進修 2 +進出口 2 +進化 2 +進駐 2 +遇到 2 +遊仙 2 +遊客 2 +遊玩 2 +運河 2 +運用 2 +運轉 2 +過世 2 +過勞 2 +過年 2 +過於 2 +過海 2 +過關 2 +道場 2 +道生 2 +達爾 2 +違反 2 +遞歸 2 +遠東 2 +遭受 2 +遴選 2 +遵守 2 +遷 2 +遷往 2 +選中 2 +選拔 2 +選民 2 +選為 2 +遺囑 2 +遺產 2 +遺跡 2 +遼東 2 +還珠 2 +那些 2 +那樣 2 +邦初 2 +郊外 2 +部下 2 +部件 2 +部族 2 +郵件 2 +都柏林 2 +都統 2 +鄭國 2 +鄭氏 2 +鄰國 2 +配合 2 +配對 2 +酒吧 2 +酒泉 2 +酒醉 2 +醜聞 2 +醫藥 2 +釉下 2 +里昂 2 +里程 2 +重修 2 +重型 2 +重華 2 +重言 2 +重返 2 +重重 2 +重量 2 +野獸 2 +量表 2 +金山 2 +金星 2 +金牌 2 +金蓮 2 +金雞 2 +金馬 2 +鈞 2 +銀 2 +鋅 2 
+鋼鐵 2 +錢 2 +錫金 2 +鍋 2 +鍵盤 2 +鐘錶 2 +鐵伊 2 +鐵達尼 2 +鑄造 2 +長久 2 +長城 2 +長女 2 +長春 2 +長老 2 +長者 2 +長興 2 +長蘆 2 +長軸 2 +長音 2 +門前 2 +門口 2 +門戶 2 +門齒 2 +開創 2 +開口 2 +開心 2 +開拍 2 +開採 2 +開會 2 +開火 2 +開羅 2 +開賽 2 +開通 2 +開門 2 +開除 2 +閏年 2 +間接 2 +間隙 2 +閘門 2 +閱讀 2 +闊 2 +關注 2 +關聯 2 +關說 2 +關鍵 2 +關門 2 +關閉 2 +防範 2 +防衛 2 +阻擋 2 +阻礙 2 +阿 2 +阿保機 2 +阿姆斯特丹 2 +阿格 2 +阿美 2 +附帶 2 +降級 2 +降解 2 +除籍 2 +陰霾 2 +陵墓 2 +陵寢 2 +陶瓷 2 +陷阱 2 +陽澄 2 +隆頭魚 2 +隊友 2 +隊長 2 +隋 2 +隋代 2 +隔 2 +隕石 2 +隨之 2 +集成 2 +集資 2 +集雨 2 +集體 2 +雍正 2 +雕像 2 +離任 2 +離婚 2 +離心 2 +雪貂 2 +雲想 2 +零星 2 +電動 2 +電壓 2 +電流 2 +電纜 2 +電能 2 +電路 2 +電鐵 2 +震動 2 +震盪 2 +震驚 2 +霍 2 +霍普 2 +霸主 2 +霸王 2 +靈素 2 +青聯 2 +青藏 2 +青銅 2 +靜電 2 +面臨 2 +面試 2 +面部 2 +音系 2 +音變 2 +韻律 2 +頂 2 +順序 2 +預備 2 +預定 2 +預言 2 +預計 2 +頒布 2 +頒發 2 +頓 2 +領地 2 +頭等 2 +頭部 2 +願望 2 +顧 2 +顯得 2 +顯聖 2 +顯著 2 +風俗 2 +風景 2 +風氣 2 +風濕 2 +風雲 2 +風靡 2 +食夢 2 +食材 2 +飢荒 2 +飯 2 +飲品 2 +飲用 2 +餘額 2 +館藏 2 +饑荒 2 +饒舌 2 +首位 2 +首播 2 +首爾 2 +首腦 2 +首部 2 +香蕉 2 +馬丁 2 +馬克思 2 +馬克斯 2 +馬其頓 2 +馬拉松 2 +馬歇爾 2 +馬耳他 2 +馬里奧 2 +駐紮 2 +駐足 2 +駕駛 2 +骨 2 +骨頭 2 +骨髓 2 +體制 2 +體力 2 +體型 2 +體校 2 +體重 2 +體驗 2 +高低 2 +高壓 2 +高平 2 +高校 2 +高止 2 +高能 2 +高興 2 +高郵 2 +鬆散 2 +鬼 2 +魅力 2 +魏 2 +魚雷 2 +魚頭 2 +魯殊 2 +鮮明 2 +鯉形 2 +鯉科 2 +鰂魚 2 +鱸形 2 +鳥取 2 +鳥綱 2 +鳳凰 2 +鳳翔 2 +鹼 2 +鹿 2 +鹿兒 2 +麗珠 2 +麗茲 2 +麥克塞 2 +麥爾斯 2 +麻河 2 +麻省 2 +黃帝 2 +黃色 2 +黑幫 2 +黑貓 2 +黑龍 2 +黔 2 +點擊 2 +點數 2 +點球 2 +黨派 2 +鼎盛 2 +鼠疫 2 +齊 2 +齊克果 2 +齒擦 2 +齒軌 2 +齧齒 2 +龐家堡 2 +$9,999 1 +$99,999 1 ++ 1 +-99 1 +-999 1 +9--9 1 +9-9.9 1 +9.99% 1 +9.999% 1 +9.9999萬 1 +9.99萬 1 +9/9 1 +99-99 1 +999-999lr 1 +999.999 1 +9999-9999 1 +999cm 1 +999m 1 +999x 1 +999萬9千餘 1 +999餘 1 +999餘萬 1 +99b 1 +99萬9千 1 +9c 1 +9f 1 +9nd 1 +9億9999萬 1 +9億9千萬 1 +9成 1 +9百多萬 1 +9萬億 1 +9萬多 1 +`` 1 +a9 1 +a999-999 1 +aankhen 1 +abante 1 +abdurrahman 1 +ac 1 +académie 1 +activision 1 +adilabad 1 +adisumarmo 1 +admiral 1 +advance 1 +aeg 1 +aek 1 +aero 1 +aeromobile 1 +aethra 1 +afd 1 +ages 1 +airlines 1 +airport 1 +aleksej 1 +alliance 1 +alpha 1 +alyssum 1 +amorc 1 +android 1 +anne 1 +antarctic 1 +architecture 1 +argonauts 1 +arwadi 1 +arzacq-arraziguet 1 +asteroid 1 +auld 1 
+auteuil 1 +avenue 1 +aviation 1 +aviv 1 +b9 1 +bad 1 +baldwin 1 +ballklub 1 +bank 1 +bar 1 +baronet 1 +barros 1 +barsbold 1 +beatles 1 +beaune 1 +beaune-sud 1 +beckham 1 +beinasco 1 +belgaum 1 +bellagio 1 +berg 1 +berne-belp 1 +besar 1 +bhcs 1 +blake 1 +books 1 +boot 1 +bransoni 1 +brett 1 +brian 1 +briann 1 +bronfenbrenner 1 +brough 1 +bruce 1 +bt 1 +bud 1 +caen 1 +calling 1 +campaign 1 +campostoma 1 +can 1 +canal 1 +cannon 1 +capital 1 +caret 1 +caroline 1 +castle 1 +cathedral 1 +cbe 1 +cec 1 +cerro 1 +cet 1 +ceyhan 1 +chapman 1 +chase 1 +chau 1 +chell 1 +christopher 1 +chrome 1 +churchill 1 +ci-9999 1 +cit999b 1 +city 1 +claritin 1 +clark 1 +cms 1 +cnzz 1 +cohen 1 +colchis 1 +color 1 +comic 1 +company 1 +connecticut 1 +conroy 1 +copper 1 +cornell 1 +cost 1 +costa 1 +council 1 +cp 1 +cpu 1 +crh999b 1 +crh999b-999 1 +crh999c 1 +crypton 1 +cushing 1 +cálida 1 +daisuke 1 +dakota 1 +damrosch 1 +daria 1 +dark 1 +dart 1 +dawn 1 +ddc 1 +dennis 1 +derby 1 +desanctis 1 +devasthanam 1 +dfh9 1 +dialogue 1 +digi 1 +digibook 1 +direct 1 +director 1 +divisione 1 +dmfc 1 +dog 1 +doodle 1 +dorian 1 +dossing 1 +double 1 +dragon 1 +ds 1 +dsm 1 +durst 1 +earth 1 +eden 1 +el 1 +electronic 1 +elisabeth 1 +ellie 1 +elliot 1 +eminescu 1 +end 1 +entertainment 1 +entity 1 +epa 1 +epithema 1 +epstein 1 +estate 1 +et 1 +exe 1 +expedition 1 +f(x) 1 +falls 1 +family 1 +fernando 1 +films 1 +firefox 1 +firozpur 1 +fleet 1 +fly 1 +fook 1 +forever 1 +fortran 1 +fox 1 +frank 1 +franpipe 1 +fred 1 +frito-lay 1 +fsb 1 +fudosi 1 +fund 1 +g 1 +g(x) 1 +g99a 1 +galliano 1 +gb 1 +gbest 1 +gear 1 +geophysical 1 +german 1 +ghost 1 +gibbs 1 +giuliano 1 +golden 1 +good 1 +goodnow 1 +government 1 +grant 1 +greater 1 +greenbelt 1 +greenville 1 +groening 1 +ground 1 +group 1 +gto 1 +guariglia 1 +halifax 1 +hangar 1 +harry 1 +harvey 1 +hau 1 +haven 1 +hear 1 +heh 1 +herrera 1 +herschel 1 +higher 1 +hillman 1 +hiv 1 +holy 1 +hondt 1 +hopkins 1 +housing 1 +hp 1 +humphrey 1 +hunt 1 +i 1 +ib 1 +igbt 1 +igy 1 
+illumination 1 +in 1 +india 1 +ingeri 1 +innocence 1 +international 1 +ipark 1 +iphone 1 +iron 1 +isartor 1 +ischl 1 +it's 1 +itunes 1 +iupac 1 +iv 1 +jay 1 +jazz 1 +jeff 1 +johnson 1 +jpl 1 +justice 1 +justin 1 +juvisy 1 +kansas 1 +karaköy 1 +karlstor 1 +kate 1 +kekal 1 +kenway 1 +kilpatrick 1 +kingfisher 1 +kink.com 1 +kinross 1 +kkr 1 +km 1 +knudstrup 1 +koffka 1 +kurnool 1 +kurt 1 +langdon 1 +langford 1 +language 1 +last 1 +laurifolia 1 +lcd 1 +ld99 1 +leaf 1 +lees 1 +lennart 1 +lethal 1 +liability 1 +liaoxipterus 1 +lilim 1 +linux 1 +liu 1 +lomidine 1 +loratadin 1 +lotz 1 +low 1 +lowell 1 +maddie 1 +magic 1 +magma 1 +mallride 1 +mamaia 1 +man 1 +managing 1 +manea 1 +maolan 1 +maria 1 +mario 1 +market 1 +marshlands 1 +martin 1 +mayflower 1 +md-99 1 +mechernich 1 +medical 1 +menachem 1 +merina 1 +methala 1 +metress 1 +meyers 1 +michaelerkirche 1 +micro 1 +micro-usm 1 +middle 1 +mihai 1 +mintz 1 +mitchell 1 +mm 1 +modern 1 +mogens 1 +money 1 +monsters 1 +montana 1 +morus 1 +multitier 1 +mundell 1 +museum 1 +my 1 +myers 1 +n99 1 +n=9 1 +name 1 +nanocells 1 +natasha 1 +nazionale 1 +ncaa 1 +neluset 1 +neverwhere 1 +nhk 1 +niarchos 1 +nibiru 1 +nickel 1 +nirmal 1 +nist 1 +no 1 +norman 1 +north 1 +novogrudok 1 +o. 
1 +odd 1 +omega 1 +omniworld 1 +one 1 +online 1 +opus 1 +ori 1 +orjan 1 +orkney 1 +ornatum 1 +ospatulus 1 +otto 1 +ova 1 +p9o9 1 +paleorhinus 1 +pangjiabu 1 +papa 1 +park 1 +pasmo 1 +pau 1 +paul 1 +pbest 1 +peronismo 1 +perouse 1 +persson 1 +perth 1 +pfa 1 +phil 1 +philippa 1 +piano 1 +pinerolo 1 +pisapia 1 +pittsburghia 1 +place 1 +planes 1 +planetshanghai 1 +playgirl 1 +police 1 +pre-rendering 1 +presbyterian 1 +primary 1 +psychology 1 +pukaki 1 +pulau 1 +purma 1 +quartet 1 +quentin 1 +quest 1 +r9 1 +railway 1 +rbk 1 +rbs 1 +record 1 +recordon 1 +reserve 1 +return 1 +review 1 +rhcl9 1 +rhythm 1 +riaa 1 +rinchen 1 +river 1 +roble 1 +rocha 1 +rock 1 +rolf 1 +rosenborg 1 +rossabi 1 +ruger 1 +russell 1 +s9 1 +safari 1 +salomon 1 +sam 1 +sandwithii 1 +sara 1 +sarianidi 1 +savannah 1 +sbe 1 +school 1 +schuchat 1 +scream 1 +scree 1 +sea 1 +sec 1 +secobarbital 1 +seemann 1 +sendlinger 1 +sensme 1 +shame 1 +sharon 1 +sheegog 1 +sheinkin 1 +shelters 1 +simon 1 +snipes 1 +social 1 +sofi 1 +soobedars 1 +soviet 1 +space 1 +spector 1 +spirit 1 +spittel 1 +sportsnet 1 +srisailamgudem 1 +ss 1 +st 1 +standard 1 +stanton 1 +star 1 +statpipe 1 +stavros 1 +steinbeck 1 +stephen 1 +steven 1 +stif 1 +stonewall 1 +street 1 +streymoy 1 +study 1 +stutsman 1 +suica 1 +sunset 1 +supply 1 +suzuki 1 +syahrin 1 +sōya 1 +t 1 +t.999.com 1 +t.qq.com 1 +t.sina.com.cn 1 +t.sohu.com 1 +t.xxxx.com 1 +tau 1 +technology 1 +tel 1 +texas 1 +tf 1 +tf99 1 +theodor 1 +thomas 1 +thrissur 1 +timati 1 +time 1 +tnm 1 +tor 1 +touch 1 +trail 1 +train 1 +tru 1 +truncatulus 1 +tsang 1 +tvs-9 1 +tweddle 1 +twisty 1 +tyler 1 +uhler-phillips 1 +umls 1 +un 1 +union 1 +university 1 +usphs 1 +utricularia 1 +valla 1 +varginha 1 +victoria 1 +view 1 +viktor 1 +villa 1 +volantis 1 +vvvf 1 +w=9 1 +walker 1 +walter 1 +wesley 1 +west 1 +westmeath 1 +wheeler 1 +white 1 +who 1 +wii 1 +william 1 +wing 1 +wireless 1 +woman 1 +wood 1 +woodside 1 +world 1 +wta 1 +year 1 +youtube 1 +zeepipe 1 +zone 1 +° 1 +ð 1 +þ 1 +̄ 1 +θ 1 +〔 1 +〕 1 
+一, 1 +一中全會 1 +一九五八 1 +一併 1 +一億 1 +一八 1 +一分為二 1 +一到 1 +一勞永逸 1 +一反其道 1 +一字一句 1 +一式一樣 1 +一成 1 +一戰 1 +一改 1 +一時 1 +一概 1 +一模一樣 1 +一氧化碳 1 +一炮 1 +一爭 1 +一發 1 +一百 1 +一百幾十 1 +一百萬 1 +一百餘 1 +一益 1 +一而再、再而三 1 +一舉 1 +一落千丈 1 +一見鍾情 1 +一路 1 +一身 1 +一邊 1 +一點 1 +丁字 1 +丁目 1 +七七 1 +七十 1 +七里 1 +三、 1 +三一 1 +三中 1 +三中全會 1 +三井 1 +三井住友 1 +三亞 1 +三元 1 +三十四 1 +三原 1 +三崎 1 +三星 1 +三氯化銠 1 +三氯氧釩 1 +三浦 1 +三王 1 +三百 1 +三百六七十 1 +三百多 1 +三索頜腔蛇 1 +三船 1 +三菱 1 +三萬 1 +三藩市 1 +三軍 1 +三門 1 +上下 1 +上下行 1 +上傳 1 +上去 1 +上古 1 +上司 1 +上埔 1 +上報 1 +上塘 1 +上奏 1 +上學 1 +上尉 1 +上手 1 +上新世 1 +上朝 1 +上林 1 +上沖 1 +上班 1 +上端 1 +上網 1 +上線 1 +上色 1 +上蓋 1 +上訪 1 +上調 1 +上路 1 +上身 1 +上車 1 +上選 1 +上部 1 +上限 1 +上集 1 +上雲 1 +上顎 1 +下剋上高潮 1 +下圖 1 +下徹 1 +下樓 1 +下河 1 +下潛 1 +下獄 1 +下稱 1 +下蝕 1 +下設 1 +下課 1 +下跌 1 +下車 1 +下遊 1 +下部 1 +下關 1 +下院 1 +下集 1 +下雷 1 +下面 1 +下顎 1 +下風 1 +不丹 1 +不乏 1 +不以為然 1 +不克 1 +不入 1 +不凡 1 +不出 1 +不出所料 1 +不利 1 +不到 1 +不力 1 +不動 1 +不去 1 +不吃 1 +不合 1 +不和 1 +不問 1 +不均 1 +不多 1 +不大 1 +不定 1 +不實 1 +不惜 1 +不愛 1 +不懷好意 1 +不折不扣 1 +不捨 1 +不收 1 +不敬 1 +不料 1 +不易 1 +不景 1 +不服 1 +不朽 1 +不歸 1 +不準 1 +不理 1 +不畏 1 +不符 1 +不純 1 +不絕 1 +不行 1 +不衰 1 +不要 1 +不見天日 1 +不解 1 +不計其數 1 +不該 1 +不詳 1 +不豐 1 +不賣 1 +不輸 1 +不辭辛勞 1 +不道 1 +不適 1 +不銹 1 +不限 1 +不露 1 +不顧 1 +且是 1 +世上 1 +世人 1 +世代相傳 1 +世充 1 +世則 1 +世子 1 +世家 1 +世昌 1 +世民 1 +世田谷 1 +世祿 1 +世綱 1 +世貿 1 +世道 1 +世銘 1 +丘 1 +丙組 1 +丞相 1 +並無 1 +並稱 1 +並系 1 +中信 1 +中南 1 +中南海 1 +中原 1 +中堅 1 +中場 1 +中底層 1 +中彈 1 +中性 1 +中投 1 +中斷 1 +中旬 1 +中校 1 +中樞 1 +中檔 1 +中殿 1 +中毒 1 +中波希米亞 1 +中田 1 +中級 1 +中綴 1 +中線 1 +中耳 1 +中聯 1 +中興 1 +中葉 1 +中藥 1 +中西方 1 +中西醫 1 +中觀 1 +中超 1 +中農 1 +中鐵 1 +串聯 1 +丸都 1 +丹 1 +丹噶爾 1 +丹尼士達智 1 +丹路殊 1 +主修 1 +主創 1 +主導 1 +主帶 1 +主幹 1 +主意 1 +主控 1 +主治 1 +主炮 1 +主犯 1 +主筆 1 +主船 1 +主食 1 +乃威 1 +久經 1 +久藏 1 +之所以 1 +之申 1 +之銓 1 +之鋒 1 +乘勢 1 +乘搭 1 +乘撘 1 +乘裝 1 +乙 1 +乙二胺 1 +乙未 1 +乙組 1 +乙苯 1 +九一一 1 +九十 1 +九江 1 +九鐵 1 +乳房 1 +乾季 1 +乾德 1 +乾淨 1 +乾西 1 +亂 1 +亂倫 1 +亂刀 1 +事先 1 +事態 1 +事發 1 +事與願違 1 +事跡 1 +事蹟 1 +二中全會 1 +二二八 1 +二十一 1 +二十二 1 +二十五 1 +二十八 1 +二十多 1 +二十萬 1 +二宮 1 +二戶 1 +二百 1 +二百五十餘 1 +二百餘 1 +二胺 1 +二郎 1 +于敏 1 +互作 1 +互利 1 +互助 1 +互惠 1 +互通 1 +互選 1 +五一 1 +五中全會 1 +五分之一 1 +五十 1 +五十一 1 
+五十六 1 +五常 1 +五弟 1 +五彩繽紛 1 +五成半 1 +五指 1 +五氧化二氮 1 +五百萬 1 +五萬三千 1 +井字 1 +井村 1 +井田 1 +些微 1 +亞丁 1 +亞他那修 1 +亞伯塔 1 +亞伯拉罕 1 +亞冠龍 1 +亞利桑納 1 +亞基 1 +亞奧 1 +亞彬 1 +亞德里亞堡 1 +亞文 1 +亞普芮 1 +亞東 1 +亞歷山大丹尼士 1 +亞流 1 +亞烏扎 1 +亞特蘭大 1 +亞瑟 1 +亞當斯 1 +亞西爾 1 +亞運 1 +亞邦 1 +亞麻 1 +亡故 1 +交付 1 +交代 1 +交出 1 +交口 1 +交回 1 +交州 1 +交替 1 +交棒 1 +交涉 1 +交界 1 +交行 1 +交角 1 +交談 1 +交道 1 +交錯 1 +亦即 1 +亨 1 +亨得利 1 +享 1 +京劇 1 +京王 1 +京釜 1 +亭湖 1 +亮相 1 +人世 1 +人仕 1 +人字 1 +人客 1 +人手 1 +人日 1 +人權 1 +人殉 1 +人氣 1 +人祭 1 +人種 1 +人稱 1 +人行 1 +人道 1 +人選 1 +人麻呂 1 +仁傑 1 +仁和 1 +仁壽 1 +仁守 1 +仁宗 1 +仁煥 1 +仁牙因 1 +仁玕 1 +仁穆 1 +仁粹 1 +仁青 1 +仇人 1 +今川 1 +介壽 1 +介質 1 +仍是 1 +仍有 1 +仍算 1 +他倆 1 +他家 1 +仙人打坐 1 +仙女木 1 +仙鶴 1 +代亞布羅 1 +代價 1 +代名詞 1 +代幣 1 +代數 1 +代牧 1 +代碼 1 +令狐 1 +令華 1 +以爲 1 +仰光 1 +仰望 1 +仲 1 +仲雄 1 +任免 1 +任選 1 +伊 1 +伊克巴爾 1 +伊利 1 +伊利沙伯 1 +伊塔蒂亞亞 1 +伊娃 1 +伊尹 1 +伊摩琴 1 +伊朗 1 +伊犁 1 +伊甸 1 +伊薩爾 1 +伊里亞德 1 +伊阿宋 1 +伊頓 1 +伍德 1 +伍德羅 1 +伎倆 1 +伏塔 1 +伏契克 1 +伏爾加 1 +伏瓦蒂爾 1 +伐 1 +休假 1 +休克 1 +休士頓 1 +休憩 1 +休斯 1 +休閑 1 +休養 1 +伙食 1 +伯克爾 1 +伯多祿 1 +伯恩 1 +伯恩哈德 1 +伯明翰 1 +伯格 1 +伯溫 1 +伯爾尼 1 +伯納姆 1 +伯納雷 1 +伯茲貝格 1 +伯莎 1 +伯虎 1 +伯謙 1 +伯達 1 +伴侶 1 +伴奏 1 +伴有 1 +伴生 1 +伶 1 +伸一 1 +伸冤 1 +伸延 1 +伸港 1 +伽馬 1 +佈局 1 +佈置 1 +佈道 1 +位在 1 +位居 1 +位階 1 +位面 1 +低下 1 +低估 1 +低價 1 +低層 1 +低平 1 +低座 1 +低檔 1 +低潮 1 +低等 1 +低調 1 +低額 1 +住所 1 +住進 1 +佐佐木 1 +佐勞爾 1 +佐和子 1 +佐民 1 +佔用 1 +何利菲德 1 +何力特 1 +何方 1 +佛事 1 +佛典 1 +佛瑞爾斯 1 +佛經 1 +佛羅倫斯 1 +佛羅里達 1 +佛萊明 1 +佛蒙特 1 +佛頭 1 +作對 1 +作怪 1 +作曲 1 +作次郎 1 +作法 1 +作為 1 +作畫 1 +作雲 1 +作風 1 +佩佐拉諾 1 +佩儂 1 +佩戴 1 +佩琪 1 +佩蘭多 1 +佬 1 +佳作 1 +佳佳 1 +佳節 1 +併發 1 +使喚 1 +使團 1 +使節 1 +侄子 1 +來看 1 +來臨 1 +來襲 1 +來館 1 +侈談 1 +侍奉 1 +侍女 1 +侍從 1 +侏羅 1 +供水 1 +供電 1 +供養 1 +依次 1 +依照 1 +依瑪 1 +依託 1 +依託泊苷 1 +依附 1 +侮辱 1 +侯 1 +侵佔 1 +侵害 1 +便利 1 +便捷 1 +便是 1 +便服 1 +便當 1 +便秘 1 +俊業 1 +俗 1 +俘獲 1 +保 1 +保住 1 +保全 1 +保加爾 1 +保大 1 +保定 1 +保密 1 +保明 1 +保溫 1 +保羅費雷拉 1 +保送 1 +保養 1 +俠 1 +信中 1 +信念 1 +信教 1 +信玄 1 +信神 1 +信竹 1 +信裡 1 +修好 1 +修學 1 +修憲 1 +修煉 1 +修羅 1 +修葺 1 +修鞋 1 +修養 1 +俯瞰 1 +俸祿 1 +俾路支 1 +倉促 1 +倉庫 1 +個位 1 +個個 1 +個展 1 +倒下 1 +倒入 1 +倖免 1 +候旨 1 +候補 1 +倚天 1 +倚靠 1 +借 1 +倩文 1 +倫巴底 1 +倫拜 1 +倫納特 1 +倬標 1 +倭國 1 +倭寇 1 +假使 1 +假借 1 +假名 1 +假帳 1 +假設 1 
+假說 1 +假象 1 +假釋 1 +假面 1 +偉 1 +偉強 1 +偏低 1 +偏僻 1 +偏向 1 +偏小 1 +偏東 1 +偏重 1 +偏離 1 +做到 1 +停刊 1 +停業 1 +停機 1 +停泊 1 +停職 1 +停辦 1 +停靠 1 +停飛 1 +健壯 1 +健將 1 +健身 1 +側目 1 +側邊 1 +偵察 1 +偵測 1 +偵緝 1 +偶像 1 +偶發 1 +偷取 1 +偷羊 1 +偷襲 1 +偷走 1 +偽 1 +偽季米特里 1 +偽裝 1 +傀儡 1 +傅萊 1 +傍 1 +傍晚 1 +傑克托爾 1 +傑志 1 +傑斐遜 1 +備忘 1 +備戰 1 +備案 1 +備用 1 +備註 1 +傢具 1 +催芽 1 +傭人 1 +傳來 1 +傳給 1 +傳記 1 +傳遍 1 +債券 1 +傷及 1 +傷心 1 +傷患 1 +傷悲 1 +傷病 1 +傷透 1 +傻 1 +傾心 1 +傾談 1 +僅屬 1 +僅用 1 +像差 1 +僑 1 +僕人 1 +僖 1 +僧人 1 +僧孺 1 +僧尼 1 +僧格 1 +僧祐 1 +僱主 1 +僱傭 1 +僵局 1 +價位 1 +價錢 1 +儀器 1 +億 1 +儒士 1 +儘快 1 +儘量 1 +償付 1 +優 1 +優值 1 +優良 1 +優裕 1 +優質 1 +儲量 1 +儷 1 +允 1 +允良 1 +元子 1 +元朝 1 +元氣 1 +元澄 1 +元老 1 +元起 1 +兄 1 +兄長 1 +充任 1 +充分 1 +充氣 1 +充滿 1 +充軍 1 +兆基 1 +兆楠 1 +兆陽 1 +兇多吉少 1 +兇悍 1 +兇猛 1 +先前 1 +先帝 1 +先師 1 +先賢 1 +先鋒 1 +先驗 1 +光啟 1 +光學 1 +光宇 1 +光州 1 +光度 1 +光復 1 +光景 1 +光束 1 +光泰 1 +光滑 1 +光照 1 +光環 1 +光范 1 +光華 1 +光顧 1 +克利普頓 1 +克力佛 1 +克勤 1 +克家 1 +克拉瑪 1 +克拉西奇 1 +克敏能 1 +克欽 1 +克洛頓 1 +克特勒 1 +克羅維茲 1 +克羅迪歐 1 +克蘇魯 1 +克裡斯 1 +克農 1 +克里姆希爾特 1 +克里斯多夫 1 +克里斯多弗 1 +克里斯托弗 1 +克里波門 1 +克魯 1 +兌換 1 +免 1 +免疫 1 +免遭 1 +兔毛 1 +兢兢業業 1 +入世 1 +入地 1 +入塞 1 +入境 1 +入手 1 +入聲 1 +入股 1 +入閘 1 +入院 1 +入駐 1 +內化 1 +內卡薩 1 +內在 1 +內埔 1 +內壁 1 +內政 1 +內置 1 +內胎 1 +內臟 1 +內載 1 +內遷 1 +全劇 1 +全名 1 +全境 1 +全壘 1 +全套 1 +全島 1 +全州 1 +全得 1 +全德 1 +全效 1 +全敗 1 +全數 1 +全書 1 +全盛 1 +全盤 1 +全省 1 +全福 1 +全程 1 +全稱 1 +全線 1 +全興 1 +全邨 1 +全鎮 1 +全隊 1 +全額 1 +全黑 1 +兩億 1 +兩千五百萬 1 +兩千萬 1 +八世 1 +八十九 1 +八卦 1 +八思巴 1 +八成 1 +八杉 1 +八百 1 +公仔 1 +公佈 1 +公克 1 +公告 1 +公墓 1 +公屋 1 +公斤 1 +公款 1 +公正 1 +公狼 1 +公約 1 +公衛 1 +公袥 1 +公視 1 +公超 1 +公關 1 +公頃 1 +公館 1 +六七 1 +六千 1 +六千四百萬 1 +六合 1 +六四 1 +六安 1 +共享 1 +共尾 1 +共生 1 +共處 1 +共識 1 +共鳴 1 +兵房 1 +兵鋒 1 +其妻 1 +其子 1 +其次 1 +其母 1 +典籍 1 +兼修 1 +兼具 1 +兼容 1 +兼屬 1 +兼并 1 +冀望 1 +冉 1 +冊 1 +再三 1 +再保 1 +再用 1 +再臨 1 +再補 1 +再見 1 +冒 1 +冒險 1 +冠 1 +冠上 1 +冠峰 1 +冠狀 1 +冠玉 1 +冢 1 +冤案 1 +冥冥 1 +冥想 1 +冬初 1 +冬眠 1 +冬青 1 +冰 1 +冰冰 1 +冰塔 1 +冰晶 1 +冰柱 1 +冰河 1 +冰湖 1 +冰瀑 1 +冰球 1 +冰風 1 +冷凍 1 +冷暖氣 1 +冷次 1 +冷氣 1 +冷眼 1 +冷遇 1 +冷靜 1 +凄美 1 +准 1 +准考 1 +凈白 1 +凊 1 +凌 1 +凌日 1 +凌晨 1 +凌辱 1 +凌駕 1 +凍傷 1 +凝結 1 +凡爾登 1 +凡爾賽 1 +凱恩 1 +凱文 1 +凱爾特 1 +凱維埃爾 1 +凱美特 1 +凱茜 1 +凶 1 +凸 1 +凸起 
1 +凹版 1 +出世 1 +出人意料 1 +出到 1 +出動 1 +出去 1 +出名 1 +出品 1 +出國 1 +出城 1 +出奇 1 +出嫁 1 +出局 1 +出師 1 +出廠 1 +出征 1 +出手 1 +出擊 1 +出校 1 +出榜 1 +出血 1 +出訪 1 +出路 1 +出逃 1 +出門 1 +出頭 1 +刀鞘 1 +分工 1 +分店 1 +分批 1 +分攤 1 +分數 1 +分明 1 +分枝 1 +分校 1 +分泌 1 +分流 1 +分發 1 +分科 1 +分立 1 +分站 1 +分管 1 +分組 1 +分缺 1 +分貝 1 +分辨 1 +分部 1 +分鏡 1 +分隔 1 +分離 1 +分題 1 +分點 1 +切下 1 +切分 1 +切割 1 +切合 1 +切實 1 +切成 1 +切望 1 +切爾尼赫 1 +切片 1 +刑事 1 +刑部 1 +划算 1 +划艇 1 +列斯聯 1 +列維爾 1 +初中 1 +初始 1 +初時 1 +初次 1 +初步 1 +初見 1 +判 1 +判令 1 +判定 1 +判寺事 1 +判詞 1 +別人 1 +別名 1 +別院 1 +利他能 1 +利刃 1 +利好 1 +利潘迪特蘭堡 1 +利維奧 1 +刪剪 1 +刮目相看 1 +到任 1 +到期 1 +到發 1 +制動 1 +制式 1 +制瓷 1 +制約 1 +制酸 1 +刷 1 +刷到 1 +券 1 +券頂 1 +刺殺 1 +刻劃 1 +刻寫 1 +刻板 1 +刻滿 1 +刻畫 1 +則士 1 +則里拉 1 +削減 1 +前傾 1 +前去 1 +前因後果 1 +前奏 1 +前委 1 +前嫌 1 +前季 1 +前提 1 +前景 1 +前稱 1 +前端 1 +前綴 1 +前者 1 +前肢 1 +前齒 1 +剛剛 1 +剛性 1 +剛直 1 +剛鐸 1 +剩 1 +剩餘 1 +副長 1 +割據 1 +割破 1 +割讓 1 +割開 1 +創保 1 +創傷 1 +創刊 1 +創煥 1 +創生 1 +剷除 1 +剿 1 +剿滅 1 +劃出 1 +劃歸 1 +劃界 1 +劇中 1 +劇作 1 +劇場 1 +劇組 1 +劍俠 1 +劍法 1 +劍麻 1 +劑量 1 +力克 1 +力圖 1 +力霸 1 +功勞 1 +功德 1 +功樂 1 +功績 1 +加侖 1 +加值 1 +加冕 1 +加利奇 1 +加劇 1 +加勁 1 +加恩卡納 1 +加爾文 1 +加粗 1 +加藤 1 +加賀 1 +加速 1 +加電 1 +劣 1 +助 1 +助手 1 +助燃 1 +助聽 1 +助長 1 +努兒道刺特 1 +劫匪 1 +劫持 1 +効忠 1 +勁光 1 +勁報 1 +勁敵 1 +勁歌 1 +勃起 1 +勇俊 1 +勇士 1 +勇武 1 +勒溫 1 +動人 1 +動向 1 +動土 1 +動漫 1 +動漫畫 1 +動用 1 +動能 1 +動蕩 1 +動詞 1 +動量 1 +勘探 1 +務工 1 +勝 1 +勝任 1 +勝昭 1 +勝素 1 +勝者 1 +勝訴 1 +勝賴 1 +勞埃德 1 +勞累 1 +募款 1 +募集 1 +勢傾中外 1 +勢能 1 +勤先 1 +勤快 1 +勳位 1 +勳爵 1 +勵珍 1 +勸 1 +勾形 1 +勾畫 1 +勾結 1 +包袱 1 +包裹 1 +包覆 1 +包頭 1 +化名 1 +化妝 1 +化成 1 +化整為零 1 +化用 1 +化肥 1 +北伐 1 +北側 1 +北冰 1 +北卡羅萊納 1 +北景 1 +北歐 1 +北段 1 +北甘馬粦 1 +北美擬獅 1 +北車 1 +北返 1 +北達科他 1 +北邊 1 +匡 1 +匯入 1 +匯合 1 +匯報 1 +匯聯 1 +匯集 1 +匹 1 +匹茲堡 1 +匾額 1 +區塊 1 +區段 1 +區間 1 +十二世 1 +十二烷基苯 1 +十全十美 1 +十八億 1 +十八大 1 +十四 1 +十數 1 +十萬 1 +十餘 1 +千兆 1 +千克 1 +千島 1 +千方百計 1 +千春 1 +千瓦 1 +千米 1 +千萬 1 +千里迢迢 1 +千陽 1 +千鶴 1 +升值 1 +升到 1 +升天 1 +升越 1 +升降 1 +升高 1 +午膳 1 +半導體 1 +半牧 1 +半農 1 +卑詩 1 +卓著 1 +協合 1 +協理 1 +南人 1 +南卡羅萊納 1 +南哲 1 +南大 1 +南安 1 +南安普頓 1 +南寧 1 +南市 1 +南征 1 +南端 1 +南線 1 +南美 1 +南臨 1 +南航 1 +南船 1 +南路 1 +南通 1 +南遷 1 +南鄰 1 +南門 1 +南開 1 +南院 1 +南雄 1 +南麓 1 +博 1 +博凱蒂 1 +博多 1 +博學 1 +博斯維爾 
1 +博格 1 +博洛尼亞 1 +博滕 1 +博義 1 +博覽 1 +占星 1 +卡亞尼 1 +卡內拉 1 +卡利帕斯 1 +卡力崗 1 +卡夫 1 +卡夫卡 1 +卡巴雷羅 1 +卡希 1 +卡帕克 1 +卡拉ok 1 +卡拉柯伊 1 +卡拉維拿 1 +卡斯楚 1 +卡斯特羅 1 +卡普里維 1 +卡波特 1 +卡洛克 1 +卡洛斯 1 +卡洛曼 1 +卡洛琳 1 +卡羅來納 1 +卡羅萊納 1 +卡臣 1 +卡薩諾瓦 1 +卡車 1 +卡達 1 +卦 1 +卧底 1 +卧病 1 +卧薪嘗膽 1 +印信 1 +印刷 1 +印地安那 1 +印度尼西亞 1 +印第安納 1 +印第安納波利斯 1 +印表 1 +危在旦夕 1 +危害 1 +危殆 1 +即場 1 +即有 1 +卵內 1 +厘 1 +原先 1 +原型 1 +原姓 1 +原屬 1 +原平 1 +原意 1 +原指 1 +原文 1 +原核 1 +原畫 1 +原籍 1 +原罪 1 +原諒 1 +厥 1 +厭世 1 +厭惡 1 +去搶 1 +去留 1 +去看 1 +參戰 1 +參政 1 +參演 1 +參看 1 +參禮 1 +參贊 1 +參閱 1 +又廷 1 +又或 1 +及後 1 +及時 1 +友 1 +友情 1 +友邦 1 +反共 1 +反動 1 +反右 1 +反向 1 +反恐 1 +反省 1 +反綁 1 +反證 1 +反響 1 +反黨 1 +叔父 1 +取下 1 +取出 1 +取名 1 +取回 1 +取悅 1 +取液 1 +取物 1 +取用 1 +取而代之 1 +受命 1 +受孕 1 +受害 1 +受挫 1 +受洗 1 +受精 1 +受罰 1 +受襲 1 +受賄 1 +受阻 1 +受雇 1 +叛徒 1 +叛變 1 +叛軍 1 +叡 1 +叢刊 1 +叢書 1 +口供 1 +口信 1 +口吻 1 +口感 1 +口服 1 +口音 1 +古喙龍 1 +古堡 1 +古寺 1 +古廟 1 +古德諾 1 +古惑 1 +古斯塔夫 1 +古爾德 1 +古迹 1 +古都斯 1 +句子 1 +句點 1 +另加 1 +另娶 1 +另立 1 +另築 1 +另類 1 +只好 1 +只是 1 +只會 1 +只知 1 +只能 1 +叫作 1 +叫拜 1 +叫聲 1 +召 1 +召集 1 +可可 1 +可塑 1 +可愛 1 +可憐 1 +可樂 1 +可欣 1 +可西卡 1 +可靠 1 +可風 1 +台南 1 +台標 1 +台視 1 +台詞 1 +台長 1 +史前 1 +史坦貝克 1 +史官 1 +史帝芬 1 +史特勞斯 1 +史稱 1 +史記 1 +史跡 1 +史館 1 +右任 1 +右手 1 +右方 1 +右臂 1 +司可巴比妥 1 +司鐸 1 +吁宋 1 +吃上 1 +吃到 1 +吃掉 1 +吃法 1 +吃起 1 +各方 1 +各球 1 +各異 1 +各科 1 +各職 1 +各處 1 +各行各業 1 +各隊 1 +各項 1 +合共 1 +合力 1 +合和 1 +合唱 1 +合夥 1 +合奏 1 +合流 1 +合約 1 +合計 1 +合資 1 +合辦 1 +合適 1 +合陽 1 +合體 1 +吉利 1 +吉奧瓦尼 1 +吉姆 1 +吉布地 1 +吉拉德 1 +吉爾伯特 1 +吉祥 1 +吉米 1 +吉西 1 +吉隆坡 1 +吋 1 +同仁社 1 +同伴 1 +同僚 1 +同台 1 +同型 1 +同工 1 +同志 1 +同日 1 +同校 1 +同步 1 +同母 1 +同父 1 +同甘共苦 1 +同行 1 +同郷 1 +同食 1 +同飲 1 +名作 1 +名分 1 +名利雙收 1 +名城 1 +名帥 1 +名師 1 +名村 1 +名氣 1 +名流 1 +名聲 1 +名臣 1 +名茶 1 +名號 1 +名門 1 +名額 1 +后 1 +后妃 1 +吐 1 +吐嘈 1 +向前 1 +向滋 1 +君如 1 +君權 1 +君長 1 +吞下 1 +吟唱 1 +否 1 +否決 1 +吧 1 +吩咐 1 +含 1 +含糖 1 +含量 1 +吳王 1 +吵醒 1 +吸塵 1 +吸毒 1 +吸菸 1 +吸附 1 +吸食 1 +吹來 1 +吹氣 1 +吹滅 1 +吻部 1 +呀 1 +呂宋 1 +呆 1 +呈交 1 +告戒 1 +告白 1 +周代 1 +周刊 1 +周敏 1 +周日 1 +周朝 1 +周期 1 +周迅 1 +周遭 1 +味道 1 +呼 1 +呼倫貝爾 1 +呼和浩特 1 +命題 1 +和夫 1 +和好 1 +和宜合 1 +和康 1 +和暖 1 +和會 1 +和林 1 +和樹 1 +和睦 1 +和美 1 +和衷 1 +和親 1 +和記 1 +和諧 1 +和議 1 +咧嘴 1 +咬弦 1 +咸平 1 +咸康 1 
+咸淳 1 +咸美頓 1 +咸鏡 1 +咸陽 1 +哀悼 1 +品嘗 1 +品學兼優 1 +品德 1 +品源 1 +哈 1 +哈丹姆 1 +哈依拉爾 1 +哈剌旭烈 1 +哈吉 1 +哈布斯堡 1 +哈希姆 1 +哈恩 1 +哈拉帕那瓦 1 +哈索爾 1 +哈羅 1 +哈萊姆 1 +哈薩克 1 +哈達 1 +哈里斯堡 1 +哈里森 1 +哈默史密斯 1 +員佐 1 +員外 1 +哥利茲 1 +哥德堡 1 +哨所 1 +哪 1 +哭 1 +哲也 1 +哲元 1 +哲孟雄 1 +哲生 1 +哲蚌 1 +唇槍舌劍 1 +唐代 1 +售予 1 +售出 1 +售票 1 +唯 1 +唯獨 1 +唱戲 1 +唱法 1 +唸 1 +唸珠 1 +唾液 1 +商事 1 +商務 1 +商圈 1 +商城 1 +商埠 1 +商場 1 +商幫 1 +商朝 1 +商湯 1 +商用 1 +商羯羅 1 +商船 1 +商量 1 +啊 1 +問吧 1 +問話 1 +啟 1 +啟傑 1 +啟明 1 +啟發 1 +啟示 1 +啟程 1 +啟聯 1 +啟鑰 1 +啤酒 1 +喀什 1 +喀拉拉邦 1 +喀里多尼亞 1 +善事 1 +善作 1 +善待 1 +善後 1 +善惡 1 +善撲 1 +善良 1 +喇薩 1 +喊出 1 +喘息 1 +喙 1 +喙端 1 +喚 1 +喚回 1 +喚起 1 +喜 1 +喜好 1 +喝醉 1 +喝采 1 +喪失 1 +喬姆斯基 1 +喬木 1 +喬科維奇 1 +單獨 1 +單調 1 +單質 1 +單項 1 +嗅到 1 +嗎 1 +嗜酸 1 +嗜鹼 1 +嗣位 1 +嗣業 1 +嘉木揚 1 +嘉木樣 1 +嘉樂 1 +嘉許 1 +嘉道理 1 +嘉陵 1 +嘉靖 1 +嘔吐 1 +嘩然 1 +嘯林 1 +嘴 1 +噁心 1 +噁爆 1 +器具 1 +器械 1 +器蓋 1 +器身 1 +噴射 1 +噸位 1 +嚇人 1 +嚮導 1 +嚴 1 +嚴令 1 +嚴加 1 +嚴島 1 +嚴懲 1 +嚴斥 1 +嚴氏 1 +嚴肅 1 +嚴謹 1 +囊胚 1 +囑咐 1 +囚犯 1 +四十三 1 +四十多 1 +四十餘 1 +四周 1 +四平 1 +四方八面 1 +四牌 1 +四萬 1 +四郎 1 +回信 1 +回合 1 +回填 1 +回家 1 +回寺 1 +回彈 1 +回復 1 +回教 1 +回生 1 +回程 1 +回答 1 +因弗內斯 1 +因達農 1 +困 1 +困住 1 +困擾 1 +固 1 +固態 1 +固有 1 +固醇 1 +國中 1 +國主 1 +國光 1 +國公 1 +國共 1 +國史 1 +國名 1 +國君 1 +國土 1 +國奧 1 +國妃 1 +國安會 1 +國府 1 +國庫 1 +國情 1 +國慶 1 +國成 1 +國松 1 +國父 1 +國產 1 +國界 1 +國立 1 +國策 1 +國諱 1 +國雄 1 +圍坐 1 +圍棋 1 +圍牆 1 +圍魏救趙 1 +園丁 1 +園主 1 +園內 1 +園明園 1 +園林 1 +圓 1 +圓圓 1 +圓弧 1 +圓柱 1 +圓滑 1 +圓環 1 +圖取 1 +圖布丹 1 +圖形 1 +圖片 1 +圖示 1 +圖稿 1 +團圓 1 +團隊 1 +土匪 1 +土司 1 +土石 1 +土虱 1 +在崗 1 +在校 1 +在身 1 +地名 1 +地域 1 +地基 1 +地平 1 +地庫 1 +地政 1 +地板 1 +地標 1 +地盤 1 +地級 1 +地表 1 +地貌 1 +地質 1 +地道 1 +地震 1 +坂本 1 +均勻 1 +均衡 1 +坎特伯里 1 +坎貝爾 1 +坎農 1 +坐在 1 +坐監 1 +坐骨 1 +坡子 1 +坤玲 1 +坦 1 +坦克 1 +坦干伊喀 1 +坦然 1 +坦白 1 +型式 1 +垮台 1 +埃內韋塔克 1 +埃弗里 1 +埃米內斯庫 1 +埃米琳 1 +埃胡德 1 +埃雷拉 1 +埋怨 1 +埋葬 1 +埋藏 1 +城主 1 +城光 1 +城內 1 +城南 1 +城址 1 +城巴 1 +城池 1 +城牆 1 +城西 1 +城隍 1 +埜堂 1 +埤 1 +執委 1 +執業 1 +執飛 1 +培元 1 +培育 1 +基層 1 +基希涅夫 1 +基平 1 +基徹 1 +基數 1 +基石 1 +基頻 1 +堂堂正正 1 +堅城 1 +堅定 1 +堅尼地 1 +堅拒 1 +堅蜥 1 +堆填 1 +堆積 1 +堈 1 +堪憐 1 +堪稱 1 +堪薩斯 1 +報仇 1 +報刊 1 +報名 1 +報復 1 +報讀 1 +場內 1 +場均 1 +場景 1 +塑像 1 +塑料 1 +塑有 1 +塑膠 1 +塔利班 1 +塔台 1 +塔吉克 1 +塔塔爾 1 +塔夫茨 1 
+塔林 1 +塔樓 1 +塔西佗 1 +塗黑 1 +塚 1 +塞古拉 1 +塞德爾恰尼 1 +塞普提米烏斯 1 +塞法迪 1 +塞爾達 1 +塞琉古 1 +塞琉西 1 +塞維利亞 1 +塞維魯 1 +塞維魯敉 1 +塞隆 1 +塞音 1 +塞馬 1 +墓葬 1 +墓頂 1 +墜入 1 +墜落 1 +增殖 1 +增生 1 +增祥 1 +增進 1 +增額 1 +墟 1 +墟內 1 +墨 1 +墨客 1 +墨色 1 +墳 1 +墾田 1 +壓 1 +壓縮 1 +壞球 1 +壩上 1 +壩下 1 +士珍 1 +士禛 1 +士評 1 +壯漢 1 +壯烈 1 +壹 1 +壺 1 +壺中仙 1 +壽命 1 +壽宴 1 +壽星 1 +夏威夷 1 +夏愨 1 +夏秋季 1 +夏至 1 +夏茸切哇 1 +夏茸穹哇 1 +夏荷林 1 +夏默 1 +外借 1 +外力 1 +外加 1 +外務 1 +外匯 1 +外地 1 +外壁 1 +外套 1 +外層 1 +外形 1 +外殼 1 +外甥 1 +外甥女 1 +外省 1 +外管 1 +外表 1 +外褂 1 +外訪 1 +外語 1 +外銷 1 +多倫 1 +多元 1 +多汁 1 +多納德 1 +多謝 1 +多雨 1 +夜夜 1 +夜戰 1 +夠大 1 +夢中 1 +夢境 1 +夢幻 1 +夢想 1 +夢雲 1 +夢鴿 1 +夥兒 1 +大不了 1 +大乘 1 +大事 1 +大二 1 +大儒 1 +大區 1 +大友 1 +大受 1 +大吉 1 +大名 1 +大君 1 +大和 1 +大喊 1 +大國 1 +大圍 1 +大城 1 +大堆 1 +大堤 1 +大增 1 +大士 1 +大失所望 1 +大島 1 +大嶼 1 +大幅 1 +大怒 1 +大悟 1 +大敵 1 +大新 1 +大校 1 +大概 1 +大正 1 +大殿 1 +大汗 1 +大河 1 +大洋 1 +大湖 1 +大溪 1 +大漠 1 +大獲 1 +大理 1 +大發 1 +大窘 1 +大紅 1 +大經 1 +大綱 1 +大腦 1 +大腸 1 +大膽 1 +大舉 1 +大艇 1 +大華 1 +大蒜 1 +大街小巷 1 +大跌 1 +大路 1 +大辦 1 +大通 1 +大進 1 +大郎 1 +大部 1 +大都 1 +大釗 1 +大銘 1 +大門 1 +大雄 1 +大韓 1 +大馬 1 +大驚 1 +大體 1 +大鬧 1 +大黨 1 +大鼠 1 +天份 1 +天佐 1 +天使 1 +天倫之樂 1 +天元 1 +天安 1 +天寶樓 1 +天差地遠 1 +天性 1 +天悅 1 +天慶 1 +天才 1 +天母 1 +天河 1 +天涯 1 +天球 1 +天祐 1 +天窗 1 +天紀 1 +天翔 1 +天賜 1 +天賦 1 +天馬 1 +太傅 1 +太元 1 +太冷 1 +太初 1 +太后 1 +太宗 1 +太宰 1 +太尉 1 +太常 1 +太極 1 +太湖 1 +太炎 1 +太監 1 +太行 1 +太近 1 +太遠 1 +太郎 1 +夫仇 1 +夫妻 1 +央行 1 +失利 1 +失地 1 +失效 1 +失職 1 +失能 1 +失落 1 +失誤 1 +失蹤 1 +夷昧 1 +夾 1 +夾狀 1 +奇俠 1 +奇幻 1 +奇怪 1 +奇缺 1 +奈葉 1 +奉 1 +奉命 1 +奉安 1 +奉律 1 +奉新 1 +奉系 1 +奎德林堡 1 +奏 1 +奏鳴 1 +奕 1 +奕詝 1 +套出 1 +套用 1 +奢華 1 +奧伊 1 +奧克尼 1 +奧克蘭 1 +奧古斯丁 1 +奧姆 1 +奧得 1 +奧托 1 +奧斯卡 1 +奧斯威爾 1 +奧斯汀 1 +奧林匹亞絲 1 +奧林匹斯 1 +奧格斯堡 1 +奧爾滕 1 +奧爾登堡 1 +奧爾良 1 +奧特 1 +奧特伊 1 +奧的斯 1 +奧米加 1 +奧羽 1 +奧蒂洛 1 +奪去 1 +奬懲 1 +女人 1 +女傭 1 +女僕 1 +女優 1 +女友 1 +女嬰 1 +女水 1 +女版 1 +女生 1 +女眷 1 +奴役 1 +奶爸 1 +奸 1 +她倆 1 +好上 1 +好奇 1 +好手 1 +好氧 1 +好色 1 +如數 1 +妄圖 1 +妊娠 1 +妖怪 1 +妙 1 +妮科爾 1 +妮綺 1 +妳 1 +妹 1 +妹夫 1 +妻妹 1 +妻姐 1 +妻室 1 +姊姊 1 +始發 1 +始祖 1 +始稱 1 +始興 1 +姑娘 1 +姑母 1 +委內瑞拉 1 +委身 1 +姚里 1 +姥姥 1 +姦情 1 +姪女 1 +姿色 1 +威 1 +威光 1 +威嚇 1 +威塞克斯 1 +威斯特米思從 1 +威格莫爾 1 +威權 1 +威爾伯 1 +威爾歇 1 +威特 1 +威舍 1 +威靈頓 1 +娘 1 +娘家 1 
+娜塔莉 1 +婁 1 +婆 1 +婆羅 1 +婚 1 +婚事 1 +婚宴 1 +婚禮 1 +婢女 1 +婦 1 +婷婷 1 +媒介 1 +媚娘 1 +嫁與 1 +嫘縈 1 +嫣然 1 +嬰孩 1 +子孫 1 +子文 1 +子球 1 +子程 1 +孕育 1 +孕酮 1 +字喃 1 +字幕 1 +字模 1 +字號 1 +存世 1 +存取 1 +存放 1 +孝感 1 +孝次 1 +孟 1 +孟加拉 1 +孟德爾 1 +季後 1 +季惟 1 +季風 1 +季龍 1 +孤島 1 +孤芳自賞 1 +孤身 1 +孩提 1 +學到 1 +學前 1 +學家 1 +學府二道 1 +學業 1 +學民 1 +學津 1 +學社 1 +學聯 1 +學苑 1 +宇航 1 +守備 1 +守孝 1 +守文 1 +守法 1 +守臣 1 +守謙 1 +守齋 1 +安二郎 1 +安妮 1 +安安 1 +安岳 1 +安徒生 1 +安得拉 1 +安得拉邦 1 +安德魯 1 +安托瓦內特 1 +安撫 1 +安放 1 +安東 1 +安樂 1 +安正 1 +安民 1 +安汶 1 +安然 1 +安營 1 +安理 1 +安納 1 +安聯 1 +安葬 1 +安蘭 1 +安達信 1 +安那瑞安 1 +安那罕 1 +宋國 1 +完好 1 +完畢 1 +宏偉 1 +宏坤 1 +宏聲 1 +宏道 1 +宏量 1 +宗偉 1 +宗憲 1 +宗谷 1 +宗龍 1 +官兵 1 +官司 1 +官府 1 +官服 1 +官腔 1 +官話 1 +官邸 1 +官長 1 +宙域 1 +定位 1 +定價 1 +定向 1 +定影 1 +定性 1 +定案 1 +定理 1 +定量 1 +宛城 1 +宜興 1 +客場 1 +客家 1 +客觀 1 +客貨運 1 +客輪 1 +客量 1 +宣 1 +宣判 1 +宣化 1 +宣帝 1 +宣誓 1 +室外 1 +室溫 1 +宦官 1 +宮人 1 +宮崎 1 +宰李 1 +宴席 1 +宴會 1 +家光 1 +家勁 1 +家務 1 +家外 1 +家奴 1 +家干 1 +家用 1 +家立 1 +家道中落 1 +家驤 1 +容 1 +容器 1 +容忍 1 +容許 1 +容量 1 +宿敵 1 +宿根 1 +寄存 1 +寄送 1 +寅成 1 +密 1 +密山 1 +密文 1 +密歇根 1 +密西西比 1 +密集 1 +富商 1 +富恩特德奧羅 1 +富翁 1 +富蘭克林 1 +富裕 1 +富豪 1 +富貴 1 +富邦 1 +察合台 1 +察哈爾 1 +察沃 1 +寡尿 1 +實 1 +實則 1 +實屬 1 +實情 1 +實戰 1 +實收 1 +實權 1 +實況 1 +實踐 1 +寧波 1 +審批 1 +審理 1 +審計 1 +審評 1 +審議 1 +寫下 1 +寫信 1 +寫入 1 +寫出 1 +寫字 1 +寫成 1 +寫進 1 +寬容 1 +寬度 1 +寬敞 1 +寬條 1 +寬順 1 +寮國 1 +寵物 1 +寵臣 1 +寶光 1 +寶劍 1 +寶如 1 +寶應 1 +寶殿 1 +寶玉 1 +寶田 1 +寶血 1 +寶雞 1 +寶雲 1 +寶麗金 1 +寺前 1 +封土 1 +封為 1 +封爵 1 +封穴 1 +封號 1 +封裝 1 +封路 1 +射失 1 +射程 1 +射箭 1 +射線 1 +射鵰 1 +將來 1 +將領 1 +專 1 +專任 1 +專制 1 +專吃 1 +專指 1 +專政 1 +專機 1 +專橫 1 +專欄 1 +專權 1 +專款 1 +專注 1 +專線 1 +專註 1 +專賣 1 +專長 1 +專項 1 +尊崇 1 +尊敬 1 +尊稱 1 +尋回 1 +尋親 1 +對上 1 +對付 1 +對撞 1 +對準 1 +對照 1 +對生 1 +對白 1 +對稱 1 +對立 1 +對簿公堂 1 +對話 1 +對面 1 +對飛 1 +導 1 +導入 1 +導出 1 +導向 1 +導彈 1 +導播 1 +導正 1 +導體 1 +小人 1 +小兔 1 +小刀 1 +小南 1 +小國 1 +小小 1 +小島 1 +小息 1 +小數 1 +小書 1 +小欖 1 +小水鴨 1 +小河兒 1 +小津 1 +小浪底 1 +小澤 1 +小片 1 +小生 1 +小田急 1 +小知 1 +小石 1 +小童 1 +小舖 1 +小虎 1 +小街 1 +小輪 1 +小野 1 +小隊 1 +小順 1 +小顏 1 +小風 1 +小體 1 +少兒 1 +少將 1 +少年 1 +少懷 1 +少林 1 +少見 1 +少許 1 +少量 1 +尖端 1 +尖酸 1 +尖頂 1 +尚州 1 +尚德 1 +尚方 1 +尚書 1 +尤利烏斯 1 +尤勒 1 +尤指 1 +尤里卡 1 +就此 1 +就熟 1 +就職 
1 +尷尬 1 +尹 1 +尹氏 1 +尼克貝 1 +尼古丁 1 +尼古拉 1 +尼奧爾德 1 +尼師今 1 +尼庫瑙 1 +尼歐斯 1 +尼比魯 1 +尼爾 1 +尼爾斯 1 +尼爾馬爾 1 +尾 1 +尾巴 1 +尾柄 1 +尾隨 1 +尾鰭 1 +尾龍 1 +局勢 1 +局間 1 +居家 1 +居所 1 +居留 1 +居禮 1 +屆滿 1 +屋 1 +屋大薇 1 +屋宇 1 +屋頂 1 +屍 1 +屍體 1 +屏山 1 +屏東 1 +屏風 1 +展品 1 +展望 1 +展貿 1 +屠村 1 +屠龍 1 +層壓 1 +層次 1 +層疊 1 +層級 1 +層面 1 +履行 1 +屬國 1 +屬於 1 +屬靈 1 +屯南 1 +山下 1 +山內 1 +山口 1 +山地 1 +山姆 1 +山峰 1 +山崖 1 +山手 1 +山月 1 +山村 1 +山楂 1 +山猿 1 +山田 1 +山胞 1 +山葉 1 +山陵 1 +山麓 1 +山龍眼 1 +岐女短 1 +岐阜 1 +岐陽 1 +岑 1 +岔江 1 +岡恩 1 +岡本 1 +岩屋 1 +岩心 1 +岩手 1 +岩漿 1 +岳 1 +岳泰 1 +岷江 1 +岸川 1 +岸賈 1 +岸邊 1 +峯崎 1 +峰倉 1 +峰景 1 +島內 1 +島國 1 +島蚺 1 +峽 1 +峽灣 1 +峽谷 1 +崇善 1 +崇尚 1 +崇敬 1 +崎頭 1 +崔 1 +崔陂 1 +崗 1 +崗斜 1 +崙頂 1 +崞縣 1 +崩坍 1 +崩潰 1 +嵩祝 1 +巔峰 1 +川南 1 +川村 1 +川邊 1 +州界 1 +州舞 1 +巡査 1 +巢 1 +工事 1 +工務 1 +工序 1 +工廠 1 +工會 1 +工法 1 +工潮 1 +左右神策軍 1 +左岸 1 +左拉 1 +左派 1 +左膀 1 +左轉 1 +巨作 1 +巨像 1 +巨冊 1 +巨型 1 +巨石 1 +巨賈 1 +巨野 1 +巫師 1 +差 1 +差分 1 +差別 1 +差勁 1 +差會 1 +己二胺 1 +己巳 1 +己酉 1 +已故 1 +已晚 1 +已死 1 +巴 1 +巴亞莫 1 +巴克 1 +巴克禮 1 +巴列姆 1 +巴列斯特爾 1 +巴卑爾 1 +巴喬 1 +巴城 1 +巴塞 1 +巴塞羅那 1 +巴塞隆拿 1 +巴塞隆納 1 +巴孛許諾 1 +巴巴克 1 +巴庫 1 +巴思缽 1 +巴恩斯 1 +巴拉克 1 +巴拉尼 1 +巴斯克 1 +巴斯德 1 +巴斯蒂亞 1 +巴比 1 +巴爾虎 1 +巴爾齊蒂斯 1 +巴納夫 1 +巴納巴 1 +巴羅爾 1 +巴英額 1 +巴莫鱷 1 +巴蒂斯塔 1 +巴西利卡 1 +巴西班讓 1 +巴諾 1 +巴賽 1 +巴赫 1 +巴頓 1 +市售 1 +市縣 1 +市轄 1 +市面 1 +布 1 +布伯 1 +布倫努斯 1 +布列塔尼 1 +布哈林 1 +布宜諾斯艾利斯 1 +布拉亞斯 1 +布拉德 1 +布政 1 +布料 1 +布林 1 +布氏奇非鯽 1 +布爾 1 +布置 1 +布萊姆 1 +布蘭特福德 1 +布蘭登堡 1 +布賴滕費爾德 1 +布里奇曼 1 +布里斯托 1 +布里斯班 1 +布雷克 1 +布雷西亞 1 +布魯克林 1 +布魯斯 1 +帆布 1 +帆船 1 +希伯來 1 +希克森 1 +希爾曼 1 +希特勒 1 +希皮奧內 1 +希鵬 1 +帕克 1 +帕內爾 1 +帕搏 1 +帕爾曼 1 +帕特羅克洛斯 1 +帕米爾 1 +帕納辛奈克斯 1 +帕納辛納克斯 1 +帕維亞 1 +帕薩迪納 1 +帕西奧利 1 +帕迪恩 1 +帕金森 1 +帝王 1 +帝都 1 +師團 1 +師徒 1 +師從 1 +師父 1 +師生 1 +席勒 1 +帳目 1 +帶上 1 +帶出 1 +帶子 1 +帶少 1 +帶水 1 +常住 1 +常勝 1 +常客 1 +常態 1 +常春 1 +常春藤 1 +常盛 1 +常識 1 +常量 1 +常青 1 +常駐 1 +幀 1 +幅 1 +幅員遼闊 1 +幕 1 +幕府 1 +幕後 1 +幢 1 +幣原 1 +幪面 1 +幫主 1 +干王 1 +平反 1 +平和 1 +平地 1 +平坦 1 +平帝 1 +平常 1 +平手 1 +平日 1 +平林 1 +平沼 1 +平滑 1 +平臺 1 +平行 1 +平陵 1 +平陽 1 +年中 1 +年份 1 +年幼 1 +年息 1 +年第 1 +年老 1 +年號 1 +年資 1 +年青 1 +并行 1 +幸一 1 +幸好 1 +幸運 1 +幹 1 +幹事 1 +幹掉 1 +幹流 1 +幹道 1 +幼子 1 +幼年 1 +幼弟 1 +幼發拉底 1 +幼稚 1 +幼貓 1 +幼魚 1 +幼鯨 1 +幼鳥 1 
+幽閣 1 +幾內亞 1 +幾十 1 +幾千 1 +幾多 1 +幾百 1 +床 1 +床鋪 1 +底冊 1 +底格里斯 1 +底比斯 1 +底片 1 +底特律 1 +底稿 1 +底質 1 +店家 1 +庚戌 1 +府中 1 +府城 1 +府尹 1 +府第 1 +度宗 1 +座位 1 +座右 1 +座座 1 +座椅 1 +座西 1 +座談 1 +庫伊瓦 1 +庫伊瓦涅米 1 +庫哈斯 1 +庫柏力克 1 +庫欣 1 +庫爾特 1 +庫賽 1 +庫赫莫 1 +庫迪尼奧 1 +庫頁 1 +庭園 1 +庭薺 1 +庭長 1 +康乃狄克 1 +康史 1 +康奈爾 1 +康子 1 +康寧 1 +康樂 1 +康濟鼐 1 +康福 1 +康科德 1 +康羅伊 1 +廂 1 +廉潔 1 +廚師 1 +廝守 1 +廟倉 1 +廟方 1 +廟橋 1 +廟鎮 1 +廢棄 1 +廢熱 1 +廢舊 1 +廣受 1 +廣大 1 +廣大興 1 +廣權 1 +廣澳 1 +廣稱 1 +廣金 1 +廬山 1 +廳局 1 +廳長 1 +延安 1 +延年益壽 1 +延音 1 +廷和 1 +廷尉 1 +建好 1 +建威 1 +建市 1 +建御名方 1 +建御雷 1 +建構 1 +建武 1 +建置 1 +建華 1 +建超 1 +廿五 1 +廿六 1 +弄到 1 +弄清 1 +弊案 1 +式微 1 +弓尾 1 +弓弦 1 +弓箭 1 +引來 1 +引咎 1 +引導 1 +引江 1 +引渡 1 +引申 1 +引資 1 +弗拉格斯塔夫 1 +弗朗丹 1 +弗朗恰 1 +弗朗索 1 +弗朗西絲 1 +弗格森 1 +弗洛伊德 1 +弗特 1 +弗蘭克 1 +弗里德里希 1 +弗里施 1 +弗里茨 1 +弘 1 +弘前 1 +弘宣 1 +弭兵 1 +弱 1 +張家口 1 +張氏 1 +強勁 1 +強化 1 +強拍 1 +強暴 1 +強權 1 +強求 1 +強盜 1 +強迫 1 +強韌 1 +強項 1 +彈劾 1 +彈塗魚 1 +彈撥 1 +彈盡糧絕 1 +彌撒 1 +彌補 1 +彌賽亞 1 +彎曲 1 +彗差 1 +彗星 1 +彙編 1 +彝 1 +形像 1 +形同 1 +形體 1 +彥根 1 +彥直 1 +彩 1 +彩畫 1 +彩繪 1 +彩雲 1 +彩鳳 1 +彪馬 1 +彭劉楊 1 +彭博倫 1 +彭古魯 1 +彭定康 1 +彭拿路 1 +彰信 1 +影帝 1 +影線 1 +影評 1 +影迷 1 +影集 1 +影音 1 +彷彿 1 +役 1 +彼特 1 +往上 1 +往世 1 +往日 1 +征西 1 +待到 1 +很小 1 +很強 1 +很忙 1 +很懶 1 +很是 1 +很深 1 +很遠 1 +很重 1 +很長 1 +律定 1 +後世 1 +後代 1 +後勤 1 +後南 1 +後周 1 +後宮 1 +後庄 1 +後悔 1 +後援 1 +後梁 1 +後段 1 +後母 1 +後稱 1 +後續 1 +後置 1 +後藤 1 +後送 1 +後防 1 +後齒 1 +徒具 1 +徒手 1 +得克薩斯 1 +得心應手 1 +得悉 1 +得獎 1 +得益 1 +從來 1 +從句 1 +從周 1 +從善如流 1 +從政 1 +御史 1 +御墨 1 +御宅 1 +御窯 1 +復健 1 +復合 1 +復寫 1 +復甦 1 +循道 1 +微型 1 +微妙 1 +微小 1 +微波 1 +微粒 1 +微粒體 1 +微觀 1 +微量 1 +徵兆 1 +徵招 1 +徵祥 1 +德勝 1 +德國牧羊犬 1 +德妃 1 +德宏德特 1 +德富卡 1 +德干 1 +德愛 1 +德懷 1 +德拉瓦 1 +德文 1 +德比 1 +德江 1 +德爾 1 +德爾加多 1 +德爾斐 1 +德甲 1 +德高 1 +德魯茲 1 +徽 1 +徽章 1 +心境 1 +心宿 1 +心意 1 +心智 1 +心目 1 +心肌 1 +必和必拓 1 +必走 1 +必需 1 +忍心 1 +忍氣吞聲 1 +志 1 +志摩 1 +志明 1 +志道 1 +忘 1 +忘記 1 +忙 1 +忠 1 +忠於 1 +忠誠 1 +快上 1 +快捷 1 +快綫 1 +忽 1 +忽視 1 +怎 1 +怒 1 +怕 1 +思侯 1 +思成 1 +思維 1 +思考 1 +怡 1 +急劇 1 +急忙 1 +急救 1 +急於 1 +急流 1 +急症 1 +急行 1 +性向 1 +性命 1 +性情 1 +性腺 1 +怪 1 +怪圈 1 +怪聲 1 +恆 1 +恆大 1 +恆德 1 +恆河 1 +恐嚇 1 +恐懼 1 +恢豐 1 +恣意 1 +恤 1 +恨 1 +恩南伽 1 +恩慈 1 +恩秀 1 +恩贈 1 +恭子 1 +息率 1 +悉心 1 +悉達多 1 +悟到 1 
+悟空 1 +患 1 +患得患失 1 +患病 1 +您 1 +悲傷 1 +悲劇 1 +悲嘆 1 +悲慘 1 +悲痛 1 +悲痛欲絕 1 +悲鴻 1 +悼念 1 +情 1 +情不自禁 1 +情人 1 +情勢 1 +情愁 1 +情愛 1 +情景 1 +情結 1 +情誼 1 +情資 1 +情陷 1 +情願 1 +惇曧 1 +惟 1 +惠亞 1 +惠梨香 1 +惠特蘭 1 +惡 1 +惡人 1 +惡化 1 +惡夢 1 +惡性 1 +惡搞 1 +惡臭 1 +惡靈 1 +惡魔 1 +想必 1 +想起 1 +愈加 1 +愈大 1 +愈高 1 +愉快 1 +意圖 1 +意念 1 +意料 1 +意甲 1 +意魔 1 +愙威 1 +愚園 1 +愚昧 1 +愛好 1 +愛娜 1 +愛娜茲薇 1 +愛思德 1 +愛恨 1 +愛意 1 +愛慕 1 +愛明內斯庫 1 +愛樂 1 +愛河 1 +愛莎尼亞 1 +愛迪生 1 +愛默生 1 +感冒 1 +感謝 1 +慈湖 1 +慈濟 1 +慌亂 1 +慎 1 +慎太郎 1 +慕容 1 +慕肯 1 +慘叫 1 +慘重 1 +慚愧 1 +慢行 1 +慢駛 1 +慧嫻 1 +慰安 1 +慶 1 +慶典 1 +慶曆 1 +慶貽 1 +慶黎 1 +慷慨 1 +憂 1 +憂憤 1 +憲政 1 +憲民 1 +憲法 1 +憶蓮 1 +懂 1 +應付 1 +應允 1 +應屆 1 +應戰 1 +應昌 1 +應當 1 +應許 1 +應邀 1 +懲罰 1 +懶爪龍 1 +懷 1 +懷仁 1 +懷克里夫 1 +懷念 1 +懷慶 1 +懷抱 1 +懷水 1 +懷聖 1 +懸掛 1 +懼高 1 +懿 1 +戀人 1 +戀屍 1 +戀童 1 +戈德曼 1 +戈爾 1 +戈登 1 +戈矛 1 +戈蘭 1 +成事 1 +成仁 1 +成化 1 +成名 1 +成品 1 +成套 1 +成對 1 +成形 1 +成梁 1 +成行 1 +成語 1 +我國 1 +截 1 +截然不同 1 +截至 1 +截頜鯉 1 +戰事 1 +戰力 1 +戰勝 1 +戰地 1 +戰平 1 +戰情 1 +戰船 1 +戲子 1 +戲曲 1 +戲法 1 +戲碼 1 +戲謔 1 +戲院 1 +戴上 1 +戴克里先 1 +戴斯德 1 +戴爾馬 1 +戴維斯 1 +戴蒙 1 +戴頓 1 +戶田 1 +戶籍 1 +房東 1 +所為 1 +所長 1 +手上 1 +手工 1 +手感 1 +手抄 1 +手指 1 +手提 1 +手槍 1 +手稿 1 +手筆 1 +手腳 1 +手邊 1 +手風 1 +才子 1 +才是 1 +才智 1 +扎什倫布 1 +打亂 1 +打人 1 +打包 1 +打撈 1 +打死 1 +打水 1 +打牌 1 +打碎 1 +打造 1 +打響 1 +扔出 1 +托倫 1 +托加下 1 +托洛洛 1 +托盤 1 +托米 1 +托茂 1 +扣上 1 +批次 1 +扼止 1 +找來 1 +找續 1 +承天 1 +承德 1 +承接 1 +承斌 1 +承租 1 +技師 1 +技戰術 1 +技法 1 +抑制 1 +抑鬱 1 +抒解 1 +抓到 1 +投交 1 +投奔 1 +投標 1 +投球 1 +投身 1 +投靠 1 +抗大 1 +抗拒 1 +抗衡 1 +抗體 1 +折射 1 +折斷 1 +折衷 1 +抨擊 1 +披覆 1 +披頭士 1 +抬昇 1 +抱 1 +抱持 1 +抵受 1 +抵禦 1 +押韻 1 +抽檢 1 +抽煙 1 +抽象 1 +抽走 1 +拆分 1 +拆卸 1 +拆掉 1 +拆遷 1 +拉 1 +拉什沃思 1 +拉卜楞 1 +拉塞爾 1 +拉多加 1 +拉奏 1 +拉姆齊 1 +拉差諾 1 +拉布 1 +拉彼魯茲 1 +拉日色布 1 +拉林 1 +拉森 1 +拉爾夫 1 +拉特蘭 1 +拉珀斯維爾 1 +拉瑙 1 +拉籌伯 1 +拉美西斯 1 +拉薩 1 +拉西拉 1 +拉赫曼尼諾夫 1 +拋棄 1 +拋物 1 +拍 1 +拍照 1 +拍賣 1 +拒不 1 +拓務 1 +拓建 1 +拓撲 1 +拔刀 1 +拖進 1 +拖鞋 1 +拙劣 1 +招 1 +招潮蟹 1 +招生 1 +招聘 1 +招降 1 +拜仁慕尼黑 1 +拜拜 1 +括弧 1 +拱廊 1 +拱橋 1 +拳一 1 +拳擊 1 +拳賽 1 +拷問 1 +拼寫 1 +拾糞 1 +拿來 1 +持久 1 +持球 1 +指使 1 +指標 1 +指派 1 +指稱 1 +指責 1 +挑選 1 +挖 1 +挖子 1 +挖掘 1 +挪動 1 +挪用 1 +振 1 +振動 1 +振幅 1 +振林 1 +挹江 1 +挺身而出 1 +挽回 1 +挾持 1 +捉弄 1 +捉拿 1 +捉襟見肘 1 +捍衛 1 +捐 1 +捐款 1 +捐獻 1 
+捕撈 1 +捕殺 1 +捕獵 1 +捕魚 1 +捕鼠 1 +捲入 1 +捷徑 1 +授勳 1 +授意 1 +授權 1 +授與 1 +掉頭 1 +掌 1 +掌控 1 +掌摑 1 +掌權 1 +掌鏡 1 +排場 1 +排外 1 +排序 1 +掙扎 1 +掛 1 +掛果 1 +掛牌 1 +掛鉤 1 +掠奪 1 +採 1 +採信 1 +採摘 1 +採樣 1 +採納 1 +採購 1 +採集 1 +採食 1 +探明 1 +探望 1 +探求 1 +探究 1 +探險 1 +接到 1 +接力 1 +接班 1 +接納 1 +接聽 1 +接見 1 +接辦 1 +接送 1 +接連 1 +控告 1 +控訴 1 +推介 1 +推免生 1 +推前 1 +推力 1 +推導 1 +推斷 1 +推測 1 +推演 1 +推特 1 +推理 1 +推舉 1 +推論 1 +推遲 1 +掩 1 +掩蓋 1 +描摹 1 +描繪 1 +提前 1 +提問 1 +提子 1 +提康德羅加 1 +提拔 1 +提攜 1 +提昇 1 +提煉 1 +提督 1 +提籃 1 +提醒 1 +插手 1 +插曲 1 +揚言 1 +換成 1 +換算 1 +握帶 1 +握持 1 +揭曉 1 +揭發 1 +揭開 1 +揮舞 1 +援 1 +援助 1 +援外 1 +援引 1 +援手 1 +援救 1 +搜尋 1 +搜狐 1 +搜羅 1 +搜集 1 +搞垮 1 +搞錯 1 +搬動 1 +搬往 1 +搬移 1 +搬遷 1 +搭乘 1 +搭配 1 +搶 1 +搶先 1 +搶劫 1 +搶奪 1 +搶救 1 +摒棄 1 +摔 1 +摘下 1 +摘星 1 +摘錄 1 +摧毀 1 +摩加迪沙 1 +摩天 1 +摩崖 1 +摩托 1 +摩擦 1 +摩爾多瓦 1 +摩登 1 +摩納哥 1 +摩西 1 +摯友 1 +摸摸 1 +撒拉 1 +撒營盤 1 +撞入 1 +撞死 1 +撤回 1 +撤職 1 +撤退 1 +撤除 1 +撥 1 +撥出 1 +撥號 1 +撫養 1 +播種 1 +撮合 1 +撰述 1 +撲克 1 +撿 1 +撿起 1 +擁 1 +擁堵 1 +擁戴 1 +擁擠 1 +擁護 1 +擂台 1 +擊中 1 +擊劍 1 +擊斃 1 +擊毀 1 +擊潰 1 +擊破 1 +擋住 1 +操 1 +操控 1 +操縱 1 +擒拿 1 +擔憂 1 +擔竿 1 +擔綱 1 +據傳 1 +據此 1 +據稱 1 +據點 1 +擠塞 1 +擠壓 1 +擠奶 1 +擠眉弄眼 1 +擠迫 1 +擢升 1 +擬 1 +擬桿菌 1 +擬訂 1 +擬議 1 +擴散 1 +擴編 1 +擺弄 1 +擺渡 1 +擾亂 1 +攀爬 1 +攔截 1 +攝像 1 +攝取 1 +攪拌 1 +支取 1 +支廳 1 +支派 1 +支那 1 +支隊 1 +收場 1 +收容 1 +收市 1 +收支 1 +收生 1 +收益 1 +收租 1 +收緊 1 +收聽 1 +收買 1 +收費 1 +收養 1 +攸之 1 +改作 1 +改屬 1 +改投 1 +改採 1 +改換 1 +改派 1 +改發 1 +改穿 1 +改組 1 +改選 1 +改隸 1 +攻下 1 +攻勢 1 +攻堅 1 +攻方 1 +攻殺 1 +攻訐 1 +攻讀 1 +放任 1 +放入 1 +放出 1 +放到 1 +放大 1 +放榜 1 +放牧 1 +放緩 1 +放送 1 +放逐 1 +放開 1 +放鬆 1 +政團 1 +政委 1 +政局 1 +政廳 1 +政敵 1 +政樞 1 +政法 1 +政爭 1 +政界 1 +故郷 1 +效尤 1 +效能 1 +敏銳 1 +救人 1 +救出 1 +救助 1 +救國 1 +救援 1 +救星 1 +救災 1 +救生 1 +救贖 1 +敕 1 +敕令 1 +敕書 1 +敗 1 +敗局 1 +敗死 1 +敗瓦 1 +敗退 1 +教務 1 +教士 1 +教室 1 +教席 1 +教材 1 +教案 1 +教科 1 +教籍 1 +教總 1 +教義 1 +教職員 1 +散射 1 +敦 1 +敦煌 1 +敬仰 1 +敬堯 1 +敬請 1 +敲擊 1 +敲訂 1 +整 1 +整塊 1 +整所 1 +整架 1 +整片 1 +整篇 1 +整軍 1 +整顆 1 +整齊 1 +敵兵 1 +敵方 1 +數以千計 1 +數值 1 +數十億 1 +數十萬 1 +數澤 1 +數百 1 +數碼 1 +數萬 1 +數論 1 +文哲 1 +文姬 1 +文岳 1 +文巨 1 +文德 1 +文摘 1 +文政 1 +文書 1 +文本 1 +文楷 1 +文武 1 +文法 1 +文清 1 +文職 1 +文賢 1 +文集 1 +文飾曲口魚 1 +文體 1 +文體教 1 +斑塊 1 +斑點 1 +斗貴子 1 +料 1 +斜 1 
+斜坡 1 +斥教 1 +斬落 1 +斯佩克特 1 +斯凱勒 1 +斯哥特 1 +斯坦利 1 +斯坦福 1 +斯坦頓 1 +斯基龍 1 +斯塔茨門 1 +斯尼夫魯 1 +斯德哥爾摩 1 +斯托克 1 +斯氏亞冠龍 1 +斯洛伐克 1 +斯洛特 1 +斯特奇斯 1 +斯特萊默 1 +斯瓦爾恩 1 +斯科特 1 +斯維亞托斯拉夫 1 +斯里賽拉姆古德姆德瓦斯塔納姆 1 +新任 1 +新修 1 +新址 1 +新埔 1 +新太郎 1 +新奧爾良 1 +新字 1 +新寧 1 +新屋 1 +新巴 1 +新思 1 +新昌 1 +新明 1 +新春 1 +新月 1 +新核 1 +新榮 1 +新民 1 +新浪 1 +新版 1 +新生 1 +新秀 1 +新篇 1 +新編 1 +新罕布夏 1 +新罕布希爾 1 +新義 1 +新舊 1 +新製 1 +新開 1 +新飛 1 +新馬 1 +新高 1 +新鴻基 1 +新黨 1 +斷後 1 +斷盡 1 +斷言 1 +方丈 1 +方尖 1 +方正 1 +方田 1 +方石 1 +方程 1 +方蓋 1 +方蟹 1 +於維西 1 +施奈德 1 +施文 1 +施瓦本 1 +施用 1 +施韋比施哈爾 1 +旅 1 +旅居 1 +旅程 1 +旋渦 1 +旋轉 1 +族雄 1 +族頭 1 +旗艦 1 +旗面 1 +既得 1 +既是 1 +既然 1 +日出 1 +日向 1 +日夜 1 +日子 1 +日日 1 +日照 1 +日用 1 +日落 1 +日誌 1 +日賜 1 +旦增 1 +早有 1 +早餐 1 +旭 1 +旱災 1 +旻寧 1 +昆丁 1 +昆蟲 1 +昌吉 1 +昌都 1 +明中 1 +明亞 1 +明亮 1 +明代 1 +明宗 1 +明尼蘇達 1 +明憲 1 +明昌 1 +明智 1 +明正 1 +明潭 1 +明白 1 +明碁 1 +明視 1 +易卜拉欣 1 +易守 1 +易幟 1 +易斯 1 +易水 1 +易燃 1 +易經 1 +昔蘭尼 1 +星團 1 +星塵 1 +星展 1 +星崎 1 +星系 1 +映像 1 +春 1 +春丕 1 +春季 1 +春日井 1 +春會 1 +春田 1 +春節 1 +春緋 1 +春耕 1 +昨日 1 +昭侯 1 +昭儀 1 +昭宗 1 +昭禮 1 +昭通 1 +是年 1 +是方 1 +是次 1 +時事 1 +時份 1 +時值 1 +時光 1 +時刻 1 +時報 1 +時弊 1 +時稱 1 +時舉 1 +時針 1 +晃動 1 +晉 1 +晉北 1 +晉哲 1 +晉江 1 +晉級 1 +晒乾 1 +晨間 1 +普世 1 +普什圖 1 +普伊瑪諾娃 1 +普利茅斯 1 +普朗克 1 +普爾塔龍 1 +景泰 1 +晴神 1 +晶 1 +晶瑩 1 +晶閘 1 +智伯 1 +智利 1 +智趣 1 +暑期 1 +暖 1 +暗中 1 +暗喻 1 +暗影 1 +暗房 1 +暗指 1 +暗礁 1 +暗紅 1 +暗號 1 +暫 1 +暫別 1 +暫無 1 +暮光 1 +暱稱 1 +暴亂 1 +暴斂 1 +暴死 1 +暴風雪 1 +暹羅 1 +曄之 1 +曉彬 1 +曉得 1 +曉聲 1 +曉舟 1 +曖昧 1 +曬相 1 +曬衣 1 +曲張 1 +曲率 1 +曲目 1 +曲線 1 +曲藝 1 +曲阜 1 +曲頜形翼龍 1 +更低 1 +更佳 1 +更大 1 +更審 1 +更小 1 +更強 1 +更快 1 +更新世 1 +更是 1 +更硬 1 +更衣 1 +更輕 1 +更長 1 +曷懶甸 1 +書本 1 +書裡 1 +書迷 1 +書面 1 +書香世家 1 +曹家 1 +曹甸 1 +曹記 1 +曼切華 1 +曼哈頓 1 +曼城 1 +曼寧 1 +曼徹斯特 1 +曼成 1 +曼斯菲爾德 1 +曼海姆 1 +曼涅托 1 +曼玉 1 +曼科 1 +曾任 1 +曾孫 1 +曾愛 1 +曾祖父母 1 +替人 1 +最內 1 +最前 1 +最受 1 +最外 1 +最強 1 +最旺 1 +最最 1 +最末 1 +最東 1 +最純 1 +最遠 1 +會上 1 +會址 1 +會師 1 +會戰 1 +會所 1 +會晤 1 +會章 1 +會見 1 +會計 1 +月色 1 +月薪 1 +有份 1 +有別 1 +有力 1 +有名 1 +有愛 1 +有方 1 +有期 1 +有染 1 +有條不紊 1 +有異 1 +有病 1 +有稱 1 +有花 1 +有點 1 +服刑 1 +朔 1 +朗豪 1 +朗頓 1 +望族 1 +朝下 1 +朝元 1 +朝政 1 +朝散 1 +朝東 1 +朝聖 1 +朝覲 1 +朝貢 1 +朝陽 1 +期刊 1 +木中 1 +木乃伊 1 +木刻 1 +木卡姆 1 +木城 1 +木尼 1 +木屋 1 +木工 1 +木戶 1 
+木斯塘 1 +木村 1 +木櫾 1 +木蘭 1 +木造 1 +未入 1 +未敢 1 +未有 1 +未深 1 +未滿 1 +末端 1 +本劇 1 +本名 1 +本城 1 +本始 1 +本季 1 +本島 1 +本市 1 +本德 1 +本書 1 +本營 1 +本目 1 +本省 1 +本社 1 +本縣 1 +本能 1 +本著 1 +本郡 1 +本部 1 +本鄉 1 +本集 1 +本領 1 +札幌 1 +朱里 1 +朴次茅斯 1 +杉並 1 +李察 1 +杏子 1 +材 1 +材官 1 +材質 1 +村旁 1 +杖責 1 +杜乃爾 1 +杜伊 1 +杜利華 1 +杜成 1 +杜浦 1 +杜甫 1 +杜蘭戈維多利亞 1 +杜隆坦 1 +束 1 +杯賽 1 +杰仔 1 +東主 1 +東加 1 +東勝 1 +東南亞 1 +東坡 1 +東姑 1 +東宮 1 +東尼 1 +東岸 1 +東巡 1 +東急 1 +東支 1 +東昇 1 +東映 1 +東桑 1 +東條 1 +東武 1 +東涌 1 +東渡 1 +東直 1 +東站 1 +東興 1 +東華 1 +東西向 1 +東距 1 +東道 1 +東邊 1 +東郊 1 +東鄉 1 +東鐵 1 +東隧 1 +東風 1 +松下 1 +松坂 1 +松山 1 +松島 1 +松州 1 +松翔 1 +松花 1 +松鼠 1 +板 1 +板式 1 +林克 1 +林地 1 +林場 1 +林業 1 +林檎 1 +林翼 1 +林胡 1 +果然 1 +果真 1 +果酒 1 +枝葉 1 +架次 1 +枸杞 1 +柏 1 +柏加 1 +柏村 1 +柏松 1 +柏臣 1 +染手 1 +染病 1 +柔道 1 +柚木 1 +柝聲 1 +查找 1 +查普曼 1 +查氏 1 +查爾頓 1 +查理曼 1 +柬 1 +柬埔寨 1 +柯克伍德 1 +柯林斯 1 +柯爾 1 +柯爾克孜 1 +柯爾貝爾 1 +柱銘 1 +柳川 1 +柳州 1 +柳德米拉 1 +柳葉魚 1 +柴電 1 +柿本 1 +栗橋 1 +校呔 1 +校簿 1 +校門 1 +栩栩如生 1 +株 1 +株式 1 +核孔 1 +核實 1 +核工 1 +核彈 1 +核發 1 +核研 1 +核算 1 +根 1 +根培烏孜 1 +根深柢固 1 +根生 1 +根莖 1 +根部 1 +格丁尼亞 1 +格仔 1 +格但斯克 1 +格來 1 +格勞庇烏 1 +格勞賓登 1 +格奧爾格 1 +格子 1 +格式塔 1 +格拉博夫斯基 1 +格拉漢姆 1 +格林威治 1 +格林布希 1 +格羅先 1 +格羅夫納 1 +格羅希 1 +格蘭特 1 +格陵蘭 1 +格魯 1 +格魯瓊茲與姆瓦瓦 1 +桂陵 1 +桃子 1 +框架 1 +框線 1 +案例 1 +案達羅 1 +桐生 1 +桑德威斯狸藻 1 +桑托斯 1 +桓子 1 +桓玄 1 +梁贊諾夫 1 +梁龍 1 +梅園 1 +梅塔 1 +梅塔拉 1 +梅帕器 1 +梅里納 1 +梓里 1 +條款 1 +條紋 1 +梧州 1 +梨花 1 +梭羅 1 +梯隊 1 +梳 1 +梳頜翼龍 1 +梵安 1 +棉條 1 +棋局 1 +棋盤 1 +棋聖 1 +棋院 1 +棋類 1 +棒 1 +棒錘樹 1 +棕色 1 +棕褐 1 +森德靈 1 +棲地 1 +棲身 1 +棵 1 +植株 1 +椎名 1 +椰林 1 +楓樹 1 +楚克 1 +楚瑜 1 +楚紅 1 +楠桂 1 +楠溪 1 +業主 1 +業餘 1 +極北 1 +極區 1 +極少 1 +極為 1 +極矮 1 +極長 1 +極闊 1 +極限 1 +楷書 1 +楷模 1 +概要 1 +榆林 1 +榔頭 1 +榕樹 1 +榜羅 1 +榨出 1 +榫眼 1 +榮廷 1 +榮洲 1 +榮茂 1 +榴彈 1 +構思 1 +構造 1 +槍尖 1 +槍尾 1 +槍殺 1 +槍術 1 +槳 1 +樂園 1 +樂安 1 +樂官 1 +樂山 1 +樂師 1 +樂手 1 +樂敏錠 1 +樂樂 1 +樂活 1 +樂翠 1 +樂觀 1 +樂趣 1 +樓宇 1 +樓層 1 +樓底 1 +樓煩 1 +樓盤 1 +樓面 1 +樓高 1 +標 1 +標售 1 +標志 1 +標明 1 +標有 1 +標示 1 +標籤 1 +標記 1 +標註 1 +標高 1 +樞密 1 +模里西斯 1 +樣 1 +樣品 1 +樣式 1 +樣貌 1 +樸實 1 +樹上 1 +樹幹 1 +樹枝 1 +橈腳 1 +橋上 1 +橋樑 1 +橋面 1 +機上 1 +機位 1 +機型 1 +機密 1 +機師 1 +機床 1 +機敏 1 +機械 1 +機理 1 +機種 1 +機能 1 +機製 1 +機遇 1 +橡樹 1 +橡樹龍 1 +橢 1 +橫 1 +橫帶 1 +橫徵 1 +橫渡 
1 +橫線 1 +檔案 1 +檔次 1 +檜山 1 +檢驗 1 +檨仔林 1 +檳榔 1 +檸七 1 +櫃 1 +櫃檯 1 +櫟社 1 +欄目 1 +權氏 1 +權限 1 +次席 1 +次月 1 +次生 1 +次程 1 +欣快 1 +欺 1 +欽 1 +款式 1 +歆 1 +歌人 1 +歌壇 1 +歌星 1 +歌舞 1 +歌詞 1 +歌頌 1 +歐律狄刻 1 +歐斯巴特 1 +歐盟 1 +歐羅巴 1 +歐青 1 +歐麥爾 1 +歡 1 +歡慶 1 +歡樂 1 +正值 1 +正傳 1 +正夫 1 +正子 1 +正宇 1 +正巧 1 +正平 1 +正比 1 +正派 1 +正版 1 +正當 1 +正經 1 +正負粒子 1 +正配 1 +正陽 1 +此事 1 +此地 1 +此夢 1 +此書 1 +此樓 1 +此橋 1 +此片 1 +此處 1 +此語 1 +此起彼落 1 +此路 1 +此陵 1 +此項 1 +此魚 1 +步伐 1 +步蟾 1 +步行 1 +步驟 1 +武克希 1 +武力 1 +武威 1 +武帝 1 +武廟 1 +武廠 1 +武德 1 +武打 1 +武王 1 +武略 1 +武皇 1 +武者 1 +武藏 1 +歩 1 +歲月 1 +歷代 1 +歷來 1 +歷屬 1 +歷程 1 +歸來 1 +歸入 1 +歸到 1 +歸功 1 +歸咎 1 +歸案 1 +歸還 1 +歸附 1 +死刑 1 +死因 1 +死地 1 +死戰 1 +死期 1 +死板 1 +死狀 1 +死而復生 1 +死黨 1 +殉教 1 +殉爆 1 +殉職 1 +殊榮 1 +殘疾 1 +殘破 1 +殘遺 1 +殘部 1 +殲滅 1 +殺人 1 +殺手 1 +殺機 1 +殼層 1 +殼體 1 +殿堂 1 +毀壞 1 +毀容 1 +毅 1 +毅仁 1 +毅然 1 +母會 1 +母校 1 +母狼 1 +母猴 1 +母艦 1 +母語 1 +母貓 1 +毎年 1 +每元 1 +每座 1 +每戶 1 +每所 1 +每枚 1 +每每 1 +每股 1 +每邊 1 +每集 1 +每鼎 1 +毒​​物 1 +毒品 1 +毒死 1 +毒癮 1 +毒舌 1 +毓林 1 +毓楓 1 +毓芳 1 +比亞迪 1 +比亞韋斯托克 1 +比利 1 +比利牛斯 1 +比哈爾 1 +比喻 1 +比得哥什 1 +比方 1 +比武 1 +比薩 1 +比袍 1 +比褂 1 +毛色 1 +毛髮 1 +毫安 1 +毫無 1 +毯子 1 +氈幕 1 +民事 1 +民俗 1 +民力 1 +民居 1 +民工 1 +民心 1 +民意 1 +民房 1 +民柬 1 +民權 1 +民法 1 +民盟 1 +民答那峨 1 +民航 1 +民英 1 +民謠 1 +民豐 1 +民選 1 +民鐸 1 +民防 1 +氘 1 +氚 1 +氣息 1 +氣態 1 +氣憤 1 +氣旋 1 +氣槍 1 +氣死 1 +氣溫 1 +氣燄 1 +氣胸 1 +氣象 1 +氦 1 +氧化鐵 1 +氨基酸 1 +氫 1 +氫化氦 1 +氫氣 1 +氫鍵 1 +氮 1 +氮素 1 +氯化 1 +氯化氫 1 +氯化銠 1 +氯化鋁 1 +氯雷他定 1 +水世 1 +水份 1 +水圈 1 +水壓 1 +水床 1 +水扁 1 +水攻 1 +水晶 1 +水汽 1 +水流 1 +水火不容 1 +水球 1 +水產 1 +水療 1 +水翼 1 +水能 1 +水警 1 +水面 1 +水鳥 1 +永久 1 +永元 1 +永升 1 +永吉 1 +永和 1 +永壽 1 +永平 1 +永成 1 +永昌 1 +永樂 1 +永樂環 1 +永權 1 +永續 1 +永輝 1 +永靖 1 +汁液 1 +求 1 +求偶 1 +求出 1 +求助 1 +求問 1 +求婚 1 +求情 1 +求援 1 +求籤 1 +求醫 1 +汝寧 1 +汞柱 1 +江協 1 +江口 1 +江浙 1 +江海 1 +江源 1 +江漢 1 +江灣 1 +江谷 1 +江都 1 +江閣 1 +江魚 1 +池塘 1 +池田 1 +污損 1 +污點 1 +汪 1 +汪達 1 +汪達爾 1 +汲及 1 +決意 1 +決擇 1 +決然 1 +決裂 1 +汽油 1 +汽船 1 +沃奎茲 1 +沃季采 1 +沃州 1 +沃思 1 +沃斯托克 1 +沃爾 1 +沃羅涅日 1 +沈氏 1 +沉水 1 +沉迷 1 +沉重 1 +沉降 1 +沒能 1 +沒落 1 +沒錯 1 +沖之 1 +沖片 1 +沖走 1 +沙丘 1 +沙依 1 +沙崙 1 +沙巴 1 +沙普爾 1 +沙梁伐 1 +沙池 1 +沙洛蒙 1 +沙漠 1 +沙瓦納 1 +沙田 1 +沙畹 1 +沙蠶 1 +沙迦罕 1 +沙邦 1 +沙里亞 1 +河卡 1 +河圖 1 +河岸 1 +河心 
1 +河段 1 +河漫 1 +河西 1 +油煙 1 +油田 1 +油菜 1 +油量 1 +油電 1 +治中 1 +治勲 1 +治勳 1 +治喪 1 +治國 1 +治學 1 +治水 1 +治理 1 +治軍 1 +沼 1 +沽渚 1 +沾解 1 +沿 1 +沿線 1 +沿襲 1 +沿途 1 +泉 1 +法令 1 +法師 1 +法拉利 1 +法拉龍 1 +法政 1 +法斯塔夫 1 +法格拿 1 +法比恩 1 +法海 1 +法登 1 +法羅 1 +法老 1 +法蘭克尼亞 1 +法西斯 1 +法輪 1 +泛濫 1 +泠 1 +波包 1 +波卡特洛 1 +波及 1 +波因 1 +波圖 1 +波城 1 +波塞冬 1 +波形 1 +波恩 1 +波折 1 +波普 1 +波森 1 +波爾 1 +波特威瑟 1 +波特蘭 1 +波瓦坦 1 +波的尼亞 1 +波西斯 1 +波錠 1 +波黑 1 +泥土 1 +泥潭 1 +注資 1 +泰 1 +泰共 1 +泰勒 1 +泰北 1 +泰始 1 +泰姬 1 +泰姬瑪哈 1 +泰州 1 +泰曾 1 +泰然 1 +泰琳達 1 +泰米爾納德 1 +泰興 1 +泳屋 1 +泳灘 1 +洋介 1 +洗劫 1 +洗衣 1 +洛佩斯 1 +洛加尼斯 1 +洛城 1 +洛夫喬伊 1 +洛夫森 1 +洛布尼亞 1 +洛恩 1 +洛書 1 +洛珊 1 +洛維爾 1 +洛茲 1 +洛雷托 1 +洞子 1 +洞穴 1 +洞窟 1 +津 1 +津貼 1 +洩慾 1 +洩漏 1 +洪堡 1 +洪家 1 +洪橋 1 +洵 1 +洵美 1 +活出 1 +活化 1 +活埋 1 +活水 1 +活潑 1 +活用 1 +活躍 1 +活靈活現 1 +派對 1 +派往 1 +流 1 +流下 1 +流亡 1 +流入 1 +流出 1 +流嶼 1 +流放 1 +流星 1 +流標 1 +流民 1 +流水 1 +流浪 1 +流產 1 +流程 1 +流言 1 +流逝 1 +流露 1 +浚稽 1 +浦市 1 +浦那 1 +浦鎮 1 +浪 1 +浪漫 1 +浪潮 1 +浪費 1 +浪跡 1 +浮 1 +浮動 1 +浴場 1 +海事 1 +海光 1 +海因茨 1 +海地 1 +海峰 1 +海布隆 1 +海平 1 +海廷 1 +海德克 1 +海怡 1 +海昌 1 +海景 1 +海淀 1 +海港 1 +海濱 1 +海灘 1 +海爾賽 1 +海神 1 +海秀 1 +海老名 1 +海航 1 +海藍 1 +海螺 1 +海豐 1 +海陸 1 +海風 1 +海鷗 1 +浸染 1 +浸泡 1 +涅爾皮奇耶 1 +涇波 1 +涇陽 1 +消極 1 +消耗 1 +消退 1 +消除 1 +涉世 1 +涉嫌 1 +涉足 1 +涪江 1 +涮煮 1 +液 1 +液化 1 +液壓 1 +涵蓋 1 +淄川 1 +淑妃 1 +淑怡 1 +淘寶 1 +淘金 1 +淡 1 +淡定 1 +淡色 1 +淨土 1 +淪 1 +淪落 1 +淪陷 1 +淫蕩 1 +淮南 1 +淮許 1 +深受 1 +深埋 1 +深層 1 +深度 1 +深感 1 +深有 1 +深海 1 +深港 1 +深溪 1 +深紅 1 +深綠 1 +深色 1 +深處 1 +深造 1 +淵源 1 +混 1 +混亂 1 +混凝 1 +混沌 1 +混為一談 1 +混燃 1 +淹浸 1 +淺 1 +淺水 1 +淺綠 1 +添丁 1 +清償 1 +清凈 1 +清單 1 +清帝 1 +清拆 1 +清教 1 +清文 1 +清明 1 +清潔 1 +清理 1 +清道 1 +清遠 1 +清還 1 +清鄉 1 +減低 1 +減刑 1 +減小 1 +減退 1 +渠子 1 +渡 1 +渣打 1 +渤海 1 +測繪 1 +渭州 1 +港交 1 +港區 1 +港府 1 +渴求 1 +游 1 +游標 1 +游說 1 +渾 1 +湄洲 1 +湖上 1 +湖人 1 +湖名 1 +湖畔 1 +湘南 1 +湘西 1 +湘陰 1 +湛恩 1 +湧現 1 +湮滅 1 +湯姆萊利 1 +湯料 1 +源於 1 +源田 1 +準 1 +準基 1 +準將 1 +準確 1 +溝 1 +溝壑 1 +溝齒鼩 1 +溢漏 1 +溪 1 +溪水 1 +溪美 1 +溪鱂 1 +溫哥華 1 +溫坡 1 +溫布萊 1 +溫布頓 1 +溫徹斯特 1 +溫斯頓 1 +溫柔 1 +溫特夸特斯 1 +溫特斯 1 +溶劑 1 +溶氣 1 +滅 1 +滑板 1 +滑稽 1 +滑鼠 1 +滕氏 1 +滙業 1 +滬江 1 +滯洪 1 +滲出 1 +滴下 1 +滾動 1 +滾石 1 +滿意 1 +滿清 1 +滿載 1 +漁村 1 +漁梁 1 +漁船 1 +漂浮 1 +漆器 1 +演 1 +演成 1 +演戲 1 +演技 1 +演繹 
1 +演義 1 +演講 1 +漢中 1 +漢娜 1 +漢字 1 +漢桓 1 +漫漶 1 +漫長 1 +漬 1 +漱芳 1 +漲幅 1 +漸變 1 +漸趨 1 +潑 1 +潔瑩 1 +潘丘 1 +潘恩 1 +潛伏 1 +潛力 1 +潛望 1 +潛水 1 +潛游 1 +潟湖 1 +潢川 1 +潭村 1 +潭東 1 +潭陽 1 +潰散 1 +澀谷 1 +澤尻 1 +激勵 1 +激發 1 +激素 1 +激進 1 +濁 1 +濃 1 +濃厚 1 +濃煙 1 +濕地 1 +濟 1 +濟世 1 +濟科 1 +濟邦 1 +濤 1 +濫用 1 +濱海 1 +濾掉 1 +瀏陽 1 +瀕危 1 +瀘溪 1 +瀝泗 1 +瀟洒 1 +火上加薪 1 +火候 1 +火喉 1 +火山 1 +火心 1 +火掌 1 +火炮 1 +火爆 1 +火鍋 1 +灰棕 1 +灰雲 1 +灰黑 1 +災禍 1 +炎熱 1 +炙手可熱 1 +炭疽 1 +炮 1 +炸彈 1 +炸死 1 +炸毀 1 +炸糕 1 +為時 1 +烈格司 1 +烏代 1 +烏來杜鵑 1 +烏孜別克 1 +烏宗哈珊 1 +烏干達 1 +烏德特 1 +烏扎 1 +烏拉圭 1 +烏普薩拉 1 +烏腳 1 +烏魯木齊 1 +烴 1 +烹煮 1 +焊接 1 +焗豆 1 +焚 1 +焚屍 1 +焚燒 1 +焜耀 1 +無俚頭 1 +無危 1 +無厭 1 +無子 1 +無家可歸 1 +無心 1 +無忌 1 +無所不能 1 +無暇 1 +無有 1 +無機 1 +無氧 1 +無水氯化鋁 1 +無派 1 +無產 1 +無疑 1 +無盡 1 +無罪 1 +無能為力 1 +無與倫比 1 +無色 1 +無處 1 +無視 1 +無誤 1 +無過 1 +無量壽 1 +無關緊要 1 +無限 1 +無雙 1 +無頭 1 +無點 1 +無齒龍 1 +焦尼 1 +焦點 1 +煉油 1 +煉金 1 +煙 1 +煙囪 1 +煙槍 1 +煙霧 1 +煜全 1 +煤建 1 +煤氣 1 +煥 1 +煦 1 +照射 1 +煮 1 +煮食 1 +煽動 1 +熄匙 1 +熊族 1 +熊本 1 +熊隊 1 +熏烤 1 +熏陶 1 +熔化 1 +熔岩 1 +熟知 1 +熟釜 1 +熱值 1 +熱刺 1 +熱力 1 +熱心 1 +熱愛 1 +熱羅姆 1 +熱身 1 +熱量 1 +熱電 1 +熱鬧 1 +熾熱 1 +燁 1 +燃氣 1 +燈謎 1 +燒灼 1 +燒荒 1 +燕 1 +燕窩 1 +營口 1 +營團 1 +營地 1 +營寨 1 +營帳 1 +營火 1 +營造 1 +營長 1 +營養 1 +燦爛 1 +燭光 1 +燾 1 +爐 1 +爪部 1 +爬到 1 +爬山 1 +爬梯 1 +爭冠 1 +爭占 1 +爭吵 1 +爭奪 1 +爭寵 1 +爭得 1 +爭界 1 +爭相 1 +爭端 1 +爭競 1 +爭論 1 +爭鬥 1 +父風 1 +爸爸 1 +爺 1 +爺爺 1 +爽文 1 +爾炘 1 +牆 1 +牆上 1 +牆身 1 +牆面 1 +片劑 1 +片尾 1 +片斷 1 +片頭 1 +版主 1 +版畫 1 +牌照 1 +牙籤 1 +牙線 1 +牙薩克 1 +牙醫 1 +牛池 1 +牛潭尾 1 +牛石 1 +牛首 1 +牛鼻栓 1 +牟 1 +牟利 1 +牟合 1 +牠 1 +牡蠣 1 +牧 1 +牧區 1 +牧民 1 +牧羊 1 +牧谷 1 +物件 1 +物產 1 +物象 1 +物鏡 1 +物阜 1 +牲畜 1 +特備 1 +特優 1 +特務 1 +特區 1 +特工 1 +特快 1 +特意 1 +特拉華 1 +特攝 1 +特派 1 +特爾瑪 1 +特瓦史塔 1 +特產 1 +特異 1 +特菲爾 1 +特重 1 +特隆赫姆 1 +特雷格羅恩 1 +牽引 1 +牽牛花 1 +犧牲 1 +犬科 1 +犬種 1 +犬髖 1 +犯人 1 +狂亂 1 +狄 1 +狄拉克 1 +狐 1 +狐庸 1 +狡猾 1 +狸藻 1 +狹小 1 +狼人 1 +狼堡 1 +狼影 1 +狼群 1 +猜忌 1 +猜想 1 +猝死 1 +猴年 1 +猴群 1 +猶大 1 +獅子 1 +獎牌 1 +獎盃 1 +獨一無二 1 +獨具 1 +獨唱 1 +獨孤 1 +獨家 1 +獨有 1 +獨眠 1 +獨行 1 +獨資 1 +獲准 1 +獲判 1 +獲勳 1 +獲召 1 +獲悉 1 +獲授 1 +獲獎 1 +獲益 1 +獲薦 1 +獲選 1 +獲頒 1 +獵物 1 +獸人 1 +獸族 1 +獻 1 +獻上 1 +獻堂 1 +獻策 1 +獻議 1 +玄天 1 +玄宗 1 +玄武 1 +玄策 1 +玄貓 1 +玉柴 1 +玉純 1 +玉魔 1 +玉鳳花 1 +玉麟 1 +王儲 1 +王冠 1 
+王墓 1 +王宮 1 +王座 1 +王爾德 1 +王蓮 1 +玩伴 1 +玩弄 1 +玩法 1 +玩笑 1 +玫瑰 1 +玲玲 1 +玷染 1 +珀斯 1 +珍寶 1 +珠 1 +珠璣 1 +珠鋼 1 +班克斯 1 +班卓 1 +班子 1 +班布里奇 1 +班機 1 +班次 1 +班禪 1 +班級 1 +現役 1 +現身 1 +球壇 1 +球差 1 +球星 1 +球根 1 +球狀 1 +球道 1 +球面 1 +琅 1 +理性 1 +理由 1 +琦 1 +琬 1 +琳 1 +琳達 1 +琴弓 1 +琺琅 1 +瑋 1 +瑛 1 +瑜伽 1 +瑞普肯 1 +瑞欽 1 +瑞霖 1 +瑟洛 1 +瑣法 1 +瑪 1 +瑪利 1 +瑪利亞路易莎 1 +瑪利歐 1 +瑪君龍 1 +瑪莉安 1 +瑪莎 1 +瑪麗特 1 +瑾 1 +環保 1 +環帶 1 +環狀 1 +環節 1 +環繞 1 +瓊斯 1 +瓊珊 1 +瓘 1 +瓜里利亞 1 +瓦伊什維爾卡斯 1 +瓦伊杜 1 +瓦卡加 1 +瓦德 1 +瓦拉 1 +瓦薩 1 +瓦解 1 +瓦里奧 1 +甄別 1 +甘草 1 +甚厚 1 +甚嚴 1 +甚多 1 +甚小 1 +甚深 1 +甚篤 1 +甚至是 1 +甜兒 1 +甜度 1 +生主 1 +生出 1 +生動 1 +生天 1 +生子 1 +生平 1 +生性 1 +生效 1 +生機 1 +生殺 1 +生氣 1 +生火 1 +生肖 1 +生財之道 1 +生還 1 +產 1 +產出 1 +產經 1 +甦醒 1 +用人 1 +用來 1 +用光 1 +用兵 1 +用字 1 +用完 1 +用手 1 +用有 1 +用水 1 +用藥 1 +用計 1 +用詞 1 +甬 1 +田園 1 +田地 1 +田心 1 +田納西 1 +田野 1 +田頭 1 +甲山 1 +甲殼 1 +申辦 1 +男人 1 +男士 1 +男嬰 1 +男方 1 +男童 1 +界定 1 +界限 1 +畔 1 +留傳 1 +留哥 1 +留待 1 +留空 1 +留聲 1 +留良 1 +畜牧 1 +畜養 1 +畢打 1 +畢氏 1 +畢蘭德拉 1 +畢馬威 1 +略帶 1 +略有 1 +略為 1 +畫下 1 +畫中 1 +畫分 1 +畫會 1 +畫畫 1 +畫面 1 +異事 1 +異姓 1 +異度 1 +異形 1 +異曲同工 1 +異母 1 +異端 1 +當上 1 +當下 1 +當值 1 +當官 1 +當屆 1 +當政 1 +當晚 1 +當期 1 +當歸 1 +當面 1 +疆域 1 +疏浚 1 +疏遠 1 +疑 1 +疑點 1 +疙瘩 1 +疲勞 1 +疲弱 1 +疼痛 1 +病原 1 +病患 1 +病情 1 +病歷 1 +病死 1 +病重 1 +症候 1 +症狀 1 +痕跡 1 +痙攣 1 +痛心疾首 1 +痢疾 1 +痰 1 +瘦 1 +瘧疾 1 +癌 1 +癖 1 +癥狀 1 +登 1 +登丹 1 +發 1 +發佈 1 +發作 1 +發兵 1 +發呆 1 +發奮 1 +發揚光大 1 +發改委 1 +發放 1 +發洩 1 +發炎 1 +發燒 1 +發牌 1 +發球 1 +發病 1 +發聲 1 +發財 1 +發車 1 +發配 1 +白丁 1 +白井 1 +白公 1 +白利南 1 +白化 1 +白堊 1 +白天 1 +白宮 1 +白砂 1 +白蓮 1 +白蛇 1 +白軍 1 +白金 1 +白銅 1 +白陵 1 +白雲 1 +白面 1 +白頸長尾雉 1 +白鹿 1 +白麗 1 +百事 1 +百代 1 +百億 1 +百兆 1 +百帕斯卡 1 +百廢待舉 1 +百濟 1 +百無聊賴 1 +百老匯 1 +百花齊放 1 +百萬 1 +百貨 1 +百餘 1 +百鳴 1 +的士 1 +的確 1 +的黎波里 1 +皇位 1 +皇冠 1 +皇城 1 +皇太極 1 +皇妃 1 +皇廷 1 +皇權 1 +皇發 1 +皈依 1 +皋 1 +皓 1 +皓若 1 +皮亞韋 1 +皮克爾 1 +皮內羅洛 1 +皮特 1 +皮特凱恩 1 +皮耶特普拉桑克穆斯特魯 1 +皮雅福斯 1 +皰疹 1 +盆地 1 +盈盈 1 +益 1 +益城 1 +益新 1 +益處 1 +盔甲 1 +盛事 1 +盛大 1 +盛妝 1 +盛揮 1 +盛產 1 +盛行 1 +盜用 1 +盟 1 +盟軍 1 +盡到 1 +盡喪 1 +盡情 1 +盡頭 1 +監工 1 +監控 1 +監測 1 +監禁 1 +監聽 1 +盤踞 1 +盧 1 +盧加 1 +盧溝 1 +盧瓦斯 1 +盧甘斯克 1 +盧福瓦 1 +盪 1 +目睹 1 +目鏡 1 +直勉 1 +直屬 1 +直覺 1 +直言 1 +直說 1 +直間 1 +相位 1 +相傳 1 +相容 1 +相差無幾 1 
+相悖 1 +相應 1 +相挺 1 +相異 1 +相稱 1 +相約 1 +相繼 1 +相聲 1 +相若 1 +相處 1 +相見 1 +相較 1 +相通 1 +相速 1 +相鄰 1 +相間 1 +盾座苣苔 1 +盾系 1 +省務 1 +省思 1 +省油 1 +眉山 1 +看中 1 +看出 1 +看台 1 +看得 1 +看看 1 +看管 1 +看見 1 +看透 1 +看重 1 +真 1 +真光 1 +真北 1 +真名 1 +真好 1 +真希 1 +真木 1 +真核 1 +真相大白 1 +眯眼 1 +眷村 1 +眼下 1 +眼淚 1 +眼狀 1 +眼球 1 +眼皮 1 +眼神 1 +眾經 1 +眾說紛紜 1 +睡 1 +睡眠 1 +睡覺 1 +督撫 1 +睾丁蛋白 1 +睿 1 +睿智 1 +瞪羚 1 +瞬時 1 +瞭如指掌 1 +矗立 1 +矛 1 +矢口否認 1 +知府 1 +知曉 1 +知足 1 +短少 1 +短期 1 +短草 1 +短裙 1 +短詩 1 +短語 1 +短音 1 +短髮 1 +矮星 1 +石像 1 +石器 1 +石塊 1 +石材 1 +石湖 1 +石灰 1 +石牆 1 +石牌 1 +石頭門坎 1 +砂拉越 1 +砂漿 1 +砂紙 1 +砍伐 1 +砒霜 1 +研磨 1 +砝碼 1 +破損 1 +破滅 1 +破舊 1 +破落 1 +硝庫爾 1 +硝酸甘油片 1 +硫 1 +硫化氫 1 +硫化鉛 1 +硫酸銨 1 +硬幣 1 +碑亭 1 +碑刻 1 +碧波 1 +碧琴 1 +碰撞 1 +碳紙 1 +碳酸鎂 1 +確知 1 +確診 1 +碼 1 +磁性 1 +磐田 1 +磚室 1 +磨坊 1 +磨折 1 +磨槽 1 +磷化 1 +磷素 1 +磷酸 1 +礙 1 +礦場 1 +礦物 1 +礦石 1 +礦藏 1 +示人 1 +示愛 1 +社皮 1 +社論 1 +社長 1 +祁鏞 1 +祈願 1 +祐希 1 +祖 1 +祖上 1 +祖圭 1 +祖外公 1 +祖外婆 1 +祖宗 1 +祖籍 1 +神仙 1 +神偷 1 +神器 1 +神明 1 +神殿 1 +神社 1 +神秘果 1 +神籤 1 +神魔 1 +祠 1 +祥子 1 +票據 1 +票數 1 +祭司 1 +祭壇 1 +祭師 1 +祭物 1 +祭祀 1 +祭酒 1 +祿勸 1 +祿山 1 +禁煙 1 +禁用 1 +禁藥 1 +禁賽 1 +禍 1 +福克沙尼 1 +福安 1 +福康安 1 +福慧 1 +福池 1 +福清 1 +禕 1 +禪師 1 +禮堂 1 +禮濤 1 +禮炮 1 +禮物 1 +禱文 1 +禽流感 1 +秀實 1 +秀康 1 +秀怡 1 +秀珠 1 +私下 1 +私交 1 +私奔 1 +私宅 1 +私家 1 +私立 1 +私財 1 +秉國 1 +秋人 1 +秋山 1 +秋爽 1 +秋興 1 +秋香 1 +科多爾 1 +科屬 1 +科恩 1 +科教 1 +科朗 1 +科爾基斯 1 +科特 1 +科目 1 +秘指 1 +租予 1 +租務 1 +租地 1 +租戶 1 +租用 1 +秦城 1 +秦州 1 +秦晉之好 1 +秦朝 1 +秦石 1 +秩序 1 +移交 1 +移往 1 +移植 1 +移至 1 +移送 1 +稀釋 1 +稅項 1 +稍為 1 +稗官野史 1 +種內 1 +種名 1 +種子 1 +種屬 1 +稱海 1 +稱病 1 +稱銜 1 +稻子 1 +稻草 1 +稼祥 1 +穀 1 +穀物 1 +穆宗 1 +穆拉 1 +穆斯塔法凱馬爾帕沙 1 +穆爾西亞 1 +穆薩 1 +積山 1 +積良 1 +穩 1 +穩固 1 +穩妥 1 +究竟 1 +空出 1 +空前 1 +空名 1 +空客 1 +空戰 1 +空隙 1 +空難 1 +穿幫 1 +穿戴 1 +穿甲 1 +穿行 1 +穿過 1 +突尼西亞 1 +突感 1 +突現 1 +窄袖 1 +窗口 1 +窗外 1 +窘境 1 +窟檐 1 +窮苦 1 +窮追 1 +窯 1 +窯洞 1 +竄紅 1 +竊聽 1 +立交 1 +立國 1 +立村 1 +立營 1 +立花 1 +立蒙 1 +立面 1 +立體 1 +站內 1 +站名 1 +站坪 1 +站廳 1 +站點 1 +竟 1 +章回 1 +章斐 1 +童女 1 +童男 1 +端川 1 +競相 1 +竹 1 +竹器 1 +竹治 1 +竹溪 1 +竹片 1 +笛 1 +符 1 +符桐 1 +第 1 +第999 1 +第三十三 1 +第十七 1 +第十五 1 +第十四 1 +第廿 1 +第比利斯 1 +第谷 1 +笳冬 1 +等位 1 +等客 1 +等號 1 +筐仔沙 1 +筒狀 1 +答應 1 +箏 1 +算出 1 +算術 1 +管制 1 +管子 1 +箬松 1 +箱型 1 
+箴言 1 +節度 1 +節節 1 +範疇 1 +篡位 1 +篡國 1 +篡地 1 +簡化 1 +簡約 1 +簡訊 1 +簧 1 +簽名 1 +簽定 1 +簽認 1 +簽證 1 +簽賬 1 +籃筐 1 +籌備 1 +籌措 1 +籌款 1 +籌資 1 +籌辦 1 +籍貫 1 +籠式 1 +米南加保 1 +米古 1 +米哈伊 1 +米拉麥克斯 1 +米沙鄢 1 +米洛塞維奇 1 +米特斯 1 +米線 1 +米酒 1 +米高梅 1 +粉 1 +粉碎 1 +粉紅 1 +粉絲 1 +粗壯 1 +粗鱗蟒 1 +粵明 1 +粽子 1 +精 1 +精力 1 +精子 1 +精密 1 +精心 1 +精湛 1 +精算 1 +精索 1 +精裝 1 +糖尿 1 +糖蒜 1 +糞 1 +糟糕 1 +糧儲 1 +糧餉 1 +系數 1 +糾正 1 +糾紛 1 +紀元 1 +紂 1 +約定 1 +約熱夫 1 +約瑟芬 1 +約翰內斯堡 1 +約翰麥克連 1 +約長 1 +紅旗 1 +紅日 1 +紅杏出牆 1 +紅樓 1 +紅樓夢 1 +紅樹 1 +紅玉 1 +紅磨 1 +紅茶 1 +紅襪 1 +紅遍 1 +紅酒 1 +紅點 1 +紈 1 +紋路 1 +紋飾 1 +納入 1 +納塔爾 1 +納爾西斯 1 +納爾遜 1 +納瓦拉 1 +納蘇爾 1 +紐國 1 +紐澤西 1 +紐約尼克斯 1 +紐芬蘭 1 +紐華克 1 +紐黑文 1 +純一 1 +純凈 1 +純樸 1 +純陽 1 +紙上 1 +紙條 1 +紙盒 1 +級數 1 +素包 1 +素食 1 +素餡 1 +索倫 1 +索尼 1 +索溪峪 1 +索維克 1 +索菲 1 +索菲亞 1 +索西納 1 +索賠 1 +索馬里 1 +紮實 1 +累計 1 +細 1 +細岡 1 +細窄 1 +細菌 1 +細部 1 +細長 1 +紳士 1 +紹 1 +紹儀 1 +紹榮 1 +紺三郎 1 +終審 1 +終身大事 1 +組件 1 +組像 1 +組別 1 +組口 1 +組態 1 +組織胺 1 +組隊 1 +結交 1 +結冰 1 +結尾 1 +結雅 1 +絕壁 1 +絕大 1 +絕後 1 +絕版 1 +絕罰 1 +絞刑 1 +絞死 1 +絞痛 1 +給定 1 +給職 1 +給藥 1 +給體 1 +統 1 +統帥 1 +統籌 1 +絲山 1 +絲帶 1 +絶 1 +綁 1 +綉 1 +綏遠 1 +經國 1 +經意 1 +經文 1 +經昌 1 +經期 1 +經由 1 +經界 1 +綜 1 +綜理 1 +綜錄 1 +綠化 1 +綠帶 1 +綠滙 1 +綠燈 1 +綠社 1 +綠黨 1 +維健 1 +維克托 1 +維利爾斯 1 +維埃拉 1 +維多莉亞 1 +維希 1 +維德 1 +維景灣 1 +維爾紐斯 1 +維生 1 +維祀 1 +維羅納 1 +維記 1 +維護 1 +維迪斯 1 +維迪爾 1 +綱領 1 +網址 1 +網易 1 +網線 1 +網購 1 +綺塍 1 +綺色佳 1 +綽號 1 +綿羊 1 +緊張 1 +緊緊 1 +緊貼 1 +緊逼 1 +緊閉 1 +線上 1 +線前 1 +線度 1 +線條 1 +線索 1 +線道 1 +締造 1 +編上 1 +編導 1 +編程 1 +編篡 1 +編繪 1 +編纂 1 +編者 1 +編腔 1 +編隊 1 +緩衝 1 +緩解 1 +緩鬢 1 +緩龍 1 +緬 1 +緯來 1 +練兵 1 +緹 1 +縣市 1 +縣裡 1 +縫 1 +縫製 1 +縮寫 1 +縮小 1 +縱 1 +縱使 1 +縱觀 1 +縱隊 1 +總區 1 +總和 1 +總局 1 +總站 1 +總行 1 +總裁 1 +總計 1 +總辦 1 +績效 1 +繁多 1 +繁瑣 1 +繁盛 1 +繁雜 1 +繁體 1 +繞境 1 +繞開 1 +繡 1 +繩架 1 +繭 1 +繳付 1 +繳納 1 +繼業 1 +繼科 1 +續航 1 +續部 1 +纏足 1 +纜車 1 +缺口 1 +缺失 1 +缺少 1 +缺氧 1 +缺血 1 +罕有 1 +罪惡 1 +置有 1 +置物 1 +罰則 1 +署理 1 +罵聲 1 +罷免 1 +罷工 1 +罹癌 1 +罹難 1 +羅乞多毗闍 1 +羅什艾因 1 +羅伊 1 +羅克斯堡 1 +羅培茲 1 +羅夫 1 +羅希 1 +羅德西亞 1 +羅拔 1 +羅曼什 1 +羅柔 1 +羅森費爾德 1 +羅爾夫 1 +羅隆基 1 +羊圈 1 +美味 1 +美孚 1 +美寶 1 +美幸 1 +美林豬籠草 1 +美琴 1 +美知留 1 +美稱 1 +美索不達米亞 1 +美聯 1 +美聲 1 +美薇 1 +美術 1 +美觀 1 +美譽 1 +美里 1 +美食 1 +美麗華 1 +羚羊 1 +羞恥 1 +群峰 1 
+群族 1 +群組 1 +群落 1 +群速 1 +群雄 1 +群體 1 +羨慕 1 +義久 1 +義勇 1 +義安 1 +義工 1 +義弘 1 +義春 1 +義民 1 +義父 1 +義項 1 +羱羊 1 +羲 1 +羽田 1 +羽絨 1 +翌日 1 +習經 1 +翔 1 +翔麟 1 +翟 1 +翠鳥 1 +翻覆 1 +翼手龍 1 +翼龍 1 +耀樞 1 +耀武 1 +耀邦 1 +老人 1 +老大 1 +老套 1 +老婦 1 +老將 1 +老少 1 +老弱 1 +老橋 1 +老漢 1 +考上 1 +考夫卡 1 +考尼律斯 1 +考柯 1 +考牙 1 +考生 1 +考究 1 +考績 1 +考進 1 +考選 1 +而已 1 +耐受 1 +耐庵 1 +耐玩 1 +耐航 1 +耳光 1 +耳勺 1 +耳孔 1 +耳朵眼 1 +耳珠 1 +耳環 1 +耳癤 1 +耳蝸 1 +耳門 1 +耳骨 1 +耶索洛 1 +耶路撒冷 1 +耽擱 1 +聆聽 1 +聖人 1 +聖保羅 1 +聖克萊爾 1 +聖名 1 +聖地亞哥 1 +聖彌格 1 +聖彼得堡 1 +聖徒 1 +聖拉扎爾 1 +聖歌 1 +聖水 1 +聖求 1 +聖潔 1 +聖祖 1 +聖神 1 +聖經 1 +聖訓 1 +聖赫勒拿 1 +聖赫勒拿島戴勝 1 +聖路易斯 1 +聖體 1 +聘問 1 +聘用 1 +聚氯乙烯 1 +聚禮 1 +聚苯乙烯 1 +聚變 1 +聚體 1 +聞名 1 +聞言 1 +聯姻 1 +聯播 1 +聯江 1 +聯浦 1 +聯產 1 +聯美 1 +聰敏 1 +聲恆 1 +聲援 1 +聲波 1 +聲谷 1 +聲門 1 +聲音 1 +聶丞益 1 +職員 1 +職棒 1 +聽到 1 +聽命 1 +聽從 1 +聽眾 1 +聽聞 1 +聾人 1 +肅宗 1 +肆 1 +肆意 1 +肇 1 +肉夾 1 +肉湯 1 +肉瘤 1 +肉緊 1 +肌肉 1 +肖嚴 1 +肚臍 1 +肚餓 1 +肝 1 +股市 1 +股本 1 +肥牛 1 +肥田 1 +肥胖 1 +肩 1 +肯 1 +肯亞 1 +肯特 1 +育有 1 +育樂 1 +育空 1 +肺病 1 +胃 1 +胃石 1 +背上 1 +背依 1 +背包 1 +背叛 1 +背後 1 +背靠 1 +背面 1 +背鰭 1 +胎 1 +胚 1 +胚胎 1 +胞 1 +胞弟 1 +胡特勒 1 +胡禮 1 +胡蜂 1 +胡馬雍 1 +胸痛 1 +胸管 1 +胸部 1 +胸鰭 1 +能人 1 +能否 1 +能幹 1 +脆 1 +脊椎 1 +脫疽 1 +脫落 1 +脫隊 1 +脫離 1 +脱口秀 1 +脾氣 1 +腐敗 1 +腐蝕 1 +腓力 1 +腔 1 +腫瘤 1 +腳掌 1 +腳本 1 +腳點 1 +腸胃 1 +腸道 1 +腸骨 1 +腹 1 +腿 1 +腿部 1 +膝傷 1 +膝頭 1 +膠 1 +膠州 1 +膠東 1 +膠澳 1 +膠體 1 +膨脹 1 +膽 1 +膽酸 1 +臉 1 +臉頰 1 +臉龐 1 +臘 1 +臥龍 1 +臧 1 +臨 1 +臨榆 1 +臨終 1 +臨高 1 +自作自受 1 +自保 1 +自信 1 +自卑 1 +自在 1 +自學 1 +自帶 1 +自強 1 +自從 1 +自成 1 +自用 1 +自發 1 +自製 1 +自訂 1 +自負 1 +自辦 1 +至上 1 +至善 1 +至柔 1 +至正 1 +至死不渝 1 +至關 1 +至關重要 1 +致使 1 +致函 1 +致恐 1 +致病 1 +致瘋 1 +致癌 1 +臺大 1 +舀出 1 +舅父 1 +興 1 +興國 1 +興學 1 +興業 1 +興海 1 +興祖 1 +舉世矚目 1 +舉例 1 +舉國 1 +舉止 1 +舉薦 1 +舉起 1 +舊友 1 +舊屋 1 +舊時 1 +舊稱 1 +舊部 1 +舊金山 1 +舌頭 1 +舍爾 1 +舍訥費爾德 1 +舒 1 +舒查特 1 +舒爾特 1 +舜初 1 +舞 1 +舞劇 1 +舞陽 1 +舟 1 +航天 1 +航站 1 +般若 1 +船塢 1 +船山 1 +船業 1 +船體 1 +艦身 1 +良 1 +良師益友 1 +良心 1 +良性 1 +良田 1 +良知 1 +艱巨 1 +色帶 1 +色情 1 +色目 1 +色調 1 +艷姬 1 +艷麗 1 +艾伍士 1 +艾倫 1 +艾塞羅 1 +艾夏 1 +艾崔奇 1 +艾巴德 1 +艾度蘭 1 +艾琳 1 +艾瑞 1 +艾瑪 1 +艾登堡 1 +艾美 1 +艾蓮娜 1 +艾薩克 1 +艾迴 1 +艾雲 1 +艾麗卡 1 +芬妮 1 +芬華絲 1 +芬迪絲 1 +芭蕉 1 +芭黎絲 1 +花上 1 +花俏 1 +花園蔥蝸牛 1 +花坮 1 +花城 1 +花店 1 +花旗 
1 +花月 1 +花果 1 +花枝 1 +花瓶 1 +花甲 1 +花蜜 1 +花鞋 1 +苗栗 1 +苗穗 1 +苟且 1 +若愚 1 +若羌 1 +若英 1 +苦 1 +苦力 1 +苦悶 1 +苦情 1 +苦苣苔 1 +苦讀 1 +苯並芘 1 +苯乙烯 1 +英一 1 +英乙 1 +英倫 1 +英傑 1 +英勇 1 +英吋 1 +英國短毛豬 1 +英寸 1 +英尺 1 +英年 1 +英廷 1 +英格瑪 1 +英男 1 +英里 1 +英龍華 1 +茂 1 +茂名 1 +范恩 1 +茄南 1 +茄芮 1 +茅家 1 +茲羅提 1 +茶樓 1 +茶湯 1 +茶館 1 +荃灣 1 +荃麟 1 +草原 1 +草地 1 +草坪 1 +草席 1 +草稿 1 +荊州 1 +荒地 1 +荒蕪 1 +荒誕不經 1 +荔灣 1 +荷爾蒙 1 +荷銀 1 +莆 1 +莊嚴 1 +莊王 1 +莎樂美 1 +莫 1 +莫吉爾諾 1 +莫埃索 1 +莫扎特 1 +莫札特 1 +莫桑 1 +莫瑙恩 1 +莫瓦桑 1 +莫納加斯 1 +莫臥兒 1 +莫過 1 +莫里亞 1 +莽山 1 +菅 1 +菊 1 +菊花 1 +菜 1 +華倫西亞 1 +華少 1 +華新 1 +華族 1 +華林 1 +華爾 1 +華界 1 +華石 1 +華秀 1 +華納 1 +華西 1 +華頓 1 +菲力 1 +菲國 1 +菲德爾 1 +菲爾 1 +菲萊 1 +菲詩 1 +菸害 1 +萊因 1 +萊夫斯 1 +萊希 1 +萊斯特 1 +萊爾 1 +萊特曼 1 +萊茵蘭 1 +萊蕪 1 +萊采巴 1 +萌 1 +萌芽 1 +萎縮 1 +萬一 1 +萬丹 1 +萬貴 1 +落 1 +落下 1 +落實 1 +落敗 1 +落葉 1 +葆玖 1 +葉利欽 1 +葉士域治 1 +葉序 1 +葉綠 1 +著手 1 +著有 1 +著譯 1 +葛 1 +葛力馬 1 +葛朱 1 +葛浩文 1 +葛羅斯 1 +葛蕾絲 1 +葛量洪 1 +葡 1 +葡超 1 +葫蘆 1 +葬禮 1 +葵青 1 +蒂利妮 1 +蒂娜 1 +蒂迦納 1 +蒙丹 1 +蒙卡達 1 +蒙哥 1 +蒙哥馬利 1 +蒙塔尼萊博恩 1 +蒙巴薩 1 +蒙得維 1 +蒙特利爾 1 +蒙羞 1 +蒙面 1 +蒙馬特 1 +蒲 1 +蒲飛 1 +蒸氣 1 +蒸發 1 +蒼白 1 +蓄水 1 +蓋兒 1 +蓋因 1 +蓋多 1 +蓋曼 1 +蓋朗杜克西亞 1 +蓋頂 1 +蓓 1 +蓓天翼龍 1 +蓬塔德馬塔 1 +蓬拉貝 1 +蓬皮杜 1 +蓮 1 +蓮安 1 +蓮花 1 +蔑稱 1 +蔡斯 1 +蔣公 1 +蕙嫻 1 +蕨類 1 +蕩漾 1 +蕾妮 1 +薄 1 +薄弱 1 +薄扶林 1 +薔 1 +薛慶 1 +薦 1 +薩克森 1 +薩凡娜 1 +薩卡拉瓦 1 +薩哈 1 +薩哈林 1 +薩平頓 1 +薩德 1 +薩拉只 1 +薩摩亞 1 +薩爾曼 1 +薩爾瓦多 1 +薩爾茨卡默古特 1 +薩爾馬提亞 1 +薩瑞阿尼迪 1 +薩維塔 1 +薩維奧洛夫 1 +薩馬 1 +薪俸 1 +藉助 1 +藉此 1 +藍儂 1 +藍寶石華麗雨林 1 +藍尼 1 +藍本 1 +藍欽 1 +藍潟 1 +藍灰 1 +藍田 1 +藍白 1 +藍背 1 +藍邊 1 +藍領 1 +藍黨 1 +藏之介 1 +藏寶 1 +藏有 1 +藝 1 +藝名 1 +藝能 1 +藝謀 1 +藝電 1 +藤原 1 +藤木 1 +藤本 1 +藤村 1 +藤枝 1 +藤藝 1 +藥品 1 +藥師 1 +藥材 1 +藥水 1 +藥石 1 +藩主 1 +藩士 1 +藩西 1 +蘇利文 1 +蘇北 1 +蘇尋三 1 +蘇木 1 +蘇格拉底 1 +蘇維匯 1 +蘇美爾 1 +蘇萊曼尼亞 1 +蘇醒 1 +蘇里南 1 +蘊藏 1 +蘭利 1 +蘭卡斯特 1 +蘭封 1 +蘭弗朗克 1 +蘭德 1 +虎式 1 +虎棒 1 +虎翼 1 +虎視眈眈 1 +虔信 1 +處之泰然 1 +處女 1 +處決 1 +處置 1 +處長 1 +虛弱 1 +虛榮 1 +虛無 1 +號吾 1 +號子 1 +號稱 1 +號誌 1 +虢 1 +虢國 1 +虹 1 +虹橋 1 +蚊類 1 +蚩尤 1 +蛇油 1 +蛇種 1 +蛇魔 1 +蛋 1 +蛋白質 1 +蛙 1 +蜂擁而至 1 +蜂蜜 1 +蜆殼 1 +蜚聲 1 +蜥蜴 1 +蜿蜒 1 +蝴蝶 1 +融入 1 +融化 1 +融和 1 +融雪 1 +螞蟻 1 +螢幕 1 +蟬聯 1 +蟲 1 +蟲洞 1 +蠟浸 1 +蠶院 1 +蠻子 1 +血型 1 +血液 1 +血竭 1 +血管 1 +血腥 1 +行人 1 +行使 1 +行列 1 +行將 1 +行用 1 +行禮 1 
+行長 1 +行騙 1 +術 1 +街上 1 +街名 1 +街市 1 +街路 1 +街頭 1 +衛理 1 +衝動 1 +衝鋒 1 +衡 1 +衡量 1 +衢山 1 +衣 1 +衣冠 1 +衣物 1 +衣索比亞 1 +表型 1 +表妹 1 +表姐 1 +表徵 1 +表情 1 +表態 1 +表揚 1 +表格 1 +表決 1 +表白 1 +表述 1 +衰敗 1 +衰落 1 +袖手旁觀 1 +袖箭 1 +被告 1 +被子 1 +裁決 1 +裁減 1 +裂縫 1 +裂變 1 +裋褐 1 +裕 1 +裕智 1 +裕軍 1 +裙子 1 +補償 1 +補天 1 +補教 1 +補時 1 +補褂 1 +裝修 1 +裝備 1 +裝嵌 1 +裝有 1 +裝瓶 1 +裝葯 1 +裝設 1 +裝載 1 +裴 1 +裴林 1 +裸子 1 +裸照 1 +製備 1 +製得 1 +複數 1 +褐色 1 +褪色 1 +褲 1 +褲子 1 +褲袋 1 +襄 1 +襄助 1 +襄王 1 +襄陽 1 +襲 1 +襲封 1 +西亞特 1 +西京 1 +西周 1 +西哈莫尼 1 +西坑 1 +西域 1 +西夏 1 +西奧多 1 +西宮 1 +西岸 1 +西島 1 +西廠 1 +西式 1 +西弗萊德 1 +西斯廷 1 +西晉 1 +西段 1 +西河 1 +西洋坪 1 +西漢 1 +西甌 1 +西線 1 +西美 1 +西蒙 1 +西薩 1 +西蘭卡普 1 +西西里 1 +西距 1 +西迪 1 +西鄉 1 +要是 1 +要脅 1 +要衝 1 +要道 1 +見人 1 +見稱 1 +見聞 1 +見解 1 +見識 1 +見長 1 +規例 1 +覓食 1 +視乎 1 +視作 1 +視圖 1 +視角 1 +親人 1 +親信 1 +親政 1 +親朋 1 +親筆 1 +親臨 1 +親身 1 +覺察 1 +覽 1 +觀光 1 +觀察 1 +觀念 1 +觀戰 1 +觀望 1 +觀看 1 +觀者 1 +角膜 1 +解僱 1 +解夢 1 +解析 1 +解答 1 +解職 1 +解脫 1 +解說 1 +觸怒 1 +觸手可及 1 +觸覺 1 +觸診 1 +言官 1 +言語 1 +言辭 1 +訂位 1 +訃告 1 +訄書 1 +訇開 1 +計委 1 +計謀 1 +討逆 1 +訓 1 +託 1 +記念 1 +記述 1 +記集 1 +設站 1 +許昌 1 +許諾 1 +許願 1 +訴 1 +訴求 1 +訴諸 1 +註 1 +註明 1 +註銷 1 +詐死 1 +詔書 1 +評出 1 +評判 1 +評鑑 1 +詛咒 1 +詞幹 1 +詞義 1 +詢問 1 +試劑 1 +試播 1 +試種 1 +試製 1 +試音 1 +試飛 1 +詩文 1 +該事 1 +該人 1 +該墓 1 +該島 1 +該年 1 +該批 1 +該族 1 +該會 1 +該條 1 +該段 1 +該科 1 +該系 1 +該處 1 +該路 1 +該黨 1 +詳情 1 +詳細 1 +詹姆士 1 +詼諧 1 +誇德拉多 1 +誇祖魯 1 +誌 1 +誌家 1 +認一民 1 +認同 1 +認定 1 +認罪 1 +認證 1 +認輔 1 +誓言 1 +誕 1 +誕下 1 +誘因 1 +語文 1 +語法 1 +語流 1 +語訓 1 +語調 1 +語速 1 +語音 1 +誠意 1 +誤 1 +誤信 1 +誤差 1 +誤會 1 +誤槍 1 +誤譯 1 +誥命 1 +誦 1 +說出 1 +說客 1 +說成 1 +說話 1 +說謊 1 +說道 1 +課本 1 +誹謗 1 +調值 1 +調停 1 +調入 1 +調和 1 +調控 1 +調水 1 +調沙 1 +調研 1 +調節 1 +調職 1 +調解 1 +諂媚 1 +談判 1 +談妥 1 +談論 1 +請來 1 +請辭 1 +請願 1 +論事 1 +諜海 1 +諧波 1 +諶 1 +諸 1 +諸如 1 +諸暨 1 +諸河 1 +諺言 1 +諾丁漢 1 +諾域治 1 +諾斯 1 +諾曼 1 +諾爾曼 1 +謀取 1 +謀士 1 +謀求 1 +謀職 1 +謁者 1 +謇 1 +謊言 1 +謙卑 1 +謚 1 +講完 1 +講究 1 +講談 1 +講道 1 +謝世 1 +謝列梅捷沃 1 +謝爾比 1 +謝瓦爾德納澤 1 +謝蓋爾 1 +謹 1 +謹慎 1 +證 1 +譚 1 +譜代 1 +警務 1 +警句 1 +警告 1 +警員 1 +警戒 1 +警衛 1 +警覺 1 +警鐘 1 +譯作 1 +譯員 1 +譯場 1 +譯本 1 +議席 1 +譴責 1 +護佑 1 +護城 1 +護墊 1 +護送 1 +讀取 1 +讀法 1 +變動 1 +變差 1 +變調 1 +變身 1 +變遷 1 +變革 1 +讓步 1 +讓開 1 +讚喻 1 +讚揚 
1 +讚美 1 +讚譽 1 +谷山 1 +谷氨酸 1 +豆瓣 1 +豈 1 +豎立 1 +豎起 1 +豐久 1 +豐厚 1 +豐城 1 +豐臣 1 +豐隆 1 +象數 1 +象晉 1 +象牙 1 +象牙喙啄木鳥 1 +豢養 1 +豪宅 1 +豪門 1 +豫南 1 +豬 1 +豬圈 1 +豬油 1 +豬肉 1 +貂 1 +貓咪 1 +貓囒 1 +貓科 1 +貝克 1 +貝克漢 1 +貝加爾 1 +貝南 1 +貝斯 1 +貝爾普 1 +貝爾蘇斯 1 +貝碧嘉 1 +貝納斯科 1 +貝都因 1 +貝類 1 +貞昌 1 +貞潔 1 +貞觀 1 +負擔 1 +負芻 1 +負荷 1 +負面 1 +負額 1 +財經 1 +財落 1 +貢 1 +貢品 1 +貢哥拉 1 +貢嘎 1 +貢巴 1 +貧 1 +貧乏 1 +貧窮 1 +貧鈾 1 +貨 1 +貨品 1 +貨機 1 +販賣 1 +貪圖 1 +貪婪 1 +貪心 1 +貪瀆 1 +貫徹 1 +貫穿 1 +貫通 1 +責怪 1 +責難 1 +貴築 1 +貴賓 1 +貴陽 1 +貴霜 1 +貶意 1 +買入 1 +買賣 1 +費曼 1 +費爾南多 1 +費用 1 +費盡 1 +費羅 1 +貼身 1 +賀特 1 +賀立 1 +賄選 1 +資 1 +資政 1 +資陽 1 +賈亞辛哈 1 +賈多特 1 +賈斯丁 1 +賈斯珀 1 +賈氏 1 +賓客 1 +賓尼迪斯 1 +賓州 1 +賞識 1 +賠禮 1 +賡臣 1 +賢思 1 +賣 1 +賣出 1 +賣到 1 +賣地 1 +賣家 1 +賣掉 1 +賣空 1 +賤女 1 +賤民 1 +質詢 1 +賭徒 1 +賭檔 1 +賴宣 1 +賺取 1 +賺錢 1 +購得 1 +購置 1 +賽場 1 +賽普勒斯 1 +賽爾金德 1 +賽車 1 +賽道 1 +贈 1 +贈送 1 +贊博尼 1 +贊成 1 +贊比西亞 1 +贏家 1 +贖回 1 +赤坂 1 +赤壁 1 +赤樹 1 +赤狐 1 +赤鱲 1 +赦 1 +赫伯特 1 +赫塔卜 1 +赫斯 1 +赫比格 1 +赫爾克 1 +赫爾辛基 1 +赫雷爾斯 1 +赫魯曉夫 1 +走上 1 +走到 1 +走勢 1 +走漏 1 +走私 1 +起事 1 +起伏 1 +起初 1 +起名 1 +起因 1 +起始 1 +起建 1 +起止 1 +起死回生 1 +起碼 1 +起端 1 +起舞 1 +起落 1 +起訖 1 +起降 1 +起點 1 +趁 1 +超出 1 +超導 1 +超強 1 +超我 1 +超時 1 +超武 1 +超然 1 +超重 1 +超齡 1 +越亮 1 +越共 1 +越前 1 +越好 1 +越弱 1 +越戰 1 +越早 1 +越暗 1 +越牆 1 +越發 1 +越近 1 +越過 1 +趕往 1 +趙氏 1 +趟 1 +趣事 1 +趨勢 1 +趨於 1 +足不出戶 1 +足夠 1 +足見 1 +足跡 1 +趾爪 1 +趾骨 1 +跋扈 1 +跌 1 +跑 1 +跑壘 1 +跑步 1 +跑車 1 +跑馬 1 +跟操 1 +跟班 1 +跟蹤 1 +跟進 1 +跟隨 1 +跨 1 +跨國 1 +跨度 1 +跨步 1 +跨足 1 +跨過 1 +路政 1 +路易斯安那 1 +路濟亞 1 +路綫 1 +路網 1 +路透 1 +路過 1 +路障 1 +路面 1 +跳動 1 +跳槽 1 +跳過 1 +跳遠 1 +跳高 1 +踏上 1 +踏入 1 +踢進 1 +躁 1 +躁動 1 +躍升 1 +身受 1 +身型 1 +身旁 1 +身為 1 +身無分文 1 +身著 1 +身軀 1 +身高 1 +躬耕 1 +躲到 1 +車上 1 +車仁 1 +車型 1 +車士打菲特 1 +車外 1 +車尾 1 +車市 1 +車廠 1 +車手 1 +車票 1 +車程 1 +車窗 1 +車系 1 +車號 1 +車費 1 +車路士 1 +車迷 1 +車頭 1 +軋箏 1 +軌跡 1 +軍中 1 +軍備 1 +軍功 1 +軍務 1 +軍委 1 +軍師 1 +軍援 1 +軍方 1 +軍服 1 +軍營 1 +軍艦 1 +軍裝 1 +軍階 1 +軍需 1 +軒轅 1 +軟 1 +軟化 1 +軟硬體 1 +軟骨 1 +軸 1 +軸心 1 +較低 1 +較佳 1 +較厚 1 +較快 1 +較深 1 +載人 1 +載淳 1 +輔 1 +輔佐 1 +輕微 1 +輕易 1 +輕軌 1 +輕鐵 1 +輕髻 1 +輕鬆 1 +輝 1 +輝彥 1 +輪周 1 +輪廓 1 +輪流 1 +輪船 1 +輪迴 1 +輯 1 +輯錄 1 +輸 1 +輸掉 1 +輸精 1 +輸血 1 +輸送 1 +輻轍 1 +輻鰭 1 +輾轉 1 +轅 1 +轉交 1 +轉任 1 +轉動 1 +轉化 1 
+轉向 1 +轉型 1 +轉差 1 +轉往 1 +轉念 1 +轉播 1 +轉會 1 +轉正 1 +轉角 1 +轉賣 1 +轉赴 1 +辛普朗 1 +辛普森 1 +辛辛那提 1 +辜 1 +辟邪 1 +辦學 1 +辦有 1 +辨別 1 +辨明 1 +辨識 1 +辭典 1 +辭官 1 +辭歲 1 +辯證 1 +辰國 1 +辰男 1 +農事 1 +農墾 1 +農書 1 +農林 1 +農舍 1 +迅 1 +迅即 1 +迅猛 1 +迎 1 +迎神 1 +迎賓 1 +迎送 1 +迎面 1 +近似 1 +近侍 1 +近平 1 +近日 1 +近東 1 +近海 1 +近現代 1 +近親 1 +近鄰 1 +返 1 +返樸歸真 1 +迦南 1 +迦納 1 +迪克 1 +迪克蘭 1 +迪士尼 1 +迪斯雷利 1 +迪比亞吉奧 1 +迪爾汗 1 +迪米特 1 +迫切 1 +述 1 +迴流 1 +迷你變色龍 1 +迷唐 1 +迷路 1 +追兇 1 +追回 1 +追封 1 +追尋 1 +追尾 1 +追思 1 +追憶 1 +追查 1 +追根究底 1 +追殺 1 +追求 1 +追究 1 +追討 1 +追述 1 +退位 1 +退回 1 +退夷 1 +退居 1 +退敵 1 +退隱 1 +送來 1 +送到 1 +送回 1 +送殯 1 +送給 1 +送院 1 +逃亡 1 +逃奔 1 +逃至 1 +逃跑 1 +逆 1 +逆戟鯨 1 +逍遙 1 +透徹 1 +透支 1 +透水 1 +透視 1 +透鏡 1 +逐客 1 +途中 1 +途人 1 +途經 1 +這兒 1 +這時 1 +通俗 1 +通商 1 +通天 1 +通宏 1 +通州 1 +通渭 1 +通貨 1 +通通 1 +通運 1 +通靈 1 +通風 1 +逛街 1 +速往 1 +速銷 1 +造價 1 +造反 1 +造就 1 +造幣 1 +造福 1 +造血 1 +造訪 1 +造謠 1 +逢吉 1 +連串 1 +連克 1 +連坐 1 +連年 1 +連座 1 +連成 1 +連拍 1 +連筆 1 +連篇累牘 1 +連結 1 +連絡 1 +連通 1 +連進 1 +連餓 1 +週末 1 +週邊 1 +進位 1 +進來 1 +進出 1 +進動 1 +進犯 1 +逼 1 +逼使 1 +逼停 1 +逼到 1 +逾期 1 +遂起 1 +遇上 1 +遇刺 1 +遇有 1 +遇陛 1 +遇難 1 +遊憩 1 +遊擊 1 +遊歷 1 +遊艇 1 +遊覽 1 +遊說 1 +遊離 1 +運 1 +運回 1 +運往 1 +運煤 1 +運算 1 +運糧 1 +運補 1 +運載 1 +遍 1 +遍布 1 +過冷 1 +過剩 1 +過多 1 +過往 1 +過敏 1 +過橋 1 +過濾 1 +過甚 1 +過繼 1 +過苛 1 +過路 1 +過頭 1 +道世民 1 +道具 1 +道墟 1 +道士 1 +道學 1 +道宇 1 +道安 1 +道格拉斯 1 +道歉 1 +道理 1 +道綽 1 +道羅 1 +道義 1 +道靜 1 +達上 1 +達人 1 +達克斯 1 +達古武 1 +達恩利 1 +達拉斯 1 +達拏 1 +達拖錯 1 +達母拿錯 1 +達濠 1 +達爾文 1 +達章 1 +達華 1 +達賴 1 +違背 1 +遙陽 1 +遜位 1 +遞交 1 +遞增 1 +遠呂智 1 +遠嫁 1 +遠揚 1 +遠日 1 +遠洋 1 +遠處 1 +遠遠 1 +遠離 1 +遣 1 +遣返 1 +適之 1 +適用 1 +遭殃 1 +遮天 1 +遮蔭 1 +遮陰 1 +遲 1 +遲遲 1 +遷出 1 +遷居 1 +遷校 1 +選上 1 +選修 1 +選定 1 +選用 1 +選美 1 +選訓 1 +選調 1 +選進 1 +選題 1 +遹 1 +遺物 1 +遺留 1 +遺腹 1 +遺迹 1 +遺骸 1 +遼西翼龍 1 +避 1 +避禍 1 +避開 1 +邁克 1 +邁向 1 +邁阿密 1 +還擊 1 +還有 1 +邊區 1 +邗江 1 +那時 1 +那普拉夫尼克 1 +那曲 1 +邦國 1 +邦德 1 +邦蒂 1 +邦達倉 1 +邪惡 1 +邪神 1 +邪馬台 1 +邱家 1 +邳縣 1 +邵伯 1 +邵氏 1 +郊狼 1 +郎 1 +郝 1 +郡區 1 +郡縣 1 +郡艾塞克斯 1 +部位 1 +部字 1 +部將 1 +部首 1 +郪江 1 +郫縣 1 +郭家 1 +郵報 1 +郵輪 1 +都城嘉慕 1 +都察 1 +都尉 1 +都會 1 +都有 1 +都督 1 +都靈 1 +鄂 1 +鄂倫春 1 +鄂溫克 1 +鄂霍次克 1 +鄉內 1 +鄉團 1 +鄉村 1 +鄉長 1 +鄰 1 +鄰域 1 +鄰居 1 +鄰里 1 +酃縣 1 +酆 1 +配上 1 +配件 1 +配備 1 
+配器 1 +配有 1 +配角 1 +酒家 1 +酒杯 1 +酒樓 1 +酒鬼 1 +酩酊大醉 1 +酵母 1 +酷似 1 +酷刑 1 +醉醺醺 1 +醋酸根 1 +醫書 1 +醫科 1 +醫術 1 +醬貨 1 +醴陵 1 +釀成 1 +釀造 1 +釉色 1 +釋出 1 +釋迦 1 +釋迦牟尼 1 +里士滿 1 +里奧多 1 +里港 1 +里馬 1 +重創 1 +重力 1 +重回 1 +重復 1 +重心 1 +重情 1 +重播 1 +重核 1 +重物 1 +重獲 1 +重現 1 +重生 1 +重用 1 +重疊 1 +重禮 1 +重組 1 +重義 1 +重考 1 +重製 1 +重複 1 +重見天日 1 +重讀 1 +重鎮 1 +重開 1 +重陽 1 +重音 1 +重鳳 1 +野外 1 +野心勃勃 1 +野戰 1 +野木 1 +野球 1 +野菜 1 +量度 1 +金剛 1 +金寶 1 +金帶英麗魚 1 +金幣 1 +金平 1 +金氏 1 +金泉 1 +金浦 1 +金湖 1 +金牛 1 +金獎 1 +金箔 1 +金羅斯 1 +金美 1 +金華 1 +金質 1 +金邊 1 +金銀 1 +金錢 1 +金門 1 +金靴 1 +金頂 1 +金魚 1 +金鵰 1 +釜山 1 +針劑 1 +釧路 1 +鈇 1 +鈦 1 +鈺源 1 +鉑金 1 +銀杏 1 +銀熊 1 +銀牌 1 +銀白 1 +銀紅 1 +銀色 1 +銅仁 1 +銅像 1 +銅削 1 +銅斧 1 +銅柄 1 +銅臿 1 +銅製 1 +銅銎 1 +銅錛 1 +銅錢 1 +銘 1 +銘皖 1 +銘銘 1 +銜稱 1 +銠 1 +銳利 1 +銷毀 1 +銷量 1 +鋒 1 +鋪成 1 +鋪有 1 +鋸齒龍 1 +鋼板 1 +錄影 1 +錄得 1 +錄放影機 1 +錢上 1 +錦 1 +錦俊 1 +錦承 1 +錦江 1 +錦田 1 +錫 1 +錫伯 1 +錫勇 1 +錫昌 1 +錯 1 +錯視 1 +錯覺 1 +錳 1 +錳礦 1 +鍊金 1 +鍋中 1 +鍋內 1 +鍋爐 1 +鍔 1 +鍛鍊 1 +鍝 1 +鍾 1 +鎖妖 1 +鎖閉 1 +鎮守 1 +鎮岳 1 +鎮朔 1 +鎮賚 1 +鎮里 1 +鎮靜 1 +鎰 1 +鎳銀 1 +鏈 1 +鏡波 1 +鏡湖 1 +鐳 1 +鐵削 1 +鐵匾 1 +鐵棍 1 +鐵民 1 +鐵爐 1 +鐵管 1 +鐵釘 1 +鐵銹 1 +鐵錛 1 +鑑別 1 +鑑定 1 +鑑泉 1 +鑑證 1 +鑒定 1 +鑫新 1 +鑽入 1 +鑽出 1 +鑽探 1 +鑿出 1 +長凳 1 +長史 1 +長婁 1 +長孫 1 +長岡 1 +長崎 1 +長廊 1 +長廷 1 +長方 1 +長榮 1 +長毛 1 +長治 1 +長溝 1 +長滿 1 +長瑪喀比 1 +長盛 1 +長笛 1 +長篇 1 +長編 1 +長跑 1 +長頸鹿 1 +長髮 1 +門修斯 1 +門廳 1 +門式 1 +閃米特 1 +閃長 1 +閃電 1 +閉日 1 +開價 1 +開光 1 +開啟 1 +開場 1 +開墾 1 +開學 1 +開工 1 +開往 1 +開戰 1 +開拓 1 +開挖 1 +開支 1 +開教 1 +開業 1 +開槍 1 +開球 1 +開瑞坦 1 +開票 1 +開車 1 +開辦 1 +開錄 1 +閑聊 1 +閑談 1 +閒言閒語 1 +間斷 1 +間碟 1 +間距 1 +閘口 1 +閘機 1 +閣 1 +閩侯 1 +閩南 1 +闖進 1 +關中 1 +關斷 1 +關連 1 +闡述 1 +闢 1 +阡陌 1 +阪神 1 +防凍 1 +防止 1 +防盜 1 +防護 1 +阻塞 1 +阻撓 1 +阻隔 1 +阿一 1 +阿仙奴 1 +阿信 1 +阿修羅 1 +阿內爾卡 1 +阿勒格尼郡 1 +阿勝 1 +阿勞 1 +阿基里斯 1 +阿堯 1 +阿奇里斯 1 +阿寧 1 +阿布 1 +阿拉法特 1 +阿斗 1 +阿普第 1 +阿曼達 1 +阿東 1 +阿格拉 1 +阿格雷斯蒂 1 +阿森斯 1 +阿森納 1 +阿比西尼亞豬 1 +阿波羅 1 +阿爾及利亞 1 +阿爾及爾 1 +阿爾布巴 1 +阿爾扎阿爾拉齊蓋 1 +阿爾法 1 +阿爾發 1 +阿爾茨海默 1 +阿爾高 1 +阿特 1 +阿特拉斯 1 +阿猴 1 +阿瑜陀耶 1 +阿穆爾 1 +阿羅那順 1 +阿耳忒彌斯 1 +阿聯酋 1 +阿育 1 +阿茲海默 1 +阿諾 1 +阿賈克斯 1 +阿赫 1 +阿連德 1 +阿道夫 1 +阿達姆庫斯 1 +阿里 1 +阿隆索 1 +陀斯妥也夫斯基 1 +附上 1 +附加 1 +附蟲 1 +附表 1 +附身 1 +降將 1 +降格 1 +降水 1 +降班 1 +降臨 1 +降魔 1 +限 1 
+限定 1 +限時 1 +陞 1 +陡壁 1 +院士 1 +院子 1 +院落 1 +陣 1 +除冰 1 +除夕 1 +除此 1 +除非 1 +陪葬 1 +陪都 1 +陰天 1 +陰暗 1 +陰陽 1 +陳國 1 +陳屍 1 +陳相 1 +陳述 1 +陵園 1 +陶恩 1 +陷落 1 +陸仔 1 +陸域 1 +陸行 1 +陽 1 +陽安 1 +陽明 1 +隆亨 1 +隊列 1 +隊名 1 +隔日 1 +隔開 1 +隕星 1 +隕鐵 1 +際春 1 +隠居 1 +隨丁 1 +隨便 1 +隨同 1 +隨往 1 +隨時 1 +隨軍 1 +隨隊 1 +險些 1 +險要 1 +隱含 1 +隱姓埋名 1 +隱居 1 +隱性 1 +隱私 1 +隻身 1 +雄 1 +雄師 1 +雄獅 1 +雅克 1 +雅加達 1 +雅各布 1 +雅君 1 +集寧 1 +集結 1 +集聚 1 +雌性 1 +雌獸 1 +雌鯨 1 +雎 1 +雙十 1 +雙子 1 +雙江 1 +雜姓 1 +雜糧 1 +雜處 1 +雜食 1 +雞腿 1 +雞頭 1 +離別 1 +離域 1 +離場 1 +離子 1 +離島 1 +離群索居 1 +離職 1 +難吃 1 +難得 1 +難攻 1 +難過 1 +雨季 1 +雨後春筍 1 +雨林 1 +雪上加霜 1 +雪佛龍 1 +雪兒 1 +雪崩 1 +雪弟 1 +雪梅 1 +雲中 1 +雲亭 1 +雲岩 1 +雲松 1 +雲里 1 +零件 1 +零部件 1 +零食 1 +雷 1 +雷克南 1 +雷克斯 1 +雷切爾 1 +雷姆 1 +雷定 1 +雷昂納多 1 +雷曼 1 +雷王 1 +雷蒂亞 1 +雷雨 1 +電信 1 +電器 1 +電極 1 +電氣 1 +電瓶 1 +電線 1 +電通 1 +電邀 1 +需時 1 +霆鋒 1 +震寰 1 +震波 1 +震災 1 +霍亂 1 +霍伊爾 1 +霍夫堡 1 +霍姆 1 +霍巴特 1 +霍斯 1 +霍普金斯 1 +霍爾滕 1 +霍爾特 1 +霞 1 +霧 1 +露出 1 +露比 1 +露臉 1 +露西 1 +霸佔 1 +霸權 1 +靈前 1 +靈力 1 +靈性 1 +靈感 1 +靈柩 1 +靈活 1 +靈異 1 +靈籤 1 +靈長 1 +靈魂 1 +青 1 +青梅 1 +青森 1 +青睞 1 +青訓 1 +青金 1 +靖 1 +靖雯 1 +靜安 1 +靜岡 1 +靜華 1 +靠右 1 +靠左 1 +面具 1 +面向 1 +面貌 1 +革除 1 +鞏 1 +鞦韆 1 +韃靼 1 +韋 1 +韋契特 1 +韋德 1 +韋拉克魯斯 1 +韋拿 1 +韋斯特 1 +韋科 1 +韌 1 +韓氏 1 +韓浜 1 +音律 1 +音色 1 +音量 1 +音高 1 +韶之 1 +響號 1 +頂上 1 +頂尖 1 +頂峰 1 +頂端 1 +頂級 1 +項鏈 1 +順宗 1 +順岸 1 +順德 1 +順應 1 +順懷 1 +順治 1 +順滑 1 +順陽 1 +頌平 1 +頌揚 1 +預 1 +預估 1 +預告 1 +預知 1 +預示 1 +預約 1 +頑石 1 +頒給 1 +頗 1 +頗多 1 +頗大 1 +頗有 1 +頗盛 1 +頗豐 1 +領事 1 +領取 1 +領奏 1 +領航 1 +領軍 1 +領隊 1 +頡 1 +頭上 1 +頭前 1 +頭型 1 +頭尾 1 +頭槌 1 +頭版 1 +頭盔 1 +頭紗 1 +頭髮 1 +頸 1 +頸部 1 +頹垣 1 +頻 1 +頻寬 1 +頻散 1 +頻繁 1 +頻頻 1 +題獻 1 +題記 1 +額外 1 +額度 1 +類別 1 +類固醇 1 +顥 1 +顯 1 +顯光 1 +顯徑 1 +顯現 1 +顯靈 1 +風化 1 +風尚 1 +風波 1 +風行 1 +風間 1 +風雨 1 +飈 1 +飛往 1 +飛抵 1 +飛毛 1 +飛沫 1 +飛碟 1 +飛鏢 1 +飛靶 1 +飛鳥 1 +飛龍 1 +食人 1 +食肆 1 +食肉 1 +食蟲 1 +食鹽 1 +飲茶 1 +飼料 1 +飼草 1 +飽和 1 +飽經 1 +飾物 1 +餃子 1 +餅 1 +養份 1 +養大 1 +養女 1 +養母 1 +養父 1 +養精蓄銳 1 +養育 1 +養菊 1 +養蠶 1 +餐車 1 +餘 1 +餘熱 1 +餘眾 1 +館前 1 +館名 1 +館址 1 +饃 1 +饑餓 1 +饒平 1 +饕餮 1 +首仗 1 +首個 1 +首名 1 +首場 1 +首屈一指 1 +首席 1 +首戰 1 +首批 1 +首日 1 +首映 1 +首條 1 +首艦 1 +首讀 1 +香 1 +香亭 1 +香儂 1 +香吉士 1 +香味 1 +香坊 1 +香塍 1 +香水 1 +香洲 1 +香火 1 +香織 1 
+馬丁尼茲 1 +馬丁斯維勒 1 +馬上 1 +馬修 1 +馬克安諾 1 +馬克西米利 1 +馬內阿 1 +馬六甲 1 +馬匹 1 +馬喇 1 +馬圈 1 +馬奇頓 1 +馬尼拉 1 +馬托格羅索 1 +馬爾他 1 +馬爾吉阿納 1 +馬爾地夫 1 +馬爾默 1 +馬球 1 +馬約拉那 1 +馬莎 1 +馬薩 1 +馬薩諸塞 1 +馬賽 1 +馬赫盧普 1 +馬路 1 +馬達加斯加 1 +馬里內蒂 1 +馬里蘭 1 +馬雅可夫斯基 1 +馬鞍 1 +馬黑麻 1 +馳名 1 +馴化 1 +駐任 1 +駐地 1 +駐防 1 +駕崩 1 +駙馬 1 +駛 1 +駛入 1 +駛過 1 +駿業 1 +騁遠 1 +騎 1 +騎馬 1 +騏一郎 1 +騙徒 1 +騰出 1 +騰訊 1 +騷擾 1 +驅 1 +驗屍 1 +驗票 1 +驗證 1 +驗電 1 +驚人 1 +驚動 1 +驚喜 1 +驚嘆 1 +驚訝 1 +驚醒 1 +驟減 1 +驟逝 1 +驢肉 1 +驥 1 +骨幹 1 +骯髒 1 +骷髏 1 +體側 1 +體外 1 +體委 1 +體工 1 +體會 1 +體溫 1 +髖骨 1 +高下 1 +高傲 1 +高傲不群 1 +高出 1 +高升 1 +高地 1 +高大 1 +高峰 1 +高座 1 +高手 1 +高效 1 +高新 1 +高杉 1 +高檔 1 +高清 1 +高漲 1 +高熱 1 +高燥 1 +高爾夫 1 +高爾德 1 +高琦 1 +高盧 1 +高聳 1 +高處 1 +高買 1 +高質 1 +高超 1 +高雄 1 +高高在上 1 +髮 1 +髮生 1 +髮辮 1 +鬆髻 1 +鬚 1 +鬚鯨 1 +鬥雞 1 +鬧 1 +鬧出 1 +鬼影 1 +鬼怪 1 +鬼道 1 +魁智 1 +魅惑 1 +魏國 1 +魏斯曼 1 +魏氏 1 +魏澤爾 1 +魔力 1 +魔界 1 +魔石 1 +魔鬼 1 +魚尾 1 +魚腹 1 +魚苗 1 +魚類 1 +魯 1 +魯伯 1 +魯國 1 +魯特 1 +魯登尼亞 1 +魯良新元 1 +魯茨科伊 1 +魯西迪 1 +魯道夫 1 +鮑亞士 1 +鮑克瑟 1 +鮑爾溫 1 +鮑維 1 +鮑里斯 1 +鮑魚 1 +鮮 1 +鮮有 1 +鮮用 1 +鮮虞 1 +鯉齒 1 +鰓蓋 1 +鰭條 1 +鰺沢駅 1 +鱗 1 +鱗甲 1 +鱗骨 1 +鳥 1 +鳥獸 1 +鳥種 1 +鳳 1 +鳳彬 1 +鳴叫 1 +鳴放 1 +鳴道 1 +鴛鴦 1 +鴻南 1 +鴻章 1 +鴻績 1 +鴻華 1 +鴻超 1 +鴻逵 1 +鴻銘 1 +鹽 1 +鹽城 1 +鹽州 1 +鹽酸 1 +鹿兒島 1 +鹿鼎 1 +麒 1 +麗晶 1 +麗泰 1 +麗珍 1 +麗華 1 +麗閣 1 +麥克 1 +麥克佛森 1 +麥克羅伯特森 1 +麥克默多 1 +麥加利 1 +麥卡特尼 1 +麥拉倫 1 +麥格林 1 +麥當勞 1 +麥芽 1 +麥迪文 1 +麩氨酸 1 +麵 1 +麵團 1 +麵皮 1 +麻城 1 +麻塞諸塞 1 +麻將 1 +麻布 1 +麻木 1 +麻痹 1 +黃岡 1 +黃巾 1 +黃昏 1 +黃沙 1 +黃河 1 +黃蜂 1 +黎家 1 +黎明 1 +黎筍 1 +黑奴 1 +黑帶 1 +黑手 1 +黑暗 1 +黑木 1 +黑板 1 +黑死 1 +黑海 1 +黑衫 1 +黑錢 1 +黑鐵木 1 +黑雲 1 +黑髮 1 +默多克 1 +默比施 1 +默默 1 +黛安娜 1 +黛絲 1 +點陣 1 +點點頭 1 +黨團 1 +黨委 1 +黨校 1 +黨歌 1 +黨衛 1 +黨部 1 +黨魁 1 +鼎灶 1 +鼎芬 1 +鼎金 1 +鼓手 1 +鼬鼠 1 +齊國 1 +齋 1 +齒狀 1 +齒輪 1 +齲齒 1 +龍台 1 +龍女 1 +龍文 1 +龍耳 1 +龍頭 1 +龐 1 +龐特佛雷特 1 +龐貝 1 +龜茲 1 diff --git a/syntaxnet/dragnn/conll2017/sample/zh-segmenter-resource/prefix-table b/syntaxnet/dragnn/conll2017/sample/zh-segmenter-resource/prefix-table new file mode 100644 index 0000000000000000000000000000000000000000..1e49511d31a9733c33068faa4dffceb916bcb4c8 Binary files /dev/null and b/syntaxnet/dragnn/conll2017/sample/zh-segmenter-resource/prefix-table 
differ diff --git a/syntaxnet/dragnn/conll2017/sample/zh-segmenter-resource/suffix-table b/syntaxnet/dragnn/conll2017/sample/zh-segmenter-resource/suffix-table new file mode 100644 index 0000000000000000000000000000000000000000..1e6ca998763792cb4a84595351b4513a71d3ea82 Binary files /dev/null and b/syntaxnet/dragnn/conll2017/sample/zh-segmenter-resource/suffix-table differ diff --git a/syntaxnet/dragnn/conll2017/sample/zh-segmenter-resource/tag-map b/syntaxnet/dragnn/conll2017/sample/zh-segmenter-resource/tag-map new file mode 100644 index 0000000000000000000000000000000000000000..7e4f4c0ba54c71e47a061bbb5de6be4d7943e6c9 --- /dev/null +++ b/syntaxnet/dragnn/conll2017/sample/zh-segmenter-resource/tag-map @@ -0,0 +1,43 @@ +42 +NN 21794 +VV 13177 +NNP 8280 +, 5824 +CD 5082 +DEC 4350 +RB 4323 +SFN 4229 +IN 4165 +NNB 3963 +. 3807 +JJ 2318 +VC 1935 +CC 1329 +PRP 996 +DT 994 +EC 942 +FW 778 +AS 718 +MD 681 +( 641 +) 641 +PFA 555 +BB 472 +'' 331 +`` 329 +PRD 324 +/ 202 +: 165 +UH 150 +DEV 96 +HYPH 76 +WP 23 +SFV 18 +XX 17 +ADD 8 +SFA 7 +... 4 +PFN 4 +LS 3 +" 1 +VERB 1 diff --git a/syntaxnet/dragnn/conll2017/sample/zh-segmenter-resource/tag-to-category b/syntaxnet/dragnn/conll2017/sample/zh-segmenter-resource/tag-to-category new file mode 100644 index 0000000000000000000000000000000000000000..13174eaa43d58b1614e797076f8c09af33ee54b8 --- /dev/null +++ b/syntaxnet/dragnn/conll2017/sample/zh-segmenter-resource/tag-to-category @@ -0,0 +1,42 @@ +" PUNCT +'' PUNCT +( PUNCT +) PUNCT +, PUNCT +. PUNCT +... 
PUNCT +/ PUNCT +: PUNCT +ADD NOUN +AS PART +BB VERB +CC CCONJ +CD NUM +DEC PART +DEV PART +DT DET +EC PUNCT +FW X +HYPH PUNCT +IN ADP +JJ ADJ +LS X +MD AUX +NN NOUN +NNB NOUN +NNP PROPN +PFA PART +PFN PART +PRD PRON +PRP PRON +RB ADV +SFA PART +SFN PART +SFV PART +UH X +VC VERB +VERB VERB +VV AUX +WP PRON +XX X +`` PUNCT diff --git a/syntaxnet/dragnn/conll2017/sample/zh-segmenter-resource/word-map b/syntaxnet/dragnn/conll2017/sample/zh-segmenter-resource/word-map new file mode 100644 index 0000000000000000000000000000000000000000..3b10bdefd7229ebd0c1ceb6bb8e283bc11b1cf80 --- /dev/null +++ b/syntaxnet/dragnn/conll2017/sample/zh-segmenter-resource/word-map @@ -0,0 +1,16269 @@ +16268 +, 5851 +的 4289 +. 3759 +在 1273 +年 1165 +9999 1070 +、 952 +是 918 +為 887 +一 863 +於 680 +99 647 +和 639 +9 617 +了 614 +人 467 +個 466 +月 456 +有 453 +他 439 +( 429 +) 429 +與 380 +中 376 +日 356 +」 321 +「 320 +被 315 +這 300 +會 258 +並 255 +以 253 +而 245 +也 244 +上 228 +中國 218 +由 215 +《 213 +》 213 +之 211 +兩 203 +後 202 +及 191 +時 188 +位 186 +· 183 +999 178 +等 175 +到 172 +但 162 +對 158 +大 157 +此 157 +不 156 +其 155 +所 150 +種 143 +或 140 +將 139 +次 132 +美國 131 +成 130 +者 127 +至 125 +該 123 +區 118 +開始 118 +部 117 +三 116 +家 116 +可以 115 +她 115 +都 114 +來 113 +因 113 +國 109 +人口 108 +軍 107 +市 104 +使用 102 +省 102 +從 101 +名 98 +著 97 +則 95 +多 94 +用 94 +日本 93 +沒有 93 +地 92 +曾 92 +第一 92 +他們 91 +州 90 +公司 88 +就 88 +性 88 +由於 88 +其中 87 +地區 87 +新 87 +稱 87 +國家 86 +政府 86 +: 84 +已 84 +主要 83 +小 82 +; 81 +世界 81 +可 81 +大學 81 +下 80 +不同 79 +自 79 +香港 79 +縣 77 +自己 77 +前 76 +因為 76 +研究 76 +總 76 +最 75 +面積 75 +李 74 +還 73 +向 72 +王 72 +進行 72 +它 71 +包括 69 +站 69 +四 68 +號 67 +當時 66 +這些 66 +部分 66 +工作 65 +米 65 +認為 65 +也是 64 +以及 64 +學 64 +村 64 +發現 64 +說 64 +作 63 +又 62 +屬 62 +平方公里 62 +中華 61 +同時 60 +學院 60 +條 60 +成立 59 +第二 59 +二 58 +五 58 +亦 58 +代表 58 +發展 58 +發生 58 +美 58 +能 58 +之後 57 +使 57 +社會 57 +要 57 +一些 56 +人民 56 +內 56 +其他 56 +約 56 +世紀 54 +元 54 +場 54 +過 54 +建築 53 +為了 53 +線 53 +只 52 +張 52 +把 52 +獲得 52 +目前 52 +台 51 +文化 51 +英國 51 +重要 51 +中心 50 +但是 50 +局 50 +更 
50 +許多 50 +之間 49 +可能 49 +如 49 +歷史 49 +遊戲 49 +公里 48 +共 48 +帝國 48 +期間 48 +歲 48 +處 48 +音樂 48 +黨 48 +一般 47 +年代 47 +根據 47 +行星 47 +隊 47 +電影 47 +政治 46 +鐵路 46 +城市 45 +故事 45 +組織 45 +便 44 +學校 44 +所有 44 +科學 44 +英 44 +- 43 +任 43 +作品 43 +指 43 +最後 43 +機 43 +語 43 +通過 43 +間 43 +關係 43 +已經 42 +建立 42 +時間 42 +當 42 +電視 42 +共和 41 +後來 41 +比 41 +管理 41 +表示 41 +讓 41 +通常 41 +高 41 +出現 40 +影響 40 +成功 40 +戰爭 40 +提供 40 +系統 40 +動物 39 +地方 39 +就是 39 +座 39 +設計 39 +負責 39 +鎮 39 +長 39 +館 39 +卻 38 +國際 38 +德國 38 +技術 38 +方面 38 +最終 38 +父親 38 +車站 38 +上海 37 +人物 37 +出 37 +分 37 +台灣 37 +各 37 +層 37 +山 37 +方 37 +河 37 +即 36 +參加 36 +擔任 36 +時期 36 +服務 36 +正式 36 +生活 36 +給 36 +要求 36 +路 36 +運動 36 +9,999 35 +一直 35 +再 35 +單位 35 +委員 35 +很 35 +書 35 +段 35 +民國 35 +法國 35 +理論 35 +人類 34 +均 34 +女 34 +才 34 +教 34 +文 34 +歐洲 34 +決定 34 +漢 34 +現在 34 +第三 34 +航空 34 +行政 34 +足球 34 +雖然 34 +八 33 +問題 33 +小說 33 +我 33 +教育 33 +製作 33 +不是 32 +保護 32 +全國 32 +北 32 +印度 32 +員 32 +形成 32 +很多 32 +得到 32 +活動 32 +節目 32 +西班牙 32 +主義 31 +寺 31 +屆 31 +島 31 +市鎮 31 +方式 31 +時代 31 +最高 31 +生 31 +街 31 +起 31 +需要 31 +99% 30 +中央 30 +另 30 +另外 30 +器 30 +天 30 +得 30 +控制 30 +擁有 30 +每 30 +產生 30 +經濟 30 +羅馬 30 +進入 30 +隨 30 +仍 29 +公園 29 +具有 29 +去 29 +大陸 29 +式 29 +接受 29 +東 29 +球隊 29 +當地 29 +院 29 +雙 29 +9.99 28 +並且 28 +北京 28 +受到 28 +同 28 +如果 28 +學生 28 +工程 28 +時候 28 +港 28 +物 28 +級 28 +計劃 28 +超過 28 +道 28 +電腦 28 +存在 27 +室 27 +對於 27 +情況 27 +戰鬥 27 +方法 27 +林 27 +機場 27 +比賽 27 +總統 27 +義大利 27 +都是 27 +非 27 +非常 27 +點 27 +人員 26 +做 26 +原因 26 +國民 26 +支持 26 +數 26 +法 26 +派 26 +然而 26 +獨立 26 +甚至 26 +生物 26 +聯合 26 +項 26 +主 25 +兒子 25 +出版 25 +劉 25 +南 25 +巴士 25 +幾 25 +我們 25 +權 25 +海拔 25 +第99 25 +經過 25 +議會 25 +賽 25 +99.99 24 +交通 24 +例如 24 +分布 24 +加入 24 +化 24 +同年 24 +城 24 +大量 24 +於是 24 +族 24 +最大 24 +未 24 +海 24 +湖 24 +生產 24 +皇帝 24 +科 24 +第9 24 +系列 24 +高度 24 +9.9 23 +事件 23 +們 23 +內容 23 +命名 23 +型 23 +宣布 23 +導致 23 +帶 23 +必須 23 +成員 23 +本 23 +正 23 +清朝 23 +演出 23 +無 23 +直接 23 +行為 23 +裡 23 +西 23 +距離 23 +軍事 23 +部隊 23 +鄉 23 +銀行 23 +集團 23 +99,999 22 +一樣 22 +不少 22 +不過 22 +傳統 22 +僅 22 +副 22 +反對 22 +單 22 +增加 22 +它們 22 +思想 
22 +有關 22 +業 22 +此外 22 +母親 22 +水 22 +灣 22 +版 22 +紐約 22 +組成 22 +結構 22 +聯盟 22 +聯賽 22 +能力 22 +華 22 +設 22 +語言 22 +附近 22 +除 22 +一起 21 +作用 21 +出生 21 +制 21 +力 21 +受 21 +古 21 +只有 21 +唯一 21 +地位 21 +府 21 +廣泛 21 +植物 21 +海軍 21 +無法 21 +獲 21 +率 21 +球 21 +環境 21 +紀念 21 +結束 21 +舉行 21 +角色 21 +議員 21 +選舉 21 +里 21 +量 21 +韓 21 +體 21 +主席 20 +仍然 20 +六 20 +冠軍 20 +出任 20 +分子 20 +原子 20 +參與 20 +地下 20 +城鎮 20 +天津 20 +工業 20 +希臘 20 +度 20 +引起 20 +採用 20 +攻擊 20 +整個 20 +文學 20 +文物 20 +朝鮮 20 +東北 20 +核 20 +機構 20 +比較 20 +清 20 +猶太 20 +現代 20 +管轄 20 +範圍 20 +細胞 20 +經常 20 +胡 20 +自治 20 +自由 20 +角 20 +逐漸 20 +重新 20 +類型 20 +不久 19 +不能 19 +代 19 +以上 19 +佔領 19 +全 19 +分別 19 +原 19 +台北 19 +唐 19 +多數 19 +天文 19 +字 19 +巴黎 19 +最早 19 +會議 19 +有些 19 +民族 19 +洋 19 +結果 19 +繼續 19 +能夠 19 +趙 19 +造成 19 +達 19 +達到 19 +部份 19 +鄭 19 +風格 19 +不會 18 +亞 18 +令 18 +任何 18 +企業 18 +先後 18 +列車 18 +功能 18 +半 18 +取得 18 +合併 18 +外交 18 +子 18 +廣州 18 +戰役 18 +所以 18 +明朝 18 +期 18 +每年 18 +毛 18 +治療 18 +法院 18 +畢業 18 +疾病 18 +相當 18 +節 18 +艦隊 18 +身體 18 +軍隊 18 +進 18 +陳 18 +離開 18 +領導 18 +體育 18 +99.9 17 +七 17 +你 17 +再次 17 +十 17 +名字 17 +大戰 17 +宗教 17 +家族 17 +希望 17 +廣場 17 +想 17 +戰 17 +採取 17 +提出 17 +改 17 +教堂 17 +新聞 17 +星 17 +曲 17 +最初 17 +歐 17 +漫畫 17 +片 17 +物理 17 +特別 17 +發行 17 +經 17 +總部 17 +自然 17 +蘇聯 17 +行動 17 +製造 17 +西北 17 +資料 17 +選擇 17 +那 17 +金 17 +領域 17 +顆 17 +類 17 +飛機 17 +九龍 16 +低 16 +像 16 +共同 16 +利用 16 +制度 16 +前往 16 +創作 16 +勢力 16 +區域 16 +協助 16 +各種 16 +大樓 16 +家庭 16 +實驗 16 +居民 16 +山東 16 +心理 16 +或者 16 +拒絕 16 +步 16 +武器 16 +民主 16 +法律 16 +爆發 16 +狀態 16 +而且 16 +藝術 16 +表現 16 +記者 16 +設有 16 +設立 16 +資源 16 +軌道 16 +過程 16 +道路 16 +還是 16 +革命 16 +首次 16 +高速 16 +下轄 15 +中共 15 +主角 15 +作戰 15 +初 15 +則是 15 +化石 15 +十分 15 +南京 15 +南部 15 +商 15 +噸 15 +回到 15 +國內 15 +國王 15 +地球 15 +基督 15 +大廈 15 +大約 15 +太陽 15 +女兒 15 +女性 15 +如此 15 +學習 15 +完全 15 +實際 15 +常 15 +常見 15 +幾乎 15 +應用 15 +承認 15 +投資 15 +指出 15 +指揮 15 +普查 15 +未來 15 +東南 15 +橋 15 +此後 15 +火星 15 +版本 15 +牠們 15 +發表 15 +白 15 +直到 15 +碼頭 15 +科技 15 +立法 15 +組 15 +統治 15 +老 15 +職業 15 +著名 15 +蒙古 15 +西部 15 +調查 15 +跟 15 +路線 15 +車輛 15 +農業 15 +這樣 15 +酒 15 +鐵道 15 +集 15 
+/ 14 +99999 14 +999萬 14 +一定 14 +交易 14 +人們 14 +今 14 +以來 14 +位置 14 +使得 14 +俄羅斯 14 +俱樂部 14 +傳播 14 +兒童 14 +公主 14 +劇 14 +北部 14 +博物 14 +合作 14 +基本 14 +境內 14 +外 14 +太平 14 +失去 14 +完成 14 +容易 14 +密度 14 +專業 14 +市場 14 +幫助 14 +建造 14 +抗 14 +擊敗 14 +旗 14 +曾經 14 +有限 14 +架 14 +案 14 +棲息 14 +波蘭 14 +澳門 14 +營運 14 +特色 14 +獎 14 +男 14 +相同 14 +看到 14 +簡稱 14 +系 14 +統計 14 +網路 14 +聯邦 14 +色 14 +董事 14 +規模 14 +視 14 +解決 14 +言 14 +起來 14 +車 14 +這裡 14 +進攻 14 +開發 14 +限制 14 +顯示 14 +黃 14 +99萬 13 +九 13 +倫敦 13 +全部 13 +公路 13 +公開 13 +其後 13 +初期 13 +加上 13 +博士 13 +司令 13 +同意 13 +因而 13 +圖書 13 +土地 13 +埃及 13 +基礎 13 +堂 13 +墨西哥 13 +天主 13 +妻子 13 +娛樂 13 +建 13 +建設 13 +形 13 +形式 13 +從事 13 +手 13 +打 13 +改變 13 +故 13 +教會 13 +數學 13 +數據 13 +數量 13 +早期 13 +更多 13 +東京 13 +梁 13 +樂團 13 +樓 13 +模式 13 +死 13 +死亡 13 +每個 13 +水平 13 +流域 13 +準備 13 +物種 13 +物質 13 +王國 13 +玩家 13 +男性 13 +當選 13 +病 13 +目 13 +目標 13 +相關 13 +知識 13 +社 13 +第四 13 +紀錄 13 +統一 13 +舊 13 +街道 13 +設定 13 +身份 13 +較 13 +辦公 13 +速度 13 +運輸 13 +郡 13 +項目 13 +食物 13 +馬 13 +---- 12 +一帶 12 +上帝 12 +且 12 +中學 12 +中部 12 +之前 12 +京 12 +人數 12 +什麼 12 +以下 12 +份 12 +保留 12 +個人 12 +價值 12 +元素 12 +內部 12 +公元 12 +具 12 +半島 12 +原本 12 +反應 12 +反映 12 +可是 12 +商業 12 +嚴重 12 +基地 12 +大型 12 +女子 12 +孫 12 +將軍 12 +尤其 12 +居住 12 +師 12 +帶來 12 +平均 12 +建議 12 +很大 12 +律師 12 +恆星 12 +恐怖 12 +應 12 +據 12 +改革 12 +政策 12 +新加坡 12 +月台 12 +有時 12 +東部 12 +楊 12 +標準 12 +機關 12 +歌手 12 +決賽 12 +汽車 12 +減少 12 +潛艇 12 +熱帶 12 +瑞典 12 +生命 12 +產品 12 +產業 12 +盃 12 +相對 12 +眾 12 +眾多 12 +知道 12 +神 12 +精神 12 +經營 12 +船 12 +該國 12 +變成 12 +賽事 12 +近 12 +透過 12 +遭到 12 +遺址 12 +避免 12 +郭 12 +醫院 12 +重建 12 +重慶 12 +門 12 +電子 12 +? 
11 +主張 11 +主持 11 +主教 11 +之中 11 +亨利 11 +人士 11 +以前 11 +以色列 11 +件 11 +伊斯蘭 11 +佔 11 +作者 11 +保持 11 +信仰 11 +先生 11 +全球 11 +出身 11 +創立 11 +創辦 11 +力量 11 +去世 11 +反 11 +取代 11 +召開 11 +周 11 +園 11 +團 11 +大會 11 +奧地利 11 +威脅 11 +季 11 +安全 11 +專輯 11 +帝 11 +平方米 11 +強烈 11 +接近 11 +推出 11 +描述 11 +播放 11 +文字 11 +普遍 11 +末 11 +朱 11 +業務 11 +殖民 11 +江 11 +江蘇 11 +涉及 11 +現時 11 +界 11 +留下 11 +目的 11 +相信 11 +看 11 +社區 11 +福建 11 +管 11 +給予 11 +網站 11 +線路 11 +繼承 11 +英格蘭 11 +見 11 +試圖 11 +資訊 11 +超 11 +邊 11 +部門 11 +隻 11 +面 11 +首 11 +99.9% 10 +99.99% 10 +並非 10 +事 10 +事業 10 +交流 10 +以後 10 +來往 10 +供 10 +俄 10 +儘管 10 +勞動 10 +包含 10 +化學 10 +協會 10 +君主 10 +和平 10 +唱片 10 +圈 10 +國旗 10 +國會 10 +報 10 +報告 10 +威廉 10 +學位 10 +寬 10 +廠 10 +徐 10 +復興 10 +感到 10 +手術 10 +投入 10 +接 10 +推動 10 +播出 10 +支 10 +改名 10 +文明 10 +文藝 10 +明顯 10 +有效 10 +杭州 10 +東方 10 +條件 10 +模型 10 +殺 10 +河流 10 +法庭 10 +波 10 +洲 10 +派遣 10 +演員 10 +演唱 10 +火車 10 +爭議 10 +特定 10 +特徵 10 +特殊 10 +獨特 10 +生長 10 +當中 10 +症 10 +發動 10 +發射 10 +確定 10 +神話 10 +移民 10 +空間 10 +立 10 +篇 10 +終於 10 +結婚 10 +綫 10 +維持 10 +總理 10 +群 10 +若 10 +華盛頓 10 +葡萄 10 +蔡 10 +藏 10 +蘇 10 +衝突 10 +西藏 10 +規定 10 +訓練 10 +記 10 +記載 10 +記錄 10 +話 10 +該市 10 +警察 10 +變化 10 +責任 10 +起源 10 +逝世 10 +運行 10 +醫 10 +錦標 10 +關於 10 +陸軍 10 +雜誌 10 +需 10 +類似 10 +飛行 10 +首都 10 +駐 10 +'' 9 +一切 9 +一致 9 +上陣 9 +下降 9 +不斷 9 +不滿 9 +中山 9 +丹麥 9 +之外 9 +事務 9 +互相 9 +介紹 9 +來到 9 +健康 9 +光 9 +內閣 9 +全長 9 +公布 9 +其實 9 +再度 9 +出來 9 +出售 9 +分支 9 +到達 9 +動畫 9 +南方 9 +危險 9 +古代 9 +古典 9 +叫 9 +吃 9 +各類 9 +品 9 +國務 9 +團體 9 +地點 9 +執行 9 +塔 9 +士兵 9 +奪得 9 +好 9 +媒體 9 +字母 9 +孩子 9 +學者 9 +寫 9 +對手 9 +就讀 9 +工人 9 +帶領 9 +廟 9 +引擎 9 +強 9 +強大 9 +後期 9 +快速 9 +恢復 9 +意外 9 +戰略 9 +打擊 9 +批評 9 +拍攝 9 +接觸 9 +攻入 9 +放棄 9 +政權 9 +教學 9 +星期 9 +普通 9 +朋友 9 +未能 9 +本人 9 +本身 9 +枚 9 +柏林 9 +核心 9 +森林 9 +標誌 9 +機會 9 +機車 9 +權利 9 +此時 9 +殿 9 +民間 9 +沿海 9 +浙江 9 +湖泊 9 +滿洲 9 +爆炸 9 +特大 9 +狀況 9 +現 9 +瑞士 9 +當局 9 +發布 9 +皇后 9 +皇家 9 +相互 9 +相似 9 +石 9 +破壞 9 +穩定 9 +空中 9 +第五 9 +絕對 9 +經歷 9 +經理 9 +綜合 9 +總督 9 +老師 9 +而是 9 +聯繫 9 +職務 9 +肉 9 +自行 9 +芬蘭 9 +花園 9 +菲律賓 9 +處理 9 +觀眾 9 +解放 9 +評 9 +貢獻 9 +資格 9 +進士 9 +運作 9 +遭 9 +那麼 9 +酒店 9 +金屬 9 +階段 9 
+隧道 9 +隨後 9 +集中 9 +電話 9 +青年 9 +頻道 9 +顏色 9 +高等 9 +-- 8 +上升 8 +下來 8 +中環 8 +主題 8 +亞洲 8 +人工 8 +以外 8 +佔地 8 +何 8 +依據 8 +俄國 8 +保守 8 +信息 8 +傅 8 +價格 8 +儒 8 +光棍 8 +內地 8 +內戰 8 +公分 8 +分鐘 8 +利益 8 +劇情 8 +劑 8 +加 8 +加拿大 8 +十一 8 +即使 8 +原來 8 +口 8 +古老 8 +同樣 8 +命令 8 +喜歡 8 +因素 8 +圖 8 +圖案 8 +地鐵 8 +報道 8 +增長 8 +大多 8 +大小 8 +大道 8 +始 8 +官 8 +家人 8 +專門 8 +小型 8 +小時 8 +尚 8 +局長 8 +山脈 8 +山西 8 +工藝 8 +工資 8 +左右 8 +巨大 8 +平方千米 8 +幻想 8 +廣播 8 +廣東 8 +廳 8 +往往 8 +從此 8 +德 8 +意義 8 +意見 8 +或是 8 +房屋 8 +批 8 +按 8 +提升 8 +提高 8 +攝影 8 +政 8 +效果 8 +教授 8 +文章 8 +方案 8 +旅遊 8 +早 8 +明確 8 +書記 8 +書院 8 +曹 8 +材料 8 +武 8 +武漢 8 +比如 8 +污染 8 +注意 8 +測試 8 +澳大利亞 8 +澳洲 8 +瀋陽 8 +燃料 8 +爵士 8 +父母 8 +現存 8 +男子 8 +病逝 8 +發明 8 +白色 8 +的話 8 +皆 8 +監督 8 +真正 8 +知 8 +知名 8 +秘書 8 +秦 8 +程度 8 +立方米 8 +符號 8 +等等 8 +粒子 8 +紅 8 +維也納 8 +編碼 8 +編輯 8 +署 8 +羽毛 8 +翻譯 8 +考慮 8 +聚集 8 +股份 8 +臨時 8 +良好 8 +芝加哥 8 +葉 8 +表達 8 +複雜 8 +襲擊 8 +西南 8 +解釋 8 +討論 8 +許 8 +詞 8 +變 8 +貓 8 +賽季 8 +贏得 8 +軟體 8 +轉 8 +通 8 +過去 8 +邨 8 +部長 8 +鄧 8 +重 8 +重大 8 +銀河 8 +鏡 8 +長度 8 +隨即 8 +雄性 8 +靠 8 +餐廳 8 +首府 8 +高中 8 +A 7 +A999 7 +~ 7 +下午 7 +不可 7 +主人 7 +之下 7 +事實 7 +事情 7 +二世 7 +二戰 7 +交換 7 +任命 7 +伊麗莎白 7 +住宅 7 +佛教 7 +保險 7 +倍 7 +傳說 7 +入侵 7 +公共 7 +公務 7 +公爵 7 +共產 7 +典型 7 +分析 7 +列 7 +前身 7 +創造 7 +匈奴 7 +北角 7 +十字 7 +卡 7 +原著 7 +右 7 +各地 7 +名稱 7 +名義 7 +吳 7 +吸引 7 +命 7 +員工 7 +哲學 7 +唐朝 7 +喬治 7 +回 7 +在此 7 +城堡 7 +城門 7 +基金 7 +場所 7 +大使 7 +天星 7 +天然 7 +失敗 7 +套 7 +奴隸 7 +學術 7 +安排 7 +宋 7 +實現 7 +實行 7 +專科 7 +尋找 7 +尋求 7 +小組 7 +島嶼 7 +左 7 +差異 7 +市區 7 +市民 7 +常常 7 +幣 7 +平原 7 +年級 7 +年輕 7 +店 7 +建國 7 +弗吉尼亞 7 +強調 7 +形象 7 +很少 7 +想像 7 +意識 7 +愛爾蘭 7 +戲 7 +找到 7 +持有 7 +指導 7 +探測 7 +支援 7 +收斂 7 +放 7 +教師 7 +施 7 +旗下 7 +明 7 +最多 7 +本地 7 +某些 7 +校園 7 +核糖 7 +條約 7 +榮譽 7 +樂隊 7 +檢查 7 +款 7 +母音 7 +氏 7 +氣候 7 +水庫 7 +沒 7 +海岸 7 +海洋 7 +混合 7 +清真 7 +港島 7 +湖南 7 +湯姆 7 +滿 7 +激烈 7 +無綫 7 +然後 7 +熊貓 7 +熱 7 +特有 7 +班 7 +現有 7 +現象 7 +球員 7 +球季 7 +理工 7 +甘肅 7 +生態 7 +申請 7 +真實 7 +石油 7 +礁 7 +秘密 7 +移動 7 +空軍 7 +突破 7 +策略 7 +簽訂 7 +約翰 7 +結合 7 +維新 7 +綱 7 +網 7 +翌年 7 +臺灣 7 +興建 7 +興趣 7 +舉辦 7 +航班 7 +航線 7 +艦 7 +茶 7 +著作 7 +衛生 7 +表演 7 +表面 7 +裔 7 +西方 7 +規劃 7 +覺得 7 +觀測 7 +觀點 7 +計算 7 +訪問 7 +設施 7 +評論 7 +調整 
7 +講述 7 +議院 7 +讀 7 +貴族 7 +貿易 7 +較小 7 +較為 7 +輛 7 +轟炸 7 +迅速 7 +近年 7 +連接 7 +道德 7 +達成 7 +適合 7 +選出 7 +邏輯 7 +醫學 7 +重點 7 +錄製 7 +鏡頭 7 +長期 7 +長達 7 +關 7 +降低 7 +雖 7 +需求 7 +面對 7 +韓國 7 +領先 7 +領袖 7 +題材 7 +風暴 7 +食用 7 +駐守 7 +體現 7 +體系 7 +高級 7 +高達 7 +魔法 7 +魚 7 +999999 6 +999億 6 +999多 6 +JR 6 +The 6 +丈夫 6 +上市 6 +上映 6 +乘 6 +事物 6 +二十 6 +亦是 6 +享受 6 +亮 6 +代理 6 +任務 6 +但丁 6 +住 6 +作出 6 +來源 6 +依然 6 +依靠 6 +促進 6 +信號 6 +個體 6 +做法 6 +側 6 +傳 6 +優勢 6 +元朗 6 +全家 6 +公民 6 +公眾 6 +兼 6 +出土 6 +判決 6 +剛 6 +劃分 6 +加工 6 +助理 6 +努力 6 +動力 6 +十八 6 +協議 6 +卡爾 6 +原始 6 +反射 6 +取消 6 +口號 6 +司 6 +司法 6 +含有 6 +吸收 6 +呂 6 +呼吸 6 +咖啡 6 +商品 6 +商店 6 +嘗試 6 +四川 6 +困難 6 +國歌 6 +地產 6 +基 6 +基因 6 +壓力 6 +外國 6 +多樣 6 +大大 6 +大獎 6 +大眾 6 +太空 6 +夫人 6 +奧運 6 +她們 6 +好友 6 +如同 6 +始建 6 +嬴 6 +季節 6 +官方 6 +定居 6 +定義 6 +客運 6 +宣佈 6 +宮 6 +家中 6 +密碼 6 +封 6 +對應 6 +對抗 6 +對象 6 +導演 6 +展覽 6 +島上 6 +師範 6 +席 6 +平等 6 +平面 6 +底 6 +廣告 6 +延伸 6 +強度 6 +形容 6 +形態 6 +形狀 6 +影片 6 +彼此 6 +徒 6 +情感 6 +意 6 +意味 6 +愛 6 +感 6 +懷孕 6 +戀 6 +成熟 6 +成績 6 +成長 6 +手法 6 +打算 6 +批准 6 +投票 6 +授予 6 +提名 6 +搖滾 6 +搜索 6 +操作 6 +擴展 6 +改編 6 +效力 6 +敘利亞 6 +教導 6 +新城 6 +方向 6 +方形 6 +日報 6 +日耳曼 6 +時任 6 +時常 6 +普魯士 6 +更名 6 +最近 6 +朝廷 6 +杯 6 +校區 6 +校長 6 +楚 6 +樹 6 +歌曲 6 +止 6 +死後 6 +民眾 6 +池 6 +河道 6 +流行 6 +海盜 6 +消費 6 +深入 6 +深圳 6 +滅亡 6 +火 6 +無論 6 +版權 6 +牙齒 6 +王朝 6 +玻璃 6 +生存 6 +男友 6 +町 6 +畫 6 +畫家 6 +病毒 6 +發出 6 +發起 6 +發達 6 +短 6 +碑 6 +確認 6 +神奇 6 +神經 6 +禁止 6 +私人 6 +秦國 6 +穆斯林 6 +立刻 6 +立場 6 +童年 6 +端 6 +第七 6 +籃球 6 +米蘭 6 +經典 6 +經驗 6 +緬甸 6 +繪畫 6 +缺乏 6 +羅 6 +美麗 6 +習俗 6 +翡翠 6 +職 6 +能量 6 +色彩 6 +蔣 6 +蕭 6 +藉由 6 +虛擬 6 +血統 6 +行 6 +行走 6 +表明 6 +袁 6 +製成 6 +覆蓋 6 +規則 6 +設置 6 +試驗 6 +詩 6 +詩人 6 +詩歌 6 +該片 6 +說服 6 +說法 6 +論 6 +諮詢 6 +證明 6 +豐富 6 +走 6 +超人 6 +越來越 6 +跑道 6 +路易斯 6 +車展 6 +輿論 6 +近代 6 +返回 6 +退役 6 +通往 6 +通訊 6 +造 6 +進步 6 +過來 6 +選區 6 +遺傳 6 +邀請 6 +邊緣 6 +邱 6 +酒精 6 +醫生 6 +醫療 6 +金融 6 +銷售 6 +開展 6 +開放 6 +阻止 6 +陷入 6 +隊員 6 +階級 6 +隨機 6 +雕刻 6 +離 6 +雲南 6 +電池 6 +非洲 6 +須 6 +顧問 6 +首先 6 +騎兵 6 +黎 6 +9,999,999 5 +99.9萬 5 +999,999 5 +99億 5 +9千億 5 +『 5 +上述 5 +不僅 5 +不好 5 +中立 5 +中間 5 +主流 5 +事故 5 +亞歷山大 5 +亞馬遜 5 +人均 5 +今天 5 +今日 5 +介入 5 +以北 5 +任期 5 +佔據 5 +作家 5 +依舊 5 +侵略 
5 +保存 5 +信 5 +信任 5 +信奉 5 +信託 5 +修正 5 +停止 5 +傑出 5 +傳承 5 +傷害 5 +像是 5 +儀式 5 +先 5 +免費 5 +入 5 +公交 5 +公會 5 +兵 5 +其它 5 +其餘 5 +冷卻 5 +分配 5 +分類 5 +列入 5 +別墅 5 +刺激 5 +創建 5 +加熱 5 +加盟 5 +動作 5 +勞工 5 +化合 5 +北海 5 +十二 5 +千 5 +升級 5 +南北 5 +南極 5 +印第安那 5 +參謀 5 +參議 5 +受傷 5 +叫做 5 +史 5 +司馬 5 +各個 5 +合 5 +合法 5 +合理 5 +同盟 5 +名單 5 +否認 5 +呈 5 +呈現 5 +周圍 5 +品牌 5 +哈定 5 +啟超 5 +善 5 +喇嘛 5 +固定 5 +固體 5 +圍 5 +圖像 5 +土耳其 5 +在內 5 +地圖 5 +城區 5 +執政 5 +培養 5 +堅持 5 +堡 5 +場地 5 +壁畫 5 +壘 5 +外科 5 +大氣 5 +大西 5 +如下 5 +如今 5 +妻 5 +始終 5 +孔 5 +學名 5 +學會 5 +學科 5 +宇宙 5 +安裝 5 +官吏 5 +客戶 5 +客體 5 +宮廷 5 +家長 5 +容納 5 +宿舍 5 +察覺 5 +寫作 5 +專利 5 +專家 5 +對外 5 +對此 5 +少數 5 +展出 5 +展開 5 +岸 5 +工 5 +工具 5 +巴西 5 +市政 5 +席位 5 +年度 5 +底部 5 +廈門 5 +廖 5 +廣 5 +廣西 5 +建成 5 +引發 5 +弟弟 5 +得知 5 +微博 5 +心 5 +意思 5 +愛情 5 +感情 5 +感覺 5 +慈善 5 +態度 5 +慶祝 5 +成年 5 +成本 5 +成都 5 +戰國 5 +戰後 5 +房間 5 +手中 5 +手段 5 +托勒密 5 +找 5 +技能 5 +抗議 5 +抵抗 5 +抵達 5 +拜占庭 5 +持續 5 +指定 5 +指示 5 +掌握 5 +排名 5 +接管 5 +推進 5 +措施 5 +提到 5 +換 5 +撤銷 5 +收入 5 +收藏 5 +政務 5 +故宮 5 +教皇 5 +教習 5 +敵人 5 +文忠 5 +文獻 5 +斯 5 +新型 5 +新華 5 +新鮮 5 +方便 5 +方言 5 +施工 5 +旅行 5 +日期 5 +早年 5 +明治 5 +更加 5 +書中 5 +有的 5 +朝 5 +本片 5 +杜 5 +東海 5 +東西 5 +架構 5 +某種 5 +查爾斯 5 +查理 5 +柯林頓 5 +棉花 5 +棒球 5 +極 5 +榜 5 +構成 5 +樓梯 5 +機制 5 +機器 5 +次年 5 +欣賞 5 +歡迎 5 +正常 5 +正確 5 +武裝 5 +歸 5 +殺害 5 +每天 5 +民 5 +民兵 5 +氣體 5 +水果 5 +水系 5 +汞 5 +江西 5 +決策 5 +河北 5 +河南 5 +波音 5 +泥塑 5 +泰安 5 +泳兒 5 +洛桑 5 +洪 5 +海峽 5 +海底 5 +消息 5 +游擊 5 +湖北 5 +溫 5 +溫度 5 +溫泉 5 +滅絕 5 +演化 5 +演奏 5 +漢朝 5 +潘 5 +澤東 5 +澳 5 +濃度 5 +炎 5 +無關 5 +牌 5 +物品 5 +物業 5 +物體 5 +狗 5 +狩獵 5 +王子 5 +珊瑚 5 +現場 5 +現實 5 +甘 5 +生下 5 +生涯 5 +用作 5 +發送 5 +百 5 +直徑 5 +直至 5 +相 5 +真理 5 +眼 5 +督 5 +祖先 5 +神秘 5 +神聖 5 +秋 5 +移居 5 +程 5 +程序 5 +種植 5 +種類 5 +稱作 5 +空氣 5 +穿 5 +突變 5 +競賽 5 +符合 5 +第六 5 +簡單 5 +粵 5 +紅軍 5 +紐西蘭 5 +級別 5 +素 5 +細節 5 +組合 5 +結局 5 +編號 5 +練習 5 +總署 5 +繞 5 +美洲 5 +群島 5 +群眾 5 +耕地 5 +聯絡 5 +聲明 5 +肯定 5 +臺 5 +興奮 5 +興起 5 +般 5 +船上 5 +艘 5 +花 5 +華視 5 +落後 5 +藝人 5 +藥物 5 +蘇格蘭 5 +虎丘 5 +虛構 5 +融合 5 +血壓 5 +行業 5 +裝甲 5 +裝置 5 +裡面 5 +西遊 5 +觀 5 +解散 5 +設備 5 +診斷 5 +該地 5 +該屬 5 +認可 5 +認知 5 +認識 5 +誕生 5 +請 5 +象徵 5 +貝多芬 5 +財產 5 +貨車 5 +質量 5 +赤道 5 +赴 5 +超級 5 +越南 5 +趙國 5 +路易 5 
+身亡 5 +軍團 5 +輪 5 +轉移 5 +轉變 5 +辭去 5 +辭職 5 +退出 5 +通車 5 +通道 5 +連 5 +連任 5 +連續 5 +進而 5 +進軍 5 +遠 5 +適當 5 +遭遇 5 +那裡 5 +邦 5 +郗 5 +郵政 5 +鄉鎮 5 +鄰近 5 +醒亞 5 +醫師 5 +鎊 5 +鎮壓 5 +鐵 5 +鑒 5 +長大 5 +長官 5 +長沙 5 +開設 5 +防禦 5 +陝西 5 +院長 5 +陸 5 +階層 5 +障礙 5 +隸屬 5 +難 5 +電梯 5 +電車 5 +青海 5 +預測 5 +預算 5 +預防 5 +領土 5 +頻率 5 +食品 5 +飲料 5 +飾 5 +首相 5 +馬來西亞 5 +馬達 5 +馮 5 +騎士 5 +體積 5 +體色 5 +黑人 5 +龐大 5 +9-9 4 +9.99億 4 +9.9億 4 +9.9萬 4 +B 4 +Casey 4 +County 4 +Google 4 +John 4 +M9 4 +NBA 4 +of 4 +』 4 +一世 4 +一半 4 +一旦 4 +三世 4 +上演 4 +上訴 4 +下令 4 +下頜 4 +不及 4 +不得 4 +不應 4 +不等 4 +不足 4 +世凱 4 +中東 4 +中止 4 +中華龍鳥 4 +中視 4 +丹羽 4 +主演 4 +乃 4 +久 4 +之上 4 +乘坐 4 +乘客 4 +乾燥 4 +乾隆 4 +了解 4 +予 4 +事變 4 +于 4 +五世 4 +亞軍 4 +交給 4 +交配 4 +交響 4 +亦為 4 +享年 4 +人事 4 +代言 4 +以西 4 +任職 4 +企圖 4 +伊拉克 4 +伺服 4 +供應 4 +依法 4 +侵蝕 4 +保加利亞 4 +保障 4 +信徒 4 +修復 4 +倫理 4 +做出 4 +停留 4 +價 4 +優惠 4 +優秀 4 +兄弟 4 +充電 4 +先進 4 +克拉克 4 +入口 4 +入選 4 +全面 4 +公 4 +公安 4 +公式 4 +共振 4 +其間 4 +具體 4 +冬天 4 +出場 4 +出戰 4 +出發 4 +出租 4 +出色 4 +刀 4 +分佈 4 +分成 4 +分期 4 +列表 4 +則天 4 +則為 4 +前期 4 +前線 4 +前進 4 +劇集 4 +劍 4 +加州 4 +加強 4 +勝利 4 +包 4 +包裝 4 +匈牙利 4 +區劃 4 +十五 4 +協定 4 +協調 4 +南側 4 +南延 4 +印第安 4 +危機 4 +原有 4 +原理 4 +參考 4 +參賽 4 +古物 4 +句 4 +只要 4 +各國 4 +各界 4 +合成 4 +合眾 4 +合金 4 +吉 4 +吉他 4 +同事 4 +同治 4 +名將 4 +名詞 4 +呎 4 +呢 4 +周年 4 +命運 4 +哈爾濱 4 +哥倫比亞 4 +商人 4 +啟用 4 +喬治亞 4 +單車 4 +嘲諷 4 +回來 4 +國防 4 +圓形 4 +地底 4 +地形 4 +地面 4 +坊 4 +基辛格 4 +堅決 4 +墓 4 +夏 4 +外星 4 +夜 4 +夢 4 +大同 4 +大帝 4 +大臣 4 +天皇 4 +夫婦 4 +失望 4 +妹妹 4 +姐姐 4 +姓氏 4 +委任 4 +婚姻 4 +婦女 4 +媽媽 4 +學堂 4 +官員 4 +定律 4 +宣稱 4 +實業 4 +實體 4 +寶貝 4 +小學 4 +少女 4 +尼龍 4 +局部 4 +展示 4 +屯門 4 +山區 4 +山頂 4 +岩 4 +島式 4 +島津 4 +嶺 4 +巡迴 4 +帶給 4 +常用 4 +幅度 4 +幫 4 +平方英里 4 +年齡 4 +幽默 4 +度假 4 +庫 4 +廚房 4 +廢除 4 +廷 4 +影像 4 +影業 4 +往 4 +很快 4 +很難 4 +後者 4 +得分 4 +得名 4 +得寵 4 +循環 4 +微 4 +徵召 4 +志願 4 +快 4 +怎麼 4 +性別 4 +性質 4 +恐龍 4 +患者 4 +情報 4 +情形 4 +情節 4 +情緒 4 +慕尼黑 4 +應該 4 +戀愛 4 +成人 4 +成分 4 +戰敗 4 +戰死 4 +扮演 4 +批判 4 +技巧 4 +抒情 4 +拓展 4 +招募 4 +指數 4 +按照 4 +挪威 4 +排列 4 +排水 4 +排行 4 +接收 4 +接替 4 +推薦 4 +推行 4 +揚州 4 +擔心 4 +擴大 4 +擴建 4 +擴張 4 +收到 4 +收錄 4 +改造 4 +攻克 4 +敖 4 +教區 4 +教宗 4 +教練 4 +整理 4 +數千 4 +數字 4 +文泰 4 +新疆 4 +新竹 4 +旅客 4 +既 4 +日常 
4 +昆明 4 +明基 4 +星球 4 +星等 4 +春秋 4 +時段 4 +晉國 4 +晚間 4 +暗示 4 +暴力 4 +更換 4 +曼德拉 4 +最低 4 +最好 4 +最長 4 +有用 4 +服裝 4 +望遠 4 +木材 4 +本作 4 +本土 4 +本科 4 +本線 4 +本魚 4 +東區 4 +某 4 +校舍 4 +格 4 +案件 4 +楚國 4 +樂 4 +樂器 4 +標本 4 +樞紐 4 +模仿 4 +橄欖 4 +檔 4 +檢測 4 +欲 4 +歌 4 +正月 4 +此前 4 +此次 4 +步兵 4 +武術 4 +歷任 4 +死神 4 +殺死 4 +毀 4 +母 4 +每秒 4 +比例 4 +毫克 4 +毫米 4 +水深 4 +永江 4 +污泥 4 +沈 4 +沉澱 4 +沙灘 4 +河川 4 +油價 4 +治 4 +法案 4 +法蘭克 4 +法規 4 +波希米亞 4 +波斯 4 +注入 4 +洛杉磯 4 +洛陽 4 +流經 4 +浦 4 +海域 4 +海外 4 +海德堡 4 +海戰 4 +海水 4 +海灣 4 +海面 4 +液態 4 +液體 4 +深 4 +測量 4 +港鐵 4 +湯 4 +源 4 +滬 4 +滿貫 4 +潮濕 4 +濟南 4 +灣仔 4 +火災 4 +炸藥 4 +烏克蘭 4 +無意 4 +無線 4 +無錫 4 +照片 4 +營 4 +營業 4 +父 4 +爽 4 +牛奶 4 +牧場 4 +特遣 4 +特點 4 +犯罪 4 +狀元 4 +狂 4 +狙擊 4 +獎勵 4 +王后 4 +珍珠 4 +現今 4 +現金 4 +球會 4 +理事 4 +理想 4 +琉球 4 +瑪麗 4 +瓷器 4 +甘珠爾 4 +生化 4 +生意 4 +產地 4 +產量 4 +留 4 +畝 4 +當天 4 +當年 4 +當日 4 +當然 4 +疫苗 4 +癌症 4 +發育 4 +發言 4 +發酵 4 +皮膚 4 +監獄 4 +直 4 +直升 4 +直隸 4 +相比 4 +相近 4 +省份 4 +省委 4 +省級 4 +真相 4 +督察 4 +矩陣 4 +短暫 4 +短篇 4 +研發 4 +社團 4 +神廟 4 +神達 4 +票房 4 +租界 4 +種族 4 +稱號 4 +空調 4 +突然 4 +立即 4 +童 4 +競爭 4 +等級 4 +節日 4 +簽約 4 +粒 4 +精確 4 +紋理 4 +納粹 4 +純 4 +終止 4 +終結 4 +維吾爾 4 +網球 4 +緊密 4 +總量 4 +總長 4 +繼 4 +繼任 4 +罕見 4 +罪名 4 +置 4 +羅伯特 4 +羅馬尼亞 4 +義務 4 +習慣 4 +老闆 4 +考察 4 +考試 4 +聖 4 +聖母 4 +聲稱 4 +聲譽 4 +背景 4 +胡佛 4 +自動 4 +船隻 4 +艙 4 +艱難 4 +苦艾 4 +草本 4 +荷蘭 4 +莊 4 +莊園 4 +莫斯科 4 +華航 4 +落成 4 +著重 4 +董 4 +蒙扎 4 +蓉蓉 4 +薩摩 4 +蘇家 4 +蘋果 4 +蛋白 4 +蜘蛛 4 +血栓 4 +行省 4 +術語 4 +衛星 4 +製 4 +西側 4 +西曼 4 +親 4 +親王 4 +評價 4 +評定 4 +詞語 4 +試 4 +該劇 4 +該區 4 +詹姆斯 4 +誰 4 +課程 4 +談話 4 +請求 4 +論文 4 +識字 4 +警署 4 +議長 4 +讀者 4 +負 4 +財富 4 +財政 4 +貨幣 4 +貨物 4 +貨運 4 +費 4 +賀 4 +資本 4 +資深 4 +資金 4 +賈 4 +質 4 +購買 4 +贊助 4 +起義 4 +足 4 +身上 4 +身分 4 +躲避 4 +車序 4 +軍人 4 +軍力 4 +軍官 4 +軍閥 4 +較多 4 +較少 4 +較高 4 +輸入 4 +輻射 4 +輻鰭魚 4 +轄下 4 +轉換 4 +辦事 4 +辦法 4 +辦理 4 +農民 4 +逃往 4 +這麼 4 +週期 4 +進口 4 +進球 4 +進程 4 +遂 4 +遊行 4 +過度 4 +過枝 4 +遷移 4 +遼寧 4 +邊境 4 +邵 4 +部落 4 +郵票 4 +重視 4 +野生 4 +量子 4 +金字 4 +針對 4 +銅鑼 4 +鋼琴 4 +錯誤 4 +鏡片 4 +鏡面 4 +長子 4 +長江 4 +門診 4 +開 4 +開幕 4 +開闢 4 +關心 4 +防守 4 +阿拉伯 4 +院校 4 +陽光 4 +隊伍 4 +階 4 +隔離 4 +雕塑 4 +雨水 4 +電 4 +電力 4 +電台 4 +電磁 4 +電訊 4 +靜態 4 +靜脈 4 +非法 4 +靠近 4 +音 4 +順 4 +順位 4 +預期 4 +頭 4 +題 
4 +願意 4 +風險 4 +颱風 4 +飛 4 +飼養 4 +餐 4 +餘下 4 +首領 4 +體內 4 +體長 4 +高架 4 +高溫 4 +鬥爭 4 +鳥類 4 +黃埔 4 +黑 4 +黑色 4 +黨籍 4 +鼓勵 4 +! 3 +9.9% 3 +9.999 3 +9999萬 3 +99多 3 +99餘 3 +Center 3 +Close 3 +GDP 3 +Game 3 +H9N9 3 +III 3 +James 3 +Mappy 3 +New 3 +PSP 3 +To 3 +You 3 +°C 3 +─ 3 +・ 3 +一同 3 +丁 3 +三十 3 +三江 3 +上游 3 +上環 3 +上表 3 +上課 3 +上面 3 +下列 3 +下台 3 +下場 3 +下級 3 +不但 3 +不想 3 +不敵 3 +不明 3 +不遠 3 +不韋 3 +世 3 +世宗 3 +丟失 3 +中古 3 +中子 3 +中將 3 +中期 3 +中西 3 +中轉 3 +中風 3 +丹佛 3 +丹尼士 3 +主任 3 +主動 3 +主唱 3 +主機 3 +主編 3 +主辦 3 +主體 3 +之內 3 +之時 3 +也好 3 +互聯 3 +五角 3 +些 3 +亞當 3 +亞目 3 +亞視 3 +交往 3 +京都 3 +亮度 3 +人性 3 +人次 3 +人生 3 +人身 3 +他人 3 +付出 3 +以往 3 +以為 3 +以致 3 +任內 3 +任教 3 +份子 3 +企鵝 3 +伊恩 3 +伊賀 3 +休息 3 +估計 3 +伸出 3 +似 3 +伽利略 3 +住戶 3 +住院 3 +佔有 3 +佛 3 +佛像 3 +佛學 3 +佛羅倫薩 3 +作霖 3 +併 3 +來自 3 +例子 3 +供奉 3 +供給 3 +依 3 +依賴 3 +俄亥俄 3 +俘虜 3 +保安 3 +保育 3 +保證 3 +信德 3 +修建 3 +修道 3 +個別 3 +個性 3 +倖存 3 +候選 3 +借用 3 +倪 3 +值 3 +值得 3 +偉大 3 +偏 3 +停 3 +停車 3 +備受 3 +傳奇 3 +傳教 3 +傳染 3 +傾向 3 +優異 3 +允許 3 +元洪 3 +光源 3 +光緒 3 +克里米亞 3 +兒女 3 +內陸 3 +全市 3 +全縣 3 +全體 3 +公國 3 +公尺 3 +公轉 3 +六十 3 +共計 3 +兵力 3 +兼任 3 +冊封 3 +冷 3 +凱撒 3 +出使 3 +出口 3 +出獄 3 +函數 3 +分散 3 +分行 3 +分裂 3 +分解 3 +切斷 3 +刊物 3 +列為 3 +利 3 +制定 3 +前後 3 +前鋒 3 +前面 3 +剛好 3 +創意 3 +劇團 3 +劇本 3 +劇目 3 +劇院 3 +劍橋 3 +力學 3 +加利福尼亞 3 +勞倫斯 3 +匈 3 +化工 3 +北冕 3 +北洋 3 +區別 3 +十七 3 +十多 3 +升 3 +升任 3 +升格 3 +南昌 3 +南海 3 +占 3 +印象 3 +即將 3 +卻是 3 +厘米 3 +原則 3 +原告 3 +原料 3 +友誼 3 +取 3 +受損 3 +叛亂 3 +口徑 3 +古城 3 +可惜 3 +台中 3 +史上 3 +各州 3 +各省 3 +同名 3 +同性 3 +同情 3 +名譽 3 +告訴 3 +周邊 3 +呼聲 3 +和也 3 +和約 3 +品種 3 +哥哥 3 +哲 3 +哺乳 3 +唱 3 +喜愛 3 +單一 3 +嘉賓 3 +器官 3 +噴泉 3 +嚴格 3 +四世 3 +回應 3 +回歸 3 +國泰 3 +國籍 3 +國軍 3 +圍繞 3 +園區 3 +土 3 +土壤 3 +在任 3 +在場 3 +地中 3 +地勢 3 +地獄 3 +地理 3 +坡 3 +報導 3 +場合 3 +塊 3 +塑造 3 +塘 3 +塞爾維亞 3 +填充 3 +填海 3 +填補 3 +境地 3 +墜毀 3 +士官 3 +壯 3 +壯觀 3 +夏天 3 +外來 3 +外界 3 +外部 3 +多達 3 +大夫 3 +大家 3 +大師 3 +大橋 3 +大權 3 +大致 3 +大賽 3 +大選 3 +天國 3 +天王 3 +天空 3 +太 3 +太小 3 +失業 3 +奇異 3 +奈米 3 +契約 3 +奧 3 +奪取 3 +女巫 3 +女王 3 +女神 3 +好評 3 +如何 3 +妃 3 +妨礙 3 +委派 3 +委託 3 +威力 3 +威尼斯 3 +威爾士 3 +威爾斯 3 +娃娃 3 +娶 3 +嫁給 3 +嫌疑 3 +嬌嬌 3 +子女 3 +孟席斯 3 +孫子 3 +學府 3 +宇 3 +安 3 +安德烈 3 +安徽 3 +安置 3 +宋朝 
3 +完備 3 +完善 3 +完工 3 +完美 3 +宏 3 +宗 3 +官僚 3 +定 3 +宣傳 3 +宣告 3 +室內 3 +宰相 3 +家寶 3 +家裡 3 +家鄉 3 +富有 3 +富江 3 +寒冷 3 +實在 3 +實施 3 +封閉 3 +射入 3 +射擊 3 +專用 3 +尊 3 +對方 3 +對比 3 +導航 3 +小吃 3 +小堂 3 +小孩 3 +小平 3 +少 3 +尖 3 +就算 3 +尼山 3 +局面 3 +屈 3 +屋邨 3 +屠 3 +屯 3 +州長 3 +已婚 3 +已知 3 +巴哈伊 3 +巴斯 3 +布庫 3 +布袋 3 +師傅 3 +帶到 3 +帶走 3 +帶頭 3 +帽 3 +帽子 3 +平台 3 +平方呎 3 +平方英尺 3 +平民 3 +平衡 3 +年間 3 +幸福 3 +幹線 3 +幼 3 +幾何 3 +序列 3 +度過 3 +康 3 +庾 3 +延續 3 +延長 3 +建業 3 +弓毛 3 +引入 3 +引力 3 +引用 3 +弟子 3 +弱小 3 +強制 3 +強壯 3 +彈 3 +彈簧 3 +彭 3 +彰化 3 +影視 3 +往來 3 +往後 3 +征服 3 +待 3 +待遇 3 +很好 3 +很高 3 +後人 3 +後方 3 +後衛 3 +後面 3 +徑 3 +徒步 3 +復 3 +復工 3 +復辟 3 +徵收 3 +德克薩斯 3 +德川 3 +德意志 3 +德綱 3 +徹底 3 +心情 3 +必然 3 +必要 3 +忽略 3 +思潮 3 +怡和 3 +急速 3 +性格 3 +怪物 3 +怪獸 3 +恩來 3 +悠久 3 +情書 3 +想到 3 +想法 3 +愛上 3 +愛國 3 +愛達荷 3 +感應 3 +慢慢 3 +憑藉 3 +憤怒 3 +懷疑 3 +懸崖 3 +成份 3 +成千上萬 3 +成果 3 +戒毒 3 +截止 3 +戰俘 3 +戰時 3 +戰艦 3 +戲劇 3 +戶 3 +房地產 3 +房子 3 +手下 3 +扎維耶 3 +扭曲 3 +扶手 3 +承受 3 +承擔 3 +投手 3 +抗戰 3 +抵擋 3 +拆除 3 +拉丁 3 +拯救 3 +持 3 +指令 3 +挑戰 3 +挺 3 +捐助 3 +捕捉 3 +捷克 3 +授 3 +排出 3 +探討 3 +接任 3 +接唱 3 +控 3 +提議 3 +換乘 3 +損失 3 +損害 3 +搬到 3 +撞擊 3 +播映 3 +撰寫 3 +擔當 3 +據說 3 +擴充 3 +支付 3 +支撐 3 +支流 3 +收回 3 +收拾 3 +收購 3 +改制 3 +改善 3 +改稱 3 +改進 3 +攻打 3 +放射 3 +故障 3 +救 3 +敘述 3 +教養 3 +文人 3 +文件 3 +料理 3 +斯里蘭卡 3 +新增 3 +新建 3 +新教 3 +新村 3 +新羅 3 +旁遮普 3 +族群 3 +日內瓦 3 +日後 3 +日間 3 +旨 3 +明星 3 +明珠 3 +明納努 3 +昏迷 3 +易 3 +星光 3 +星際 3 +星雲 3 +映射 3 +昭和 3 +是否 3 +時機 3 +時空 3 +晚 3 +晚年 3 +晨興 3 +普選 3 +景德 3 +景點 3 +晶體 3 +暗 3 +暨 3 +暫時 3 +暴動 3 +更為 3 +書店 3 +曼聯 3 +替 3 +替代 3 +替換 3 +最佳 3 +最為 3 +會堂 3 +月氏 3 +月球 3 +有利 3 +有機 3 +有權 3 +有趣 3 +服役 3 +服用 3 +朝日 3 +期望 3 +木板 3 +本來 3 +村民 3 +杜蘭戈 3 +杰 3 +東側 3 +東港 3 +東面 3 +板塊 3 +柏立基 3 +某個 3 +栃木 3 +校名 3 +核電 3 +栽培 3 +栽種 3 +桃 3 +桃園 3 +桃浦 3 +梅 3 +梅妃 3 +梅莉迪絲 3 +條例 3 +極度 3 +極端 3 +概念 3 +概率 3 +榮 3 +榮聲 3 +槍 3 +槍手 3 +樂曲 3 +樂章 3 +樊 3 +模擬 3 +機率 3 +檢察 3 +檸檬 3 +權力 3 +權勢 3 +權益 3 +次子 3 +次日 3 +歌劇 3 +正義 3 +正選 3 +步槍 3 +步道 3 +死傷 3 +死去 3 +毀滅 3 +比起 3 +民進 3 +氣壓 3 +氣泡 3 +氧化 3 +氧氣 3 +水上 3 +水域 3 +水塔 3 +水族 3 +水溝 3 +水稻 3 +永遠 3 +求救 3 +江南 3 +江孜 3 +污水 3 +決議 3 +沒收 3 +沖 3 +沙烏地阿拉伯 3 +油脂 3 +沼澤 3 +沿著 3 +法人 3 +法學 3 +法官 3 +波動 3 +波士頓 3 +波長 3 +泰國 3 +洋房 
3 +洋行 3 +洗浴 3 +活佛 3 +活力 3 +流動 3 +流感 3 +流量 3 +海上 3 +海珊 3 +消滅 3 +淋巴 3 +淘汰 3 +淡水 3 +清代 3 +渝 3 +港口 3 +湖水 3 +湯瑪斯 3 +準則 3 +溥儀 3 +溫帶 3 +溶解 3 +滉 3 +滑冰 3 +漂亮 3 +漢城 3 +漳州 3 +潛入 3 +火箭 3 +災難 3 +為期 3 +無數 3 +煙草 3 +照相 3 +煩惱 3 +熱庫 3 +熱能 3 +爬行 3 +爭 3 +爲 3 +牆壁 3 +牛 3 +牛津 3 +物資 3 +特化 3 +狹窄 3 +獎學 3 +獎項 3 +獲利 3 +獲取 3 +獵食 3 +獻給 3 +率領 3 +王室 3 +珠海 3 +班納蒂克 3 +現任 3 +球場 3 +理解 3 +瑞草 3 +環 3 +生前 3 +生成 3 +生殖 3 +產下 3 +用品 3 +用戶 3 +用法 3 +用途 3 +田 3 +男女 3 +男孩 3 +畢 3 +畫作 3 +異常 3 +當今 3 +當作 3 +當初 3 +疑問 3 +病故 3 +瘋狂 3 +登上 3 +登基 3 +登場 3 +登陸 3 +發揮 3 +發源 3 +白人 3 +百科 3 +皇 3 +皇室 3 +盟友 3 +盟旗 3 +監管 3 +目錄 3 +直系 3 +直線 3 +直選 3 +相反 3 +相機 3 +相遇 3 +真人 3 +真武 3 +眼睛 3 +睡蓮 3 +瞭解 3 +知情 3 +短尾貓 3 +短短 3 +破曉 3 +破產 3 +碎片 3 +碳 3 +碳化 3 +確保 3 +確立 3 +社群 3 +祖父 3 +神父 3 +票價 3 +福 3 +福島 3 +福斯 3 +禮 3 +禮儀 3 +禮拜 3 +秀 3 +科系 3 +科隆 3 +租借 3 +租賃 3 +種種 3 +積極 3 +窯瓷 3 +立陶宛 3 +章 3 +童話 3 +競選 3 +竹子 3 +第八 3 +第十 3 +第十三 3 +筆 3 +算 3 +管道 3 +箱 3 +節慶 3 +節省 3 +籍 3 +精通 3 +精選 3 +糧食 3 +紅磡 3 +紅色 3 +紛爭 3 +素貞 3 +紡織 3 +索馬利亞 3 +細小 3 +終點 3 +組建 3 +組裝 3 +結成 3 +維京 3 +維吉爾 3 +維基 3 +維多利亞 3 +編劇 3 +總共 3 +總數 3 +總結 3 +總體 3 +繪製 3 +繼位 3 +纖維 3 +缺席 3 +缺點 3 +置富 3 +羊肉 3 +美利堅 3 +翁 3 +老鼠 3 +考 3 +考古 3 +考驗 3 +而非 3 +耶穌 3 +聖誕 3 +聖靈 3 +聘請 3 +聚會 3 +聯手 3 +聯軍 3 +聲勢 3 +職位 3 +股價 3 +股票 3 +胡安 3 +膜 3 +自傳 3 +自我 3 +自稱 3 +自身 3 +自願 3 +至少 3 +致力 3 +致命 3 +臺南 3 +臼齒 3 +舞蹈 3 +航海 3 +航程 3 +船員 3 +艾女 3 +艾滋 3 +芭比 3 +英九 3 +范 3 +茶葉 3 +草食 3 +莽 3 +華格納 3 +菲利普斯 3 +萊姆 3 +萊茵 3 +著稱 3 +蒙大拿 3 +蒙特內哥羅 3 +蒸汽 3 +蓬勃 3 +薩魯曼 3 +藍 3 +藍色 3 +藤 3 +藥 3 +藩 3 +蘇州 3 +虎鯨 3 +蜀 3 +衍生 3 +衙門 3 +衛 3 +衛視 3 +衝擊 3 +衣服 3 +表 3 +裁判 3 +裏 3 +補給 3 +裝 3 +裝飾 3 +複合 3 +複製 3 +西安 3 +西洋 3 +西湖 3 +西納 3 +西門 3 +西關 3 +西面 3 +見到 3 +規格 3 +視頻 3 +親自 3 +計畫 3 +記憶 3 +評估 3 +評審 3 +該寺 3 +該書 3 +該校 3 +該站 3 +該鎮 3 +誠實 3 +誤認 3 +說明 3 +課室 3 +諷刺 3 +諸多 3 +謀殺 3 +謂 3 +謝 3 +證實 3 +識別 3 +護照 3 +譽 3 +讀書 3 +變形 3 +變數 3 +變體 3 +讚賞 3 +象 3 +貝爾 3 +貴妃 3 +貴州 3 +買 3 +買家 3 +費德勒 3 +費雪 3 +資助 3 +賓夕法尼亞 3 +賢 3 +賦予 3 +走廊 3 +起訴 3 +越 3 +足協 3 +距 3 +路徑 3 +跳 3 +踢 3 +身邊 3 +車體 3 +較大 3 +較長 3 +輔助 3 +輔導 3 +輔政 3 +輸出 3 +轄 3 +轄區 3 +轉乘 3 +轉到 3 +轉投 3 +轉讓 3 +農 3 +近期 3 +迫 3 +迫使 3 +追逐 3 +退休 3 +逃離 3 +逐步 3 +通信 3 +通用 3 +通行 3 
+速食 3 +逢 3 +連環 3 +連線 3 +逮捕 3 +週 3 +逾 3 +遇見 3 +遊樂 3 +運營 3 +過渡 3 +道光 3 +達也 3 +違法 3 +遠航 3 +適應 3 +遷徙 3 +選 3 +選手 3 +選秀 3 +遺體 3 +邊界 3 +那裏 3 +邦聯 3 +都市 3 +鄉議 3 +配 3 +配樂 3 +配置 3 +酗酒 3 +酸 3 +釀酒 3 +釋放 3 +重傷 3 +重整 3 +金庫 3 +金庸 3 +金鐘 3 +銅 3 +鋼 3 +錄 3 +錄音 3 +鍵 3 +鎳 3 +鐵人 3 +長安 3 +長州 3 +長相 3 +長遠 3 +開播 3 +關節 3 +阿兒法 3 +阿拉斯加 3 +阿根廷 3 +阿爾卑斯 3 +附屬 3 +降 3 +降落 3 +陣營 3 +除外 3 +陵 3 +陸地 3 +陸續 3 +際 3 +隱藏 3 +隱語 3 +雅典 3 +雌雄 3 +雙立 3 +雜技 3 +難度 3 +雪梨 3 +雪莉 3 +雲 3 +零 3 +零售 3 +雷睦斯 3 +靈 3 +青島 3 +鞏固 3 +音頻 3 +頂層 3 +順利 3 +預先 3 +頭銜 3 +頻譜 3 +題寫 3 +額 3 +風 3 +食 3 +飲食 3 +飾演 3 +首任 3 +首演 3 +馬來 3 +馬來亞 3 +馬德里 3 +驅動 3 +驅逐 3 +體操 3 +高低槓 3 +高原 3 +高層 3 +高山 3 +高麗 3 +鳳山 3 +麥田 3 +黃金 3 +黑子 3 +黑斑 3 +黑洞 3 +黛比 3 +黨員 3 +龍 3 +龍馬 3 +$ 2 +' 2 +... 2 +...... 2 +9.9999999 2 +99%-99% 2 +99.9億 2 +999.9 2 +999.99999 2 +999.9999999999999 2 +999.99億 2 +9999.9 2 +9999/99 2 +9999多 2 +9999餘 2 +99:99 2 +99A 2 +99° 2 +9:99 2 +9D 2 +9億9千9百萬 2 +9百萬 2 +9萬 2 +AAC 2 +ABC 2 +AI 2 +Aldridge 2 +Arts 2 +BBC 2 +Before 2 +Boy 2 +C 2 +DC-99 2 +DJ 2 +DNA 2 +E9 2 +E99 2 +Europipe 2 +Eve 2 +F-99A 2 +FC 2 +Finn 2 +GCMG 2 +Gravion 2 +Hall 2 +II 2 +Jason 2 +Jean 2 +K 2 +Karin 2 +L 2 +La 2 +Lee 2 +Live 2 +M9999 2 +N999 2 +NASA 2 +NDS 2 +NET 2 +Nicea 2 +OROCHI 2 +PVC 2 +Phillips 2 +Rivers 2 +Robert 2 +S 2 +Station 2 +Strait 2 +TVB 2 +U99 2 +UA 2 +V 2 +Winston 2 +Wyclef 2 +XII 2 +and 2 +de 2 +iPod 2 +km/h 2 +silver 2 +the 2 +‧ 2 +〈 2 +〉 2 +一中 2 +一共 2 +一千 2 +一向 2 +一度 2 +一手 2 +一提 2 +一貫 2 +一面 2 +七喜 2 +三棟屋 2 +三氯化金 2 +三藏 2 +上下車 2 +上任 2 +上佳 2 +上午 2 +上吊 2 +上將 2 +上層 2 +上方 2 +上校 2 +上街 2 +下去 2 +下層 2 +下屬 2 +下旬 2 +下水 2 +下海 2 +下游 2 +下野 2 +不一 2 +不停 2 +不再 2 +不列顛 2 +不受 2 +不夠 2 +不如 2 +不宜 2 +不已 2 +不幸 2 +不法 2 +不清 2 +不用 2 +不管 2 +不良 2 +不論 2 +不變 2 +不錯 2 +不需 2 +不願 2 +丐幫 2 +世俗 2 +世博 2 +世卿 2 +世襲 2 +世錦 2 +丘陵 2 +中區 2 +中午 2 +中天 2 +中巴 2 +中正 2 +中途 2 +中道 2 +中遠 2 +主上 2 +主力 2 +主因 2 +主場 2 +主權 2 +主管 2 +主線 2 +乘船 2 +乘車 2 +乙級 2 +九一八 2 +九州 2 +九巴 2 +也有 2 +也許 2 +乳酪 2 +事後 2 +二十六 2 +二甘醇 2 +互動 2 +五四 2 +五峰 2 +五百 2 +井 2 +亞冠 2 +亞利桑那 2 +亞得里亞 2 +交 2 +交互 2 +交到 2 +交匯 2 +交好 2 +交情 2 +交戰 2 +交手 2 +交趾 2 +人力 2 +人心 2 
+人才 2 +人文 2 +人格 2 +人熙 2 +人群 2 +人間 2 +人魚 2 +仁慈 2 +仁記 2 +今年 2 +介乎 2 +介石 2 +仍舊 2 +付款 2 +仙 2 +仙劍 2 +仙女 2 +以南 2 +以東 2 +以至 2 +任城 2 +任天堂 2 +任意 2 +份額 2 +仿 2 +伊比利亞 2 +伏威 2 +休閒 2 +伯公 2 +伯爵 2 +伯靈頓 2 +伴隨 2 +似乎 2 +低地 2 +低廉 2 +低溫 2 +住房 2 +佐土原 2 +佐藤 2 +佛山 2 +佛朗明哥 2 +佛殿 2 +作好 2 +作業 2 +作物 2 +佩劍 2 +併入 2 +使命 2 +使者 2 +使館 2 +來訪 2 +例 2 +例外 2 +供暖 2 +供熱 2 +供職 2 +侵 2 +侵入 2 +侵犯 2 +便宜 2 +促使 2 +促成 2 +俗稱 2 +保有 2 +保級 2 +保羅 2 +保衛 2 +信心 2 +信義 2 +信長 2 +信雄 2 +修士 2 +修理 2 +修習 2 +修訂 2 +修鍊 2 +個案 2 +倒台 2 +倒掛 2 +候鳥 2 +倡導 2 +倫 2 +假如 2 +假期 2 +假髮 2 +偏差 2 +停戰 2 +停滯 2 +偶然 2 +偶爾 2 +偽造 2 +傑作 2 +備 2 +催化 2 +傳入 2 +傳到 2 +傳動 2 +傳媒 2 +傳導 2 +傳授 2 +傳聞 2 +傳言 2 +傳送 2 +傳達 2 +債務 2 +傷 2 +傾聽 2 +僅僅 2 +僱員 2 +儀錶 2 +儒家 2 +優先 2 +儲備 2 +元代 2 +元件 2 +元帥 2 +元年 2 +元洲 2 +元璋 2 +元甲 2 +元首 2 +充斥 2 +充當 2 +兆帕 2 +先知 2 +先行 2 +先驅 2 +光線 2 +光譜 2 +光軸 2 +克基拉 2 +克用 2 +克隆 2 +免職 2 +入伍 2 +入圍 2 +入學 2 +入獄 2 +入讀 2 +入門 2 +內務 2 +內外 2 +內心 2 +內流 2 +全新 2 +全日 2 +全校 2 +全權 2 +全能 2 +全身 2 +八一 2 +八百餘 2 +公學 2 +公寓 2 +公署 2 +公認 2 +兵營 2 +其父 2 +具備 2 +典禮 2 +再造 2 +冬季 2 +冰兄 2 +冰峰 2 +冰川 2 +冰雪 2 +凡 2 +凱特 2 +凱瑞 2 +出入 2 +出入口 2 +出家 2 +出席 2 +出演 2 +出產 2 +出賽 2 +出道 2 +分享 2 +分化 2 +分區 2 +分手 2 +分擔 2 +分歧 2 +分隊 2 +刊載 2 +列傳 2 +列出 2 +初學 2 +初年 2 +初稿 2 +初級 2 +初賽 2 +判斷 2 +判處 2 +別 2 +別列佐夫斯基 2 +利比亞 2 +利物浦 2 +利特維年科 2 +到來 2 +到底 2 +制止 2 +制裁 2 +制訂 2 +刺 2 +刺客 2 +刺死 2 +刻 2 +刻有 2 +削弱 2 +前任 2 +前來 2 +前妻 2 +前途 2 +剝奪 2 +剩下 2 +副本 2 +創 2 +創始 2 +創新 2 +創業 2 +劃入 2 +劃給 2 +劇烈 2 +劍術 2 +劍齒虎 2 +功 2 +功率 2 +加之 2 +加勒比 2 +加堆 2 +加重 2 +劣勢 2 +助戰 2 +勒格里 2 +勒沃 2 +動機 2 +動脈 2 +動車 2 +勝出 2 +勳 2 +勳章 2 +勳銜 2 +勾引 2 +包圍 2 +包廂 2 +包衣 2 +匕首 2 +化纖 2 +化身 2 +北宋 2 +北平 2 +北方 2 +北端 2 +北約 2 +北道 2 +北齊 2 +匯率 2 +區分 2 +十一世 2 +十三 2 +十六 2 +升學 2 +半山 2 +半球 2 +協商 2 +協奏 2 +協約 2 +南下 2 +南山 2 +南斯拉夫 2 +南遣 2 +南邊 2 +南陽 2 +南非 2 +南面 2 +南韓 2 +博弈 2 +博彩 2 +博恩 2 +占卜 2 +卡梅隆 2 +卡片 2 +印 2 +印加 2 +印尼 2 +印製 2 +即位 2 +即時 2 +即興 2 +卷 2 +卿 2 +卿雲 2 +厄運 2 +原名 2 +原址 2 +原聲 2 +去除 2 +參觀 2 +參選 2 +又是 2 +又稱 2 +及格 2 +友好 2 +反叛 2 +反抗 2 +反擊 2 +叔叔 2 +取決 2 +受審 2 +受益 2 +受體 2 +口中 2 +口述 2 +古巴 2 +古柯鹼 2 +古蹟 2 +召喚 2 +可汗 2 +史學 2 +史密斯 2 +史提夫 2 +史蒂芬 2 +右岸 2 +司機 2 +司長 2 +司鼓 2 +吃肉 2 +吃飯 2 +各式 2 +各式各樣 
2 +各級 2 +各自 2 +各部 2 +合同 2 +合川 2 +合稱 2 +合葬 2 +吉布斯 2 +吉林 2 +吉里巴斯 2 +同人 2 +同居 2 +同體 2 +名人 2 +名利 2 +名古屋 2 +名縉 2 +名鎮 2 +向量 2 +君 2 +君王 2 +吞併 2 +否則 2 +否定 2 +告別 2 +告知 2 +告終 2 +周歲 2 +味 2 +呼叫 2 +呼籲 2 +和解 2 +和談 2 +咬金 2 +品行 2 +哈里發 2 +哥斯大黎加 2 +哥本哈根 2 +哥特 2 +哪裡 2 +售賣 2 +唯有 2 +唯美 2 +問 2 +啟動 2 +啟睿 2 +啟航 2 +啟蒙 2 +善化 2 +善意 2 +喉嚨 2 +喜劇 2 +喝 2 +喪生 2 +喬伊斯 2 +喬艾爾 2 +單元 2 +單曲 2 +喻 2 +嘉慶 2 +嘉玲 2 +器物 2 +噪音 2 +噴氣 2 +嚴密 2 +囚禁 2 +四分之一 2 +四十 2 +回國 2 +回想 2 +回憶 2 +回收 2 +國代 2 +國外 2 +國寶 2 +國徽 2 +國璋 2 +國語 2 +國鋒 2 +圍攻 2 +園藝 2 +圓頂 2 +圖樣 2 +圖畫 2 +團結 2 +團聚 2 +團長 2 +在位 2 +在來 2 +地外 2 +地帶 2 +坐診 2 +型態 2 +埃 2 +埃米莉 2 +城中 2 +城子 2 +域名 2 +執導 2 +執掌 2 +執教 2 +執法 2 +基底 2 +堂區 2 +堅 2 +堅固 2 +堅強 2 +報紙 2 +場場 2 +塞普勒斯 2 +境外 2 +墓地 2 +墓室 2 +增多 2 +增建 2 +增強 2 +增設 2 +墮胎 2 +壓倒 2 +壓強 2 +壓迫 2 +士 2 +壯大 2 +壯年 2 +夏伊 2 +夏季 2 +外傳 2 +外圍 2 +外在 2 +外援 2 +外觀 2 +外資 2 +多倫多 2 +多半 2 +多少 2 +夜晚 2 +夠 2 +夥伴 2 +大亂 2 +大佛 2 +大公 2 +大力 2 +大勝 2 +大半 2 +大堂 2 +大妃 2 +大將 2 +大屋 2 +大廳 2 +大批 2 +大敗 2 +大槍 2 +大火 2 +大碟 2 +大笨 2 +大街 2 +大衛 2 +大連 2 +大阪 2 +天地 2 +天子 2 +天師 2 +天敵 2 +天氣 2 +天衣 2 +天雷 2 +太古 2 +太多 2 +太大 2 +太子 2 +太守 2 +太祖 2 +夸脫 2 +奉天 2 +契合 2 +奢侈 2 +奧布賴恩 2 +奧斯曼 2 +奧朗則布 2 +奧林匹克 2 +奪冠 2 +女士 2 +女孩 2 +女皇 2 +妖精 2 +妖魔 2 +妥善 2 +姊妹 2 +始皇 2 +姐妹 2 +姐弟 2 +姑家 2 +姓 2 +姓名 2 +姚 2 +姜 2 +姿態 2 +威斯康辛 2 +威爾遜 2 +娘舅 2 +婆婆 2 +嫉妒 2 +子夜 2 +子珍 2 +孔子 2 +字元 2 +字型 2 +字體 2 +存有 2 +存活 2 +孟能 2 +季前 2 +季軍 2 +孤僻 2 +孤獨 2 +孵化 2 +學制 2 +學問 2 +學士 2 +學年 2 +學期 2 +學童 2 +學系 2 +學費 2 +宇一郎 2 +守衛 2 +安修 2 +安息 2 +安打 2 +安東尼 2 +安菲特裡忒 2 +安邑 2 +完 2 +完整 2 +宏觀 2 +宗室 2 +官職 2 +定下 2 +定名 2 +定型 2 +定期 2 +客 2 +客串 2 +客人 2 +客室 2 +客機 2 +客車 2 +宣戰 2 +害怕 2 +家久 2 +家境 2 +家屬 2 +家產 2 +家衛 2 +家貓 2 +寄宿 2 +密切 2 +密蘇里 2 +富 2 +富人 2 +富特 2 +實力 2 +實務 2 +實用 2 +實習 2 +審 2 +審判 2 +審查 2 +寫道 2 +寬廣 2 +寬頻 2 +寬鬆 2 +寶石 2 +寺廟 2 +寺院 2 +封神 2 +封面 2 +射殺 2 +專區 2 +專員 2 +專有 2 +專題 2 +尉 2 +尊嚴 2 +尊重 2 +尋常 2 +對峙 2 +對待 2 +對陣 2 +小兒 2 +小姐 2 +小心 2 +小桃 2 +小梅 2 +小鎮 2 +小閻 2 +小青 2 +就任 2 +就業 2 +尺 2 +尼克森 2 +尼羅 2 +尼西亞 2 +尼采 2 +尾部 2 +局限 2 +居 2 +居委 2 +居里 2 +屋大維 2 +屋苑 2 +展 2 +展館 2 +履仁 2 +屬名 2 +山丘 2 +山坡 2 +山海 2 +岩石 2 +岳母 2 +岳父 2 +崇拜 2 +崔西 2 +崖 2 +嵌 2 +嶺南 2 +嶽麓 2 +川 2 +工兵 2 +工商 2 +工農 
2 +工黨 2 +左上 2 +左側 2 +巧眉 2 +巧言 2 +差距 2 +差點 2 +巴克特里亞 2 +巴勒斯坦 2 +巴哈歐拉 2 +巴格曼 2 +巴格達 2 +巴洛克 2 +巴爾幹 2 +巴納德 2 +巷 2 +市值 2 +市內 2 +市商 2 +市郊 2 +市長 2 +布卡 2 +布拉格 2 +布朗 2 +布爾薩 2 +布魯明頓 2 +希羅 2 +帕洛馬 2 +帛琉 2 +帶去 2 +帶有 2 +常務 2 +常年 2 +常德 2 +常春藤葉 2 +常規 2 +幫忙 2 +干擾 2 +干涉 2 +干預 2 +平 2 +平安 2 +平息 2 +平成 2 +平方尺 2 +平時 2 +平頂 2 +年初 2 +年紀 2 +年譜 2 +幼體 2 +床墊 2 +序數 2 +底層 2 +店鋪 2 +度母 2 +座堂 2 +庫夫 2 +庫容 2 +庭 2 +康乃爾 2 +康復 2 +廉租 2 +廠房 2 +廢 2 +廢墟 2 +廢止 2 +廣安 2 +廣義 2 +延任 2 +延遲 2 +建有 2 +建銘 2 +引種 2 +引退 2 +引進 2 +弗朗索瓦 2 +強風 2 +彈奏 2 +彈性 2 +彌迦 2 +彙集 2 +彩色 2 +影展 2 +往返 2 +征 2 +征戰 2 +很近 2 +後端 2 +後裔 2 +徒刑 2 +得票 2 +得道 2 +從小 2 +從而 2 +從軍 2 +御苑 2 +微山 2 +德州 2 +德瑞克 2 +德輔 2 +心中 2 +必 2 +志剛 2 +快樂 2 +忽必烈 2 +思念 2 +思明 2 +思科 2 +性交 2 +恆鳳 2 +恐慌 2 +恥辱 2 +恩 2 +恩寵 2 +恩賜 2 +悅強 2 +悲觀 2 +情意 2 +惠 2 +惠山 2 +愈 2 +愉景 2 +意志 2 +意願 2 +愛因斯坦 2 +愛德華 2 +愛惜 2 +感動 2 +感受 2 +感染 2 +慈幼 2 +慈鯛 2 +態 2 +慘敗 2 +慣例 2 +慶尚 2 +慶豐 2 +慾望 2 +憎恨 2 +憑 2 +應對 2 +懊惱 2 +懷俄明 2 +懷舊 2 +懸浮 2 +懸索 2 +成仙 2 +成傑 2 +成因 2 +成型 2 +成就 2 +成群 2 +成貓 2 +戰亂 2 +戰列 2 +戰場 2 +戰士 2 +戰線 2 +戰術 2 +戴 2 +戴麟趾 2 +房 2 +手冊 2 +手動 2 +手機 2 +手裡 2 +才能 2 +打工 2 +打敗 2 +打破 2 +打開 2 +托爾斯 2 +托爾斯泰 2 +扶植 2 +找出 2 +找回 2 +找尋 2 +承諾 2 +抄襲 2 +抓 2 +抓住 2 +投影 2 +投降 2 +抗擊 2 +抽取 2 +拆穿 2 +拆解 2 +拉斐爾 2 +拉格 2 +拔出 2 +拖 2 +拖延 2 +招商 2 +招股 2 +拷貝 2 +拼音 2 +拿 2 +拿到 2 +拿破崙 2 +拿走 2 +指引 2 +指控 2 +指涉 2 +按鍵 2 +挖角 2 +挽救 2 +捐贈 2 +捕 2 +捕獲 2 +捕食 2 +捷運 2 +掉 2 +排 2 +排放 2 +排氣 2 +排演 2 +掛架 2 +掠過 2 +掠食 2 +採訪 2 +接待 2 +接掌 2 +接種 2 +接駁 2 +控球 2 +推廣 2 +推翻 2 +推選 2 +描寫 2 +提倡 2 +提及 2 +提示 2 +插圖 2 +揚聲 2 +換入 2 +換股 2 +損傷 2 +損毀 2 +搞笑 2 +搭檔 2 +搶險 2 +摩根 2 +摩爾 2 +撤出 2 +撤軍 2 +播 2 +播客 2 +擅長 2 +擊 2 +擊退 2 +擒抱 2 +擔負 2 +據守 2 +擺脫 2 +擾動 2 +支出 2 +支柱 2 +收 2 +收復 2 +收發 2 +收穫 2 +收視 2 +收集 2 +改回 2 +改寫 2 +改建 2 +改版 2 +改良 2 +改裝 2 +攻佔 2 +攻陷 2 +放映 2 +放置 2 +政協 2 +政變 2 +政黨 2 +故意 2 +故此 2 +故鄉 2 +效忠 2 +效率 2 +敏 2 +敏感 2 +敗給 2 +教友 2 +教員 2 +教徒 2 +教派 2 +教科文 2 +整修 2 +整套 2 +整體 2 +敵對 2 +數位 2 +數十 2 +數理 2 +數目 2 +文元 2 +文官 2 +文帝 2 +文康 2 +文英 2 +文華 2 +文革 2 +斐濟 2 +斥資 2 +斯圖爾特 2 +斯大林 2 +斯氏星蟒 2 +斯洛維尼亞 2 +斯特勒謝尼 2 +斯理 2 +新宿 2 +新岩 2 +新曲 2 +新澤西 2 +新田 2 +新興 2 +斷裂 2 +方位 2 +方針 2 +施行 2 +旁 2 +旁邊 2 +旅鴿 2 +旋律 2 +日喀則 2 +日本龍 
2 +日益 2 +日航 2 +日行 2 +早上 2 +旺山 2 +旺盛 2 +昆士蘭 2 +昌 2 +明帝 2 +易名 2 +昔日 2 +星形 2 +春天 2 +春日 2 +是否是 2 +時尚 2 +時速 2 +晉升 2 +晚上 2 +晚會 2 +普 2 +普及 2 +普陀 2 +景 2 +景帝 2 +景觀 2 +景象 2 +智慧 2 +暑假 2 +暗殺 2 +暢銷 2 +暫停 2 +暫緩 2 +暴露 2 +曝氣 2 +更好 2 +更改 2 +更深 2 +更高 2 +書信 2 +書寫 2 +書房 2 +書法 2 +最久 2 +最小 2 +最少 2 +最新 2 +最遊 2 +會員 2 +會場 2 +會社 2 +會談 2 +會長 2 +月刊 2 +有助 2 +有意 2 +有毒 2 +有罪 2 +服 2 +服從 2 +朔日 2 +朝代 2 +木星 2 +木管 2 +末年 2 +末期 2 +本區 2 +本屆 2 +本班 2 +本站 2 +本質 2 +本願 2 +朴 2 +村落 2 +村頭 2 +束縛 2 +杭 2 +東亞 2 +東吳 2 +東山 2 +東征 2 +東晉 2 +東正 2 +東視 2 +松潘 2 +松鼠猴 2 +林庄 2 +果實 2 +果汁 2 +架設 2 +柏油 2 +染色 2 +柔佛 2 +柔弱 2 +查 2 +查德 2 +柯 2 +柱 2 +柳 2 +柳江 2 +柴油 2 +柴灣 2 +校內 2 +校隊 2 +核能 2 +根本 2 +格式 2 +格林 2 +格林維爾 2 +格格 2 +格檔 2 +格里高利 2 +桃太洛斯 2 +桌面 2 +桑葚 2 +棕熊 2 +棣 2 +植被 2 +楊樹 2 +業者 2 +極大 2 +極性 2 +極高 2 +榮獲 2 +樁 2 +樂農 2 +標語 2 +標題 2 +樞機 2 +樟湖 2 +模具 2 +樣本 2 +樹木 2 +橙 2 +機動 2 +機員 2 +機槍 2 +橡膠 2 +橫山 2 +橫濱 2 +橫跨 2 +檢索 2 +檢討 2 +權威 2 +權貴 2 +次數 2 +次級 2 +次要 2 +次郎 2 +次長 2 +欺騙 2 +歌仔 2 +歌唱 2 +歌聲 2 +歌迷 2 +正是 2 +正直 2 +正統 2 +正面 2 +此人 2 +此案 2 +此物 2 +此種 2 +此線 2 +此舉 2 +此類 2 +步態 2 +武大 2 +武昌 2 +武松 2 +歧視 2 +歸類 2 +死靈 2 +殘存 2 +殘忍 2 +殘酷 2 +殯葬 2 +殺傷 2 +殺掉 2 +每位 2 +每周 2 +每層 2 +每日 2 +每次 2 +毒性 2 +毒殺 2 +毒藥 2 +毗鄰 2 +毛利 2 +民不聊生 2 +民調 2 +民都洛水牛 2 +氣 2 +氣田 2 +氧 2 +氫彈 2 +氯金酸 2 +水孔 2 +水手 2 +水準 2 +水溫 2 +水滸 2 +水質 2 +水道 2 +水餃 2 +永嘉 2 +永寧 2 +汗位 2 +汝霖 2 +江北 2 +江戶 2 +池尻 2 +決心 2 +決戰 2 +沃夫 2 +沖繩 2 +沙 2 +沙咀 2 +沙柏 2 +沙河 2 +沙龍 2 +河水 2 +泉州 2 +法蘭西 2 +法醫 2 +泡沫 2 +波塞摩斯 2 +注射 2 +注重 2 +泰坦 2 +洗 2 +洗手 2 +洛辛堡 2 +洞 2 +活 2 +活性 2 +派出 2 +派別 2 +派駐 2 +流傳 2 +流失 2 +流求 2 +流派 2 +流通 2 +浙東 2 +浩劫 2 +浮冰 2 +海南 2 +海涌 2 +海豹 2 +海邊 2 +海關 2 +消化 2 +消失 2 +涌 2 +涮 2 +淄博 2 +淮 2 +淮河 2 +深厚 2 +深得 2 +深愛 2 +深遠 2 +淹沒 2 +添加 2 +清晨 2 +清楚 2 +清華 2 +清鍾 2 +減輕 2 +游牧 2 +湖州 2 +湘 2 +湯興 2 +溝通 2 +溪流 2 +溫和 2 +溫州 2 +溫暖 2 +滄州 2 +滅口 2 +滙豐 2 +滬東 2 +滿足 2 +漁業 2 +漂流 2 +演說 2 +漢佛瑞 2 +漢口 2 +漸漸 2 +潔 2 +潭西 2 +潮州 2 +澤普 2 +澳底 2 +激光 2 +激戰 2 +激起 2 +濃縮 2 +濕原 2 +濕度 2 +濟寧 2 +濱松 2 +濱湖 2 +瀏覽 2 +灌木 2 +灘 2 +火藥 2 +灰狼 2 +灰色 2 +災害 2 +炮台 2 +為數 2 +烏孫 2 +無力 2 +無效 2 +無界 2 +無緣 2 +無辜 2 +無黨 2 +焦耳 2 +然 2 +煙熏 2 +照料 2 +照顧 2 +煮制 2 +熊 2 +熊隻 2 +熱比婭 2 +熱衷 2 +燃燒 2 +燒毀 2 +燒餅 2 +燕山 2 
+爪 2 +爪獸 2 +爭取 2 +爭執 2 +爭辯 2 +爭霸 2 +父子 2 +爾後 2 +牆體 2 +片段 2 +牙買加 2 +牛仔 2 +牛肉 2 +牧師 2 +牧養 2 +物價 2 +特使 2 +特性 2 +特權 2 +特種 2 +犬隻 2 +犬齒 2 +犯 2 +狀 2 +狐狸 2 +猛烈 2 +猛虎 2 +猶他 2 +猶豫 2 +獅 2 +獎章 2 +獎金 2 +獨居 2 +獨自 2 +獵奇 2 +獵殺 2 +玄 2 +玄機 2 +率軍 2 +玉帶 2 +玉門 2 +王位 2 +王妃 2 +玩 2 +玩具 2 +珀西 2 +珍 2 +珍品 2 +現址 2 +現狀 2 +球迷 2 +理念 2 +琪 2 +琴 2 +琴行 2 +瑪利亞 2 +瑪納斯 2 +瑪莉 2 +瑪麗亞 2 +環島 2 +環形 2 +環球 2 +環礁 2 +瓊璘 2 +瓜分 2 +瓦爾那 2 +甚 2 +甚少 2 +甚麼 2 +甜甜 2 +生日 2 +生母 2 +生病 2 +產值 2 +產區 2 +產物 2 +用地 2 +用電 2 +由來 2 +由衷 2 +甲 2 +甲板 2 +甲醇 2 +申花 2 +男爵 2 +町村 2 +留存 2 +留學 2 +留意 2 +留香 2 +畜 2 +番 2 +畫上 2 +畫報 2 +異性 2 +當事 2 +當代 2 +當前 2 +當場 2 +當成 2 +疫情 2 +病人 2 +病理 2 +痕迹 2 +登記 2 +登輝 2 +發售 2 +發回 2 +發掘 2 +發覺 2 +發音 2 +白紙 2 +白金漢 2 +白馬 2 +百度 2 +皇子 2 +皇宮 2 +皮 2 +皮埃蒙特 2 +盆子 2 +益世 2 +盟校 2 +監察 2 +監製 2 +監視 2 +直人 2 +直布羅陀 2 +直轄 2 +直通 2 +相戀 2 +相等 2 +相識 2 +相連 2 +省立 2 +看似 2 +看法 2 +真宗 2 +真情 2 +真的 2 +眼鏡 2 +眾人 2 +睡衣 2 +矚目 2 +矛盾 2 +知節 2 +短面熊 2 +矮人 2 +石化 2 +石原 2 +石家 2 +砍柴 2 +研製 2 +研討 2 +砲 2 +硅 2 +硫磺 2 +硬 2 +硬體 2 +碎石 2 +碘 2 +碧翠絲 2 +碩士 2 +確實 2 +磅 2 +磨損 2 +礦 2 +礦業 2 +示 2 +示威 2 +社交 2 +祂 2 +祕教 2 +神代 2 +票 2 +祺瑞 2 +福來 2 +福利 2 +福部 2 +福音 2 +禮節 2 +禽龍 2 +秀全 2 +秀吉 2 +秋天 2 +科幻 2 +科爾多瓦 2 +科羅拉多 2 +科赫 2 +科雷馬 2 +秘魯 2 +租客 2 +移除 2 +稀有 2 +稅 2 +程式 2 +種姓 2 +稱呼 2 +稱臣 2 +稱讚 2 +稻盛 2 +穆罕默德 2 +積分 2 +空缺 2 +穿耳 2 +突出 2 +突厥 2 +突擊 2 +窟 2 +立下 2 +立憲 2 +立熙 2 +站台 2 +竟然 2 +竣工 2 +童星 2 +競技 2 +競馬 2 +笑話 2 +笨 2 +第九 2 +第十一 2 +筆下 2 +等到 2 +等待 2 +策 2 +策劃 2 +管弦 2 +管治 2 +節奏 2 +簡 2 +簡易 2 +簽署 2 +籃壇 2 +籃子 2 +籌建 2 +籤 2 +米利特 2 +米格 2 +米爾扎 2 +精度 2 +精武 2 +精液 2 +精緻 2 +精美 2 +精采 2 +糖 2 +糖份 2 +紀 2 +約克 2 +約定俗成 2 +約會 2 +約瑟夫 2 +紅木 2 +紅麴 2 +紋 2 +紐卡斯爾 2 +紓緩 2 +純淨 2 +純粹 2 +紙 2 +紙幣 2 +紛紛 2 +素質 2 +索引 2 +細緻 2 +終 2 +組長 2 +結 2 +結晶 2 +結識 2 +絕望 2 +統稱 2 +絲綢 2 +經紀 2 +經費 2 +綠 2 +維 2 +維修 2 +維吉尼亞 2 +維鈞 2 +網上 2 +網友 2 +網民 2 +緊鄰 2 +線粒 2 +線西 2 +編入 2 +編寫 2 +編製 2 +緩存 2 +緩慢 2 +緬因 2 +縣城 2 +縣治 2 +縣長 2 +縱橫 2 +縱貫 2 +總值 2 +總會 2 +總監 2 +總管 2 +總額 2 +繁忙 2 +繁榮 2 +繁殖 2 +繚 2 +繞城 2 +繪圖 2 +續篇 2 +續約 2 +罪 2 +罪案 2 +罪行 2 +署名 2 +署長 2 +罷黜 2 +罹患 2 +羅丹 2 +羅塞塔 2 +羅斯 2 +羅斯基勒 2 +羅斯提 2 +羅漢 2 +羅素 2 +羅貝爾 2 +羊 2 +羊曲 2 +羊毛 2 +羌 2 +美女 2 +義 2 +義和 2 +習 2 +習性 2 +翦 2 
+翻新 2 +翻越 2 +翼 2 +老年 2 +老式 2 +老舍 2 +考場 2 +考證 2 +耕作 2 +耕種 2 +耳道 2 +耶和華 2 +耶律 2 +耶魯 2 +聖三 2 +聖地 2 +聘任 2 +聚 2 +聚合 2 +聚居 2 +聯 2 +聯名 2 +聰明 2 +聲名 2 +聲望 2 +聲道 2 +聽 2 +肉糕 2 +肉食 2 +肖 2 +肖像 2 +肖金 2 +肝臟 2 +股 2 +股權 2 +肢 2 +肯尼迪 2 +育才 2 +育種 2 +肺炎 2 +胎兒 2 +胖子 2 +能源 2 +能級 2 +腎 2 +腓特烈 2 +腳 2 +腳趾 2 +腹面 2 +腺葉木犀欖 2 +膝蓋 2 +膠質 2 +臘汁 2 +臣民 2 +臨床 2 +臨淄 2 +臨近 2 +臨邑 2 +自主 2 +自助 2 +自家 2 +自殺 2 +自衛 2 +自轉 2 +臭氧 2 +至於 2 +致 2 +致死 2 +臺中 2 +臺北 2 +興化 2 +舉 2 +舉人 2 +舉動 2 +舊址 2 +舒服 2 +舒適 2 +舞台 2 +船尾 2 +船廠 2 +船艦 2 +船長 2 +艇 2 +艦艇 2 +艱苦 2 +色度 2 +色素 2 +花卉 2 +花崗 2 +花樣 2 +花費 2 +苗 2 +若干 2 +若是 2 +苦惱 2 +英俊 2 +英超 2 +英雄 2 +茨威格 2 +荷花 2 +荷蘭豬 2 +莆田 2 +莉拉 2 +莎拉 2 +莎莉 2 +莫名 2 +莫泊桑 2 +莫爾庫斯 2 +莫雷爾 2 +莫高 2 +菁英 2 +菌 2 +菩薩 2 +華夏 2 +華隆 2 +華麗 2 +菲 2 +萊特 2 +萬宜 2 +萬春 2 +萬萬 2 +落入 2 +落差 2 +葉子 2 +葉海亞 2 +葉片 2 +著想 2 +著迷 2 +葛馮 2 +葡萄牙 2 +葵盛 2 +蒂羅爾 2 +蒐集 2 +蒙 2 +蒙山 2 +蒙蔽 2 +蒸餾 2 +蓄電 2 +蓮屬 2 +蔬菜 2 +蔭權 2 +薩 2 +薩達姆 2 +薪資 2 +藉 2 +藉口 2 +藉著 2 +藍調 2 +藍鯨 2 +藏在 2 +藝員 2 +蘇丹 2 +蘇爾曼 2 +蘇維埃 2 +蘇黎世 2 +蘭 2 +虎豹 2 +虐待 2 +虔誠 2 +處境 2 +蛇 2 +蛇夫 2 +蛇類 2 +螺旋 2 +蠟燭 2 +蠻族 2 +血清 2 +血緣 2 +行李 2 +行程 2 +行車 2 +行駛 2 +術士 2 +街區 2 +衛冕 2 +衛戍 2 +表皮 2 +袋中 2 +裁定 2 +補充 2 +補助 2 +裝病 2 +製冷 2 +製片 2 +西元 2 +西區 2 +西沙 2 +西甲 2 +西站 2 +西鄰 2 +西鐵 2 +西門子 2 +西雅圖 2 +要素 2 +要職 2 +見義勇為 2 +見證 2 +規範 2 +視覺 2 +親密 2 +親屬 2 +親情 2 +親戚 2 +親緣 2 +親近 2 +觀世音 2 +觀塘 2 +觀賞 2 +角宿 2 +角逐 2 +解 2 +解鎖 2 +解體 2 +言論 2 +訂婚 2 +訂購 2 +計 2 +討伐 2 +記號 2 +許可 2 +訴說 2 +註冊 2 +評議 2 +評選 2 +詞彙 2 +詩篇 2 +詮釋 2 +話語 2 +該廟 2 +該車 2 +該館 2 +誕辰 2 +誘發 2 +語堂 2 +誤導 2 +說唱 2 +課 2 +課題 2 +調動 2 +調料 2 +調景 2 +論壇 2 +諸侯 2 +諸葛 2 +諾貝爾 2 +謎 2 +謙虛 2 +講 2 +謠言 2 +證件 2 +證券 2 +證據 2 +譜 2 +譜寫 2 +警報 2 +警官 2 +警方 2 +警長 2 +譯 2 +譯名 2 +譯法 2 +護士 2 +護法 2 +變得 2 +變換 2 +變更 2 +變異 2 +谷 2 +象棋 2 +豪華 2 +豫 2 +貓頭鷹 2 +貝爾尼納 2 +財務 2 +財困 2 +財團 2 +財物 2 +貨櫃 2 +販子 2 +貪污 2 +貴人 2 +買下 2 +買來 2 +賀氏 2 +資方 2 +資產 2 +賠償 2 +賢妃 2 +質子 2 +質疑 2 +質素 2 +購入 2 +購物 2 +賽馬 2 +赤川 2 +走出 2 +走路 2 +起飛 2 +趁機 2 +超越 2 +越低 2 +越獄 2 +越遠 2 +越高 2 +趕出 2 +趨 2 +趨同 2 +路上 2 +路口 2 +身 2 +身長 2 +躲過 2 +車中 2 +車廂 2 +車資 2 +車隊 2 +軍區 2 +軍校 2 +軍法 2 +載重 2 +輔音 2 +輕 2 +輕傷 2 +輕型 2 +輕視 2 +輟學 2 +轄境 2 +轄有 2 +轉介 2 +轉車 2 +轎車 2 +轟動一時 2 +辛亥 2 +辣妹 
2 +辦 2 +辭退 2 +辯護 2 +農地 2 +農場 2 +農曆 2 +農田 2 +農藥 2 +近藤 2 +近衛 2 +迫害 2 +迴避 2 +迷 2 +迷信 2 +迷幻 2 +追溯 2 +退化 2 +送 2 +送入 2 +逃 2 +逃出 2 +逃避 2 +透明 2 +逐鹿 2 +通報 2 +通婚 2 +通知 2 +通稱 2 +通航 2 +速寫 2 +速率 2 +造出 2 +造船 2 +連同 2 +連帶 2 +連鎖 2 +週年 2 +進修 2 +進出口 2 +進化 2 +進駐 2 +遇到 2 +遊仙 2 +遊客 2 +遊玩 2 +運河 2 +運用 2 +運轉 2 +過世 2 +過勞 2 +過年 2 +過於 2 +過海 2 +過關 2 +道場 2 +道生 2 +達爾 2 +違反 2 +遞歸 2 +遠東 2 +遭受 2 +遴選 2 +遵守 2 +遷 2 +遷往 2 +選中 2 +選拔 2 +選民 2 +選為 2 +遺囑 2 +遺產 2 +遺跡 2 +遼東 2 +還珠 2 +那些 2 +那樣 2 +邦初 2 +郊外 2 +部下 2 +部件 2 +部族 2 +郵件 2 +都柏林 2 +都統 2 +鄭國 2 +鄭氏 2 +鄰國 2 +配合 2 +配對 2 +酒吧 2 +酒泉 2 +酒醉 2 +醜聞 2 +醫藥 2 +釉下 2 +里昂 2 +里程 2 +重修 2 +重型 2 +重華 2 +重言 2 +重返 2 +重重 2 +重量 2 +野獸 2 +量表 2 +金山 2 +金星 2 +金牌 2 +金蓮 2 +金雞 2 +金馬 2 +鈞 2 +銀 2 +鋅 2 +鋼鐵 2 +錢 2 +錫金 2 +鍋 2 +鍵盤 2 +鐘錶 2 +鐵伊 2 +鐵達尼 2 +鑄造 2 +長久 2 +長城 2 +長女 2 +長春 2 +長老 2 +長者 2 +長興 2 +長蘆 2 +長軸 2 +長音 2 +門前 2 +門口 2 +門戶 2 +門齒 2 +開創 2 +開口 2 +開心 2 +開拍 2 +開採 2 +開會 2 +開火 2 +開羅 2 +開賽 2 +開通 2 +開門 2 +開除 2 +閏年 2 +間接 2 +間隙 2 +閘門 2 +閱讀 2 +闊 2 +關注 2 +關聯 2 +關說 2 +關鍵 2 +關門 2 +關閉 2 +防範 2 +防衛 2 +阻擋 2 +阻礙 2 +阿 2 +阿保機 2 +阿姆斯特丹 2 +阿格 2 +阿美 2 +附帶 2 +降級 2 +降解 2 +除籍 2 +陰霾 2 +陵墓 2 +陵寢 2 +陶瓷 2 +陷阱 2 +陽澄 2 +隆頭魚 2 +隊友 2 +隊長 2 +隋 2 +隋代 2 +隔 2 +隕石 2 +隨之 2 +集成 2 +集資 2 +集雨 2 +集體 2 +雍正 2 +雕像 2 +離任 2 +離婚 2 +離心 2 +雪貂 2 +雲想 2 +零星 2 +電動 2 +電壓 2 +電流 2 +電纜 2 +電能 2 +電路 2 +電鐵 2 +震動 2 +震盪 2 +震驚 2 +霍 2 +霍普 2 +霸主 2 +霸王 2 +靈素 2 +青聯 2 +青藏 2 +青銅 2 +靜電 2 +面臨 2 +面試 2 +面部 2 +音系 2 +音變 2 +韻律 2 +頂 2 +順序 2 +預備 2 +預定 2 +預言 2 +預計 2 +頒布 2 +頒發 2 +頓 2 +領地 2 +頭等 2 +頭部 2 +願望 2 +顧 2 +顯得 2 +顯聖 2 +顯著 2 +風俗 2 +風景 2 +風氣 2 +風濕 2 +風雲 2 +風靡 2 +食夢 2 +食材 2 +飢荒 2 +飯 2 +飲品 2 +飲用 2 +餘額 2 +館藏 2 +饑荒 2 +饒舌 2 +首位 2 +首播 2 +首爾 2 +首腦 2 +首部 2 +香蕉 2 +馬丁 2 +馬克思 2 +馬克斯 2 +馬其頓 2 +馬拉松 2 +馬歇爾 2 +馬耳他 2 +馬里奧 2 +駐紮 2 +駐足 2 +駕駛 2 +骨 2 +骨頭 2 +骨髓 2 +體制 2 +體力 2 +體型 2 +體校 2 +體重 2 +體驗 2 +高低 2 +高壓 2 +高平 2 +高校 2 +高止 2 +高能 2 +高興 2 +高郵 2 +鬆散 2 +鬼 2 +魅力 2 +魏 2 +魚雷 2 +魚頭 2 +魯殊 2 +鮮明 2 +鯉形 2 +鯉科 2 +鰂魚 2 +鱸形 2 +鳥取 2 +鳥綱 2 +鳳凰 2 +鳳翔 2 +鹼 2 +鹿 2 +鹿兒 2 +麗珠 2 +麗茲 2 +麥克塞 2 +麥爾斯 2 +麻河 2 +麻省 2 +黃帝 2 +黃色 2 +黑幫 2 +黑貓 2 +黑龍 2 +黔 2 +點擊 2 +點數 2 +點球 2 +黨派 2 +鼎盛 2 +鼠疫 2 +齊 2 +齊克果 2 +齒擦 2 +齒軌 2 
+齧齒 2 +龐家堡 2 +$9,999 1 +$99,999 1 ++ 1 +-99 1 +-999 1 +9--9 1 +9-9.9 1 +9.99% 1 +9.999% 1 +9.9999萬 1 +9.99萬 1 +9/9 1 +99-99 1 +999-999LR 1 +999.999 1 +9999-9999 1 +999M 1 +999X 1 +999cm 1 +999萬9千餘 1 +999餘 1 +999餘萬 1 +99B 1 +99萬9千 1 +9C 1 +9F 1 +9nd 1 +9億9999萬 1 +9億9千萬 1 +9成 1 +9百多萬 1 +9萬億 1 +9萬多 1 +A9 1 +A999-999 1 +AC 1 +AEG 1 +AEK 1 +AFD 1 +AMORC 1 +Aankhen 1 +Abante 1 +Abdurrahman 1 +Activision 1 +Adilabad 1 +Adisumarmo 1 +Admiral 1 +Advance 1 +Aero 1 +AeroMobile 1 +Aethra 1 +Ages 1 +Airlines 1 +Airport 1 +Aleksej 1 +Alliance 1 +Alpha 1 +Alyssum 1 +Android 1 +Anne 1 +Antarctic 1 +Argonauts 1 +Arwadi 1 +Arzacq-Arraziguet 1 +Auld 1 +Auteuil 1 +Avenue 1 +Aviation 1 +Aviv 1 +B9 1 +BHCs 1 +BT 1 +Bad 1 +Baldwin 1 +Ballklub 1 +Bank 1 +Baronet 1 +Barros 1 +Barsbold 1 +Beatles 1 +Beaune 1 +Beaune-Sud 1 +Beckham 1 +Beinasco 1 +Belgaum 1 +Bellagio 1 +Berg 1 +Berne-Belp 1 +Besar 1 +Blake 1 +Books 1 +Boot 1 +Brett 1 +Brian 1 +Briann 1 +Bronfenbrenner 1 +Brough 1 +Bruce 1 +Bud 1 +CARET 1 +CBE 1 +CEC 1 +CET 1 +CI-9999 1 +CIT999B 1 +CMS 1 +CNZZ 1 +CP 1 +CPU 1 +CRH999B 1 +CRH999B-999 1 +CRH999C 1 +CRYPTON 1 +Caen 1 +Calling 1 +Campaign 1 +Campostoma 1 +Canal 1 +Cannon 1 +Capital 1 +Caroline 1 +Castle 1 +Cathedral 1 +Cerro 1 +Chapman 1 +Chase 1 +Chau 1 +Chell 1 +Christopher 1 +Chrome 1 +Churchill 1 +City 1 +Claritin 1 +Clark 1 +Cohen 1 +Colchis 1 +Color 1 +Comic 1 +Company 1 +Connecticut 1 +Conroy 1 +Cornell 1 +Cost 1 +Costa 1 +Council 1 +Cushing 1 +Cálida 1 +DDC 1 +DFH9 1 +DMFC 1 +DS 1 +DSM 1 +Daisuke 1 +Dakota 1 +Damrosch 1 +Daria 1 +Dark 1 +Dart 1 +Dawn 1 +DeSanctis 1 +Dennis 1 +Derby 1 +Devasthanam 1 +Dialogue 1 +Digi 1 +DigiBook 1 +Direct 1 +Divisione 1 +Dog 1 +Doodle 1 +Dorian 1 +Dossing 1 +Dragon 1 +Durst 1 +E 1 +EPA 1 +ET 1 +EXE 1 +Eden 1 +El 1 +Electronic 1 +Elisabeth 1 +Ellie 1 +Elliot 1 +Eminescu 1 +End 1 +Entertainment 1 +Epithema 1 +Epstein 1 +Estate 1 +Expedition 1 +FLY 1 +FSB 1 +FUDOSI 1 +Falls 1 +Family 1 +Fernando 1 +Films 1 +Firefox 1 +Firozpur 1 +Fleet 1 +Fook 
1 +Forever 1 +Fortran 1 +Fox 1 +Frank 1 +Franpipe 1 +Fred 1 +Frito-Lay 1 +Fund 1 +G 1 +G99A 1 +GB 1 +GTO 1 +Galliano 1 +Gear 1 +Geophysical 1 +German 1 +Ghost 1 +Gibbs 1 +Giuliano 1 +Golden 1 +Good 1 +Goodnow 1 +Government 1 +Grant 1 +Greater 1 +Greenbelt 1 +Greenville 1 +Groening 1 +Ground 1 +Group 1 +Guariglia 1 +HIV 1 +HP 1 +Halifax 1 +Harry 1 +Harvey 1 +Hau 1 +Haven 1 +HeH 1 +Herrera 1 +Herschel 1 +Higher 1 +Hillman 1 +Holy 1 +Hondt 1 +Hopkins 1 +Housing 1 +Humphrey 1 +Hunt 1 +I 1 +IB 1 +IGBT 1 +IGY 1 +IPark 1 +IUPAC 1 +IV 1 +Illumination 1 +In 1 +India 1 +Ingeri 1 +Innocence 1 +International 1 +Iron 1 +Isartor 1 +Ischl 1 +It's 1 +JPL 1 +Jay 1 +Jazz 1 +Jeff 1 +Johnson 1 +Justin 1 +Juvisy 1 +KINGFISHER 1 +KKR 1 +Kansas 1 +Karaköy 1 +Karlstor 1 +Kate 1 +Kekal 1 +Kenway 1 +Kilpatrick 1 +Kink.com 1 +Kinross 1 +Knudstrup 1 +Koffka 1 +Kurnool 1 +Kurt 1 +LCD 1 +LD99 1 +Langdon 1 +Langford 1 +Language 1 +Last 1 +Leaf 1 +Lees 1 +Lennart 1 +Lethal 1 +Liaoxipterus 1 +Lilim 1 +Linux 1 +Liu 1 +Lomidine 1 +Lotz 1 +Low 1 +Lowell 1 +MD-99 1 +MM 1 +Maddie 1 +Magic 1 +Magma 1 +MallRide 1 +Mamaia 1 +Man 1 +Manea 1 +Maolan 1 +Maria 1 +Mario 1 +Market 1 +Marshlands 1 +Martin 1 +Mayflower 1 +Mechernich 1 +Medical 1 +Menachem 1 +Merina 1 +Methala 1 +Metress 1 +Meyers 1 +Michaelerkirche 1 +Micro 1 +Micro-USM 1 +Middle 1 +Mihai 1 +Mintz 1 +Mitchell 1 +Modern 1 +Mogens 1 +Money 1 +Monsters 1 +Montana 1 +Multitier 1 +Mundell 1 +Museum 1 +My 1 +Myers 1 +N99 1 +NCAA 1 +NHK 1 +NIST 1 +Name 1 +Nanocells 1 +Natasha 1 +Nazionale 1 +Neluset 1 +Neverwhere 1 +Niarchos 1 +Nibiru 1 +Nirmal 1 +Norman 1 +North 1 +Novogrudok 1 +O. 
1 +ORI 1 +OVA 1 +Odd 1 +Omega 1 +Omniworld 1 +Online 1 +Opus 1 +Orjan 1 +Orkney 1 +Ospatulus 1 +Otto 1 +P 1 +P9O9 1 +PASMO 1 +PFA 1 +PLANES 1 +Paleorhinus 1 +Pangjiabu 1 +Papa 1 +Park 1 +Pau 1 +Paul 1 +Perouse 1 +Persson 1 +Perth 1 +Phil 1 +Philippa 1 +Piano 1 +Pinerolo 1 +Pisapia 1 +Pittsburghia 1 +Place 1 +PlanetShanghai 1 +Playgirl 1 +Police 1 +Pre-rendering 1 +Presbyterian 1 +Primary 1 +Psychology 1 +Pukaki 1 +Pulau 1 +Purma 1 +Quartet 1 +Quentin 1 +Quest 1 +R9 1 +RBK 1 +RBS 1 +RIAA 1 +Railway 1 +Record 1 +Recordon 1 +Reserve 1 +Return 1 +Review 1 +RhCl9 1 +Rinchen 1 +River 1 +Roble 1 +Rocha 1 +Rolf 1 +Rosenborg 1 +Rossabi 1 +Ruger 1 +Russell 1 +S9 1 +SBE 1 +SEC 1 +SS 1 +ST 1 +STIF 1 +Safari 1 +Salomon 1 +Sam 1 +Sara 1 +Sarianidi 1 +Savannah 1 +School 1 +Schuchat 1 +Sea 1 +Secobarbital 1 +Seemann 1 +Sendlinger 1 +SensMe 1 +Shame 1 +Sharon 1 +Sheegog 1 +Sheinkin 1 +Simon 1 +Snipes 1 +Social 1 +Sofi 1 +Soobedars 1 +Soviet 1 +Spector 1 +Spirit 1 +Spittel 1 +Sportsnet 1 +Srisailamgudem 1 +Standard 1 +Stanton 1 +Star 1 +Statpipe 1 +Stavros 1 +Steinbeck 1 +Stephen 1 +Steven 1 +Stonewall 1 +Street 1 +Streymoy 1 +Stutsman 1 +Suica 1 +Sunset 1 +Suzuki 1 +Syahrin 1 +Sōya 1 +T 1 +TF 1 +TF99 1 +TNM 1 +TVS-9 1 +Tau 1 +Technology 1 +Tel 1 +Texas 1 +Theodor 1 +Thomas 1 +Thrissur 1 +Timati 1 +Time 1 +Tor 1 +Train 1 +Tru 1 +Tsang 1 +Tweddle 1 +Twisty 1 +Tyler 1 +UMLS 1 +USPHS 1 +Uhler-Phillips 1 +Un 1 +Union 1 +University 1 +Utricularia 1 +VVVF 1 +Valla 1 +Varginha 1 +Victoria 1 +Viktor 1 +Villa 1 +Volantis 1 +WHO 1 +WTA 1 +Walker 1 +Walter 1 +Wesley 1 +West 1 +Westmeath 1 +Wheeler 1 +Wii 1 +William 1 +Wing 1 +Wireless 1 +Woman 1 +Wood 1 +Woodside 1 +World 1 +X 1 +Year 1 +YouTube 1 +Zeepipe 1 +`` 1 +académie 1 +architecture 1 +asteroid 1 +bar 1 +bransoni 1 +can 1 +ceyhan 1 +copper 1 +director 1 +double 1 +e 1 +earth 1 +entity 1 +f(x) 1 +g(x) 1 +gbest 1 +hangar 1 +hear 1 +iPhone 1 +iTunes 1 +justice 1 +km 1 +laurifolia 1 +liability 1 +loratadin 1 +managing 1 +morus 1 +n=9 1 
+nickel 1 +no 1 +one 1 +ornatum 1 +p 1 +pbest 1 +peronismo 1 +rhythm 1 +rock 1 +sandwithii 1 +scream 1 +scree 1 +shelters 1 +space 1 +study 1 +supply 1 +t.999.com 1 +t.qq.com 1 +t.sina.com.cn 1 +t.sohu.com 1 +t.xxxx.com 1 +to 1 +touch 1 +trail 1 +truncatulus 1 +view 1 +w=9 1 +white 1 +x 1 +you 1 +zone 1 +° 1 +ð 1 +þ 1 +̄ 1 +θ 1 +〔 1 +〕 1 +一, 1 +一中全會 1 +一九五八 1 +一併 1 +一億 1 +一八 1 +一分為二 1 +一到 1 +一勞永逸 1 +一反其道 1 +一字一句 1 +一式一樣 1 +一成 1 +一戰 1 +一改 1 +一時 1 +一概 1 +一模一樣 1 +一氧化碳 1 +一炮 1 +一爭 1 +一發 1 +一百 1 +一百幾十 1 +一百萬 1 +一百餘 1 +一益 1 +一而再、再而三 1 +一舉 1 +一落千丈 1 +一見鍾情 1 +一路 1 +一身 1 +一邊 1 +一點 1 +丁字 1 +丁目 1 +七七 1 +七十 1 +七里 1 +三、 1 +三一 1 +三中 1 +三中全會 1 +三井 1 +三井住友 1 +三亞 1 +三元 1 +三十四 1 +三原 1 +三崎 1 +三星 1 +三氯化銠 1 +三氯氧釩 1 +三浦 1 +三王 1 +三百 1 +三百六七十 1 +三百多 1 +三索頜腔蛇 1 +三船 1 +三菱 1 +三萬 1 +三藩市 1 +三軍 1 +三門 1 +上下 1 +上下行 1 +上傳 1 +上去 1 +上古 1 +上司 1 +上埔 1 +上報 1 +上塘 1 +上奏 1 +上學 1 +上尉 1 +上手 1 +上新世 1 +上朝 1 +上林 1 +上沖 1 +上班 1 +上端 1 +上網 1 +上線 1 +上色 1 +上蓋 1 +上訪 1 +上調 1 +上路 1 +上身 1 +上車 1 +上選 1 +上部 1 +上限 1 +上集 1 +上雲 1 +上顎 1 +下剋上高潮 1 +下圖 1 +下徹 1 +下樓 1 +下河 1 +下潛 1 +下獄 1 +下稱 1 +下蝕 1 +下設 1 +下課 1 +下跌 1 +下車 1 +下遊 1 +下部 1 +下關 1 +下院 1 +下集 1 +下雷 1 +下面 1 +下顎 1 +下風 1 +不丹 1 +不乏 1 +不以為然 1 +不克 1 +不入 1 +不凡 1 +不出 1 +不出所料 1 +不利 1 +不到 1 +不力 1 +不動 1 +不去 1 +不吃 1 +不合 1 +不和 1 +不問 1 +不均 1 +不多 1 +不大 1 +不定 1 +不實 1 +不惜 1 +不愛 1 +不懷好意 1 +不折不扣 1 +不捨 1 +不收 1 +不敬 1 +不料 1 +不易 1 +不景 1 +不服 1 +不朽 1 +不歸 1 +不準 1 +不理 1 +不畏 1 +不符 1 +不純 1 +不絕 1 +不行 1 +不衰 1 +不要 1 +不見天日 1 +不解 1 +不計其數 1 +不該 1 +不詳 1 +不豐 1 +不賣 1 +不輸 1 +不辭辛勞 1 +不道 1 +不適 1 +不銹 1 +不限 1 +不露 1 +不顧 1 +且是 1 +世上 1 +世人 1 +世代相傳 1 +世充 1 +世則 1 +世子 1 +世家 1 +世昌 1 +世民 1 +世田谷 1 +世祿 1 +世綱 1 +世貿 1 +世道 1 +世銘 1 +丘 1 +丙組 1 +丞相 1 +並無 1 +並稱 1 +並系 1 +中信 1 +中南 1 +中南海 1 +中原 1 +中堅 1 +中場 1 +中底層 1 +中彈 1 +中性 1 +中投 1 +中斷 1 +中旬 1 +中校 1 +中樞 1 +中檔 1 +中殿 1 +中毒 1 +中波希米亞 1 +中田 1 +中級 1 +中綴 1 +中線 1 +中耳 1 +中聯 1 +中興 1 +中葉 1 +中藥 1 +中西方 1 +中西醫 1 +中觀 1 +中超 1 +中農 1 +中鐵 1 +串聯 1 +丸都 1 +丹 1 +丹噶爾 1 +丹尼士達智 1 +丹路殊 1 +主修 1 +主創 1 +主導 1 +主帶 1 +主幹 1 +主意 1 +主控 1 +主治 1 +主炮 1 +主犯 1 +主筆 1 +主船 1 +主食 1 +乃威 1 +久經 1 +久藏 1 +之所以 1 +之申 1 +之銓 1 +之鋒 1 +乘勢 1 
+乘搭 1 +乘撘 1 +乘裝 1 +乙 1 +乙二胺 1 +乙未 1 +乙組 1 +乙苯 1 +九一一 1 +九十 1 +九江 1 +九鐵 1 +乳房 1 +乾季 1 +乾德 1 +乾淨 1 +乾西 1 +亂 1 +亂倫 1 +亂刀 1 +事先 1 +事態 1 +事發 1 +事與願違 1 +事跡 1 +事蹟 1 +二中全會 1 +二二八 1 +二十一 1 +二十二 1 +二十五 1 +二十八 1 +二十多 1 +二十萬 1 +二宮 1 +二戶 1 +二百 1 +二百五十餘 1 +二百餘 1 +二胺 1 +二郎 1 +于敏 1 +互作 1 +互利 1 +互助 1 +互惠 1 +互通 1 +互選 1 +五一 1 +五中全會 1 +五分之一 1 +五十 1 +五十一 1 +五十六 1 +五常 1 +五弟 1 +五彩繽紛 1 +五成半 1 +五指 1 +五氧化二氮 1 +五百萬 1 +五萬三千 1 +井字 1 +井村 1 +井田 1 +些微 1 +亞丁 1 +亞他那修 1 +亞伯塔 1 +亞伯拉罕 1 +亞冠龍 1 +亞利桑納 1 +亞基 1 +亞奧 1 +亞彬 1 +亞德里亞堡 1 +亞文 1 +亞普芮 1 +亞東 1 +亞歷山大丹尼士 1 +亞流 1 +亞烏扎 1 +亞特蘭大 1 +亞瑟 1 +亞當斯 1 +亞西爾 1 +亞運 1 +亞邦 1 +亞麻 1 +亡故 1 +交付 1 +交代 1 +交出 1 +交口 1 +交回 1 +交州 1 +交替 1 +交棒 1 +交涉 1 +交界 1 +交行 1 +交角 1 +交談 1 +交道 1 +交錯 1 +亦即 1 +亨 1 +亨得利 1 +享 1 +京劇 1 +京王 1 +京釜 1 +亭湖 1 +亮相 1 +人世 1 +人仕 1 +人字 1 +人客 1 +人手 1 +人日 1 +人權 1 +人殉 1 +人氣 1 +人祭 1 +人種 1 +人稱 1 +人行 1 +人道 1 +人選 1 +人麻呂 1 +仁傑 1 +仁和 1 +仁壽 1 +仁守 1 +仁宗 1 +仁煥 1 +仁牙因 1 +仁玕 1 +仁穆 1 +仁粹 1 +仁青 1 +仇人 1 +今川 1 +介壽 1 +介質 1 +仍是 1 +仍有 1 +仍算 1 +他倆 1 +他家 1 +仙人打坐 1 +仙女木 1 +仙鶴 1 +代亞布羅 1 +代價 1 +代名詞 1 +代幣 1 +代數 1 +代牧 1 +代碼 1 +令狐 1 +令華 1 +以爲 1 +仰光 1 +仰望 1 +仲 1 +仲雄 1 +任免 1 +任選 1 +伊 1 +伊克巴爾 1 +伊利 1 +伊利沙伯 1 +伊塔蒂亞亞 1 +伊娃 1 +伊尹 1 +伊摩琴 1 +伊朗 1 +伊犁 1 +伊甸 1 +伊薩爾 1 +伊里亞德 1 +伊阿宋 1 +伊頓 1 +伍德 1 +伍德羅 1 +伎倆 1 +伏塔 1 +伏契克 1 +伏爾加 1 +伏瓦蒂爾 1 +伐 1 +休假 1 +休克 1 +休士頓 1 +休憩 1 +休斯 1 +休閑 1 +休養 1 +伙食 1 +伯克爾 1 +伯多祿 1 +伯恩 1 +伯恩哈德 1 +伯明翰 1 +伯格 1 +伯溫 1 +伯爾尼 1 +伯納姆 1 +伯納雷 1 +伯茲貝格 1 +伯莎 1 +伯虎 1 +伯謙 1 +伯達 1 +伴侶 1 +伴奏 1 +伴有 1 +伴生 1 +伶 1 +伸一 1 +伸冤 1 +伸延 1 +伸港 1 +伽馬 1 +佈局 1 +佈置 1 +佈道 1 +位在 1 +位居 1 +位階 1 +位面 1 +低下 1 +低估 1 +低價 1 +低層 1 +低平 1 +低座 1 +低檔 1 +低潮 1 +低等 1 +低調 1 +低額 1 +住所 1 +住進 1 +佐佐木 1 +佐勞爾 1 +佐和子 1 +佐民 1 +佔用 1 +何利菲德 1 +何力特 1 +何方 1 +佛事 1 +佛典 1 +佛瑞爾斯 1 +佛經 1 +佛羅倫斯 1 +佛羅里達 1 +佛萊明 1 +佛蒙特 1 +佛頭 1 +作對 1 +作怪 1 +作曲 1 +作次郎 1 +作法 1 +作為 1 +作畫 1 +作雲 1 +作風 1 +佩佐拉諾 1 +佩儂 1 +佩戴 1 +佩琪 1 +佩蘭多 1 +佬 1 +佳作 1 +佳佳 1 +佳節 1 +併發 1 +使喚 1 +使團 1 +使節 1 +侄子 1 +來看 1 +來臨 1 +來襲 1 +來館 1 +侈談 1 +侍奉 1 +侍女 1 +侍從 1 +侏羅 1 +供水 1 +供電 1 +供養 1 +依次 1 +依照 1 +依瑪 1 +依託 1 +依託泊苷 1 +依附 1 +侮辱 1 +侯 1 +侵佔 1 +侵害 1 +便利 1 +便捷 1 +便是 1 +便服 1 +便當 1 +便秘 1 +俊業 1 +俗 1 +俘獲 1 +保 
1 +保住 1 +保全 1 +保加爾 1 +保大 1 +保定 1 +保密 1 +保明 1 +保溫 1 +保羅費雷拉 1 +保送 1 +保養 1 +俠 1 +信中 1 +信念 1 +信教 1 +信玄 1 +信神 1 +信竹 1 +信裡 1 +修好 1 +修學 1 +修憲 1 +修煉 1 +修羅 1 +修葺 1 +修鞋 1 +修養 1 +俯瞰 1 +俸祿 1 +俾路支 1 +倉促 1 +倉庫 1 +個位 1 +個個 1 +個展 1 +倒下 1 +倒入 1 +倖免 1 +候旨 1 +候補 1 +倚天 1 +倚靠 1 +借 1 +倩文 1 +倫巴底 1 +倫拜 1 +倫納特 1 +倬標 1 +倭國 1 +倭寇 1 +假使 1 +假借 1 +假名 1 +假帳 1 +假設 1 +假說 1 +假象 1 +假釋 1 +假面 1 +偉 1 +偉強 1 +偏低 1 +偏僻 1 +偏向 1 +偏小 1 +偏東 1 +偏重 1 +偏離 1 +做到 1 +停刊 1 +停業 1 +停機 1 +停泊 1 +停職 1 +停辦 1 +停靠 1 +停飛 1 +健壯 1 +健將 1 +健身 1 +側目 1 +側邊 1 +偵察 1 +偵測 1 +偵緝 1 +偶像 1 +偶發 1 +偷取 1 +偷羊 1 +偷襲 1 +偷走 1 +偽 1 +偽季米特里 1 +偽裝 1 +傀儡 1 +傅萊 1 +傍 1 +傍晚 1 +傑克托爾 1 +傑志 1 +傑斐遜 1 +備忘 1 +備戰 1 +備案 1 +備用 1 +備註 1 +傢具 1 +催芽 1 +傭人 1 +傳來 1 +傳給 1 +傳記 1 +傳遍 1 +債券 1 +傷及 1 +傷心 1 +傷患 1 +傷悲 1 +傷病 1 +傷透 1 +傻 1 +傾心 1 +傾談 1 +僅屬 1 +僅用 1 +像差 1 +僑 1 +僕人 1 +僖 1 +僧人 1 +僧孺 1 +僧尼 1 +僧格 1 +僧祐 1 +僱主 1 +僱傭 1 +僵局 1 +價位 1 +價錢 1 +儀器 1 +億 1 +儒士 1 +儘快 1 +儘量 1 +償付 1 +優 1 +優值 1 +優良 1 +優裕 1 +優質 1 +儲量 1 +儷 1 +允 1 +允良 1 +元子 1 +元朝 1 +元氣 1 +元澄 1 +元老 1 +元起 1 +兄 1 +兄長 1 +充任 1 +充分 1 +充氣 1 +充滿 1 +充軍 1 +兆基 1 +兆楠 1 +兆陽 1 +兇多吉少 1 +兇悍 1 +兇猛 1 +先前 1 +先帝 1 +先師 1 +先賢 1 +先鋒 1 +先驗 1 +光啟 1 +光學 1 +光宇 1 +光州 1 +光度 1 +光復 1 +光景 1 +光束 1 +光泰 1 +光滑 1 +光照 1 +光環 1 +光范 1 +光華 1 +光顧 1 +克利普頓 1 +克力佛 1 +克勤 1 +克家 1 +克拉瑪 1 +克拉西奇 1 +克敏能 1 +克欽 1 +克洛頓 1 +克特勒 1 +克羅維茲 1 +克羅迪歐 1 +克蘇魯 1 +克裡斯 1 +克農 1 +克里姆希爾特 1 +克里斯多夫 1 +克里斯多弗 1 +克里斯托弗 1 +克里波門 1 +克魯 1 +兌換 1 +免 1 +免疫 1 +免遭 1 +兔毛 1 +兢兢業業 1 +入世 1 +入地 1 +入塞 1 +入境 1 +入手 1 +入聲 1 +入股 1 +入閘 1 +入院 1 +入駐 1 +內化 1 +內卡薩 1 +內在 1 +內埔 1 +內壁 1 +內政 1 +內置 1 +內胎 1 +內臟 1 +內載 1 +內遷 1 +全劇 1 +全名 1 +全境 1 +全壘 1 +全套 1 +全島 1 +全州 1 +全得 1 +全德 1 +全效 1 +全敗 1 +全數 1 +全書 1 +全盛 1 +全盤 1 +全省 1 +全福 1 +全程 1 +全稱 1 +全線 1 +全興 1 +全邨 1 +全鎮 1 +全隊 1 +全額 1 +全黑 1 +兩億 1 +兩千五百萬 1 +兩千萬 1 +八世 1 +八十九 1 +八卦 1 +八思巴 1 +八成 1 +八杉 1 +八百 1 +公仔 1 +公佈 1 +公克 1 +公告 1 +公墓 1 +公屋 1 +公斤 1 +公款 1 +公正 1 +公狼 1 +公約 1 +公衛 1 +公袥 1 +公視 1 +公超 1 +公關 1 +公頃 1 +公館 1 +六七 1 +六千 1 +六千四百萬 1 +六合 1 +六四 1 +六安 1 +共享 1 +共尾 1 +共生 1 +共處 1 +共識 1 +共鳴 1 +兵房 1 +兵鋒 1 +其妻 1 +其子 1 +其次 1 +其母 1 +典籍 1 +兼修 1 +兼具 1 +兼容 1 +兼屬 1 +兼并 1 +冀望 1 +冉 1 +冊 1 +再三 1 +再保 1 +再用 1 
+再臨 1 +再補 1 +再見 1 +冒 1 +冒險 1 +冠 1 +冠上 1 +冠峰 1 +冠狀 1 +冠玉 1 +冢 1 +冤案 1 +冥冥 1 +冥想 1 +冬初 1 +冬眠 1 +冬青 1 +冰 1 +冰冰 1 +冰塔 1 +冰晶 1 +冰柱 1 +冰河 1 +冰湖 1 +冰瀑 1 +冰球 1 +冰風 1 +冷凍 1 +冷暖氣 1 +冷次 1 +冷氣 1 +冷眼 1 +冷遇 1 +冷靜 1 +凄美 1 +准 1 +准考 1 +凈白 1 +凊 1 +凌 1 +凌日 1 +凌晨 1 +凌辱 1 +凌駕 1 +凍傷 1 +凝結 1 +凡爾登 1 +凡爾賽 1 +凱恩 1 +凱文 1 +凱爾特 1 +凱維埃爾 1 +凱美特 1 +凱茜 1 +凶 1 +凸 1 +凸起 1 +凹版 1 +出世 1 +出人意料 1 +出到 1 +出動 1 +出去 1 +出名 1 +出品 1 +出國 1 +出城 1 +出奇 1 +出嫁 1 +出局 1 +出師 1 +出廠 1 +出征 1 +出手 1 +出擊 1 +出校 1 +出榜 1 +出血 1 +出訪 1 +出路 1 +出逃 1 +出門 1 +出頭 1 +刀鞘 1 +分工 1 +分店 1 +分批 1 +分攤 1 +分數 1 +分明 1 +分枝 1 +分校 1 +分泌 1 +分流 1 +分發 1 +分科 1 +分立 1 +分站 1 +分管 1 +分組 1 +分缺 1 +分貝 1 +分辨 1 +分部 1 +分鏡 1 +分隔 1 +分離 1 +分題 1 +分點 1 +切下 1 +切分 1 +切割 1 +切合 1 +切實 1 +切成 1 +切望 1 +切爾尼赫 1 +切片 1 +刑事 1 +刑部 1 +划算 1 +划艇 1 +列斯聯 1 +列維爾 1 +初中 1 +初始 1 +初時 1 +初次 1 +初步 1 +初見 1 +判 1 +判令 1 +判定 1 +判寺事 1 +判詞 1 +別人 1 +別名 1 +別院 1 +利他能 1 +利刃 1 +利好 1 +利潘迪特蘭堡 1 +利維奧 1 +刪剪 1 +刮目相看 1 +到任 1 +到期 1 +到發 1 +制動 1 +制式 1 +制瓷 1 +制約 1 +制酸 1 +刷 1 +刷到 1 +券 1 +券頂 1 +刺殺 1 +刻劃 1 +刻寫 1 +刻板 1 +刻滿 1 +刻畫 1 +則士 1 +則里拉 1 +削減 1 +前傾 1 +前去 1 +前因後果 1 +前奏 1 +前委 1 +前嫌 1 +前季 1 +前提 1 +前景 1 +前稱 1 +前端 1 +前綴 1 +前者 1 +前肢 1 +前齒 1 +剛剛 1 +剛性 1 +剛直 1 +剛鐸 1 +剩 1 +剩餘 1 +副長 1 +割據 1 +割破 1 +割讓 1 +割開 1 +創保 1 +創傷 1 +創刊 1 +創煥 1 +創生 1 +剷除 1 +剿 1 +剿滅 1 +劃出 1 +劃歸 1 +劃界 1 +劇中 1 +劇作 1 +劇場 1 +劇組 1 +劍俠 1 +劍法 1 +劍麻 1 +劑量 1 +力克 1 +力圖 1 +力霸 1 +功勞 1 +功德 1 +功樂 1 +功績 1 +加侖 1 +加值 1 +加冕 1 +加利奇 1 +加劇 1 +加勁 1 +加恩卡納 1 +加爾文 1 +加粗 1 +加藤 1 +加賀 1 +加速 1 +加電 1 +劣 1 +助 1 +助手 1 +助燃 1 +助聽 1 +助長 1 +努兒道刺特 1 +劫匪 1 +劫持 1 +効忠 1 +勁光 1 +勁報 1 +勁敵 1 +勁歌 1 +勃起 1 +勇俊 1 +勇士 1 +勇武 1 +勒溫 1 +動人 1 +動向 1 +動土 1 +動漫 1 +動漫畫 1 +動用 1 +動能 1 +動蕩 1 +動詞 1 +動量 1 +勘探 1 +務工 1 +勝 1 +勝任 1 +勝昭 1 +勝素 1 +勝者 1 +勝訴 1 +勝賴 1 +勞埃德 1 +勞累 1 +募款 1 +募集 1 +勢傾中外 1 +勢能 1 +勤先 1 +勤快 1 +勳位 1 +勳爵 1 +勵珍 1 +勸 1 +勾形 1 +勾畫 1 +勾結 1 +包袱 1 +包裹 1 +包覆 1 +包頭 1 +化名 1 +化妝 1 +化成 1 +化整為零 1 +化用 1 +化肥 1 +北伐 1 +北側 1 +北冰 1 +北卡羅萊納 1 +北景 1 +北歐 1 +北段 1 +北甘馬粦 1 +北美擬獅 1 +北車 1 +北返 1 +北達科他 1 +北邊 1 +匡 1 +匯入 1 +匯合 1 +匯報 1 +匯聯 1 +匯集 1 +匹 1 +匹茲堡 1 +匾額 1 +區塊 1 +區段 1 +區間 1 +十二世 1 +十二烷基苯 1 +十全十美 1 +十八億 1 +十八大 1 +十四 1 +十數 1 +十萬 1 
+十餘 1 +千兆 1 +千克 1 +千島 1 +千方百計 1 +千春 1 +千瓦 1 +千米 1 +千萬 1 +千里迢迢 1 +千陽 1 +千鶴 1 +升值 1 +升到 1 +升天 1 +升越 1 +升降 1 +升高 1 +午膳 1 +半導體 1 +半牧 1 +半農 1 +卑詩 1 +卓著 1 +協合 1 +協理 1 +南人 1 +南卡羅萊納 1 +南哲 1 +南大 1 +南安 1 +南安普頓 1 +南寧 1 +南市 1 +南征 1 +南端 1 +南線 1 +南美 1 +南臨 1 +南航 1 +南船 1 +南路 1 +南通 1 +南遷 1 +南鄰 1 +南門 1 +南開 1 +南院 1 +南雄 1 +南麓 1 +博 1 +博凱蒂 1 +博多 1 +博學 1 +博斯維爾 1 +博格 1 +博洛尼亞 1 +博滕 1 +博義 1 +博覽 1 +占星 1 +卡亞尼 1 +卡內拉 1 +卡利帕斯 1 +卡力崗 1 +卡夫 1 +卡夫卡 1 +卡巴雷羅 1 +卡希 1 +卡帕克 1 +卡拉OK 1 +卡拉柯伊 1 +卡拉維拿 1 +卡斯楚 1 +卡斯特羅 1 +卡普里維 1 +卡波特 1 +卡洛克 1 +卡洛斯 1 +卡洛曼 1 +卡洛琳 1 +卡羅來納 1 +卡羅萊納 1 +卡臣 1 +卡薩諾瓦 1 +卡車 1 +卡達 1 +卦 1 +卧底 1 +卧病 1 +卧薪嘗膽 1 +印信 1 +印刷 1 +印地安那 1 +印度尼西亞 1 +印第安納 1 +印第安納波利斯 1 +印表 1 +危在旦夕 1 +危害 1 +危殆 1 +即場 1 +即有 1 +卵內 1 +厘 1 +原先 1 +原型 1 +原姓 1 +原屬 1 +原平 1 +原意 1 +原指 1 +原文 1 +原核 1 +原畫 1 +原籍 1 +原罪 1 +原諒 1 +厥 1 +厭世 1 +厭惡 1 +去搶 1 +去留 1 +去看 1 +參戰 1 +參政 1 +參演 1 +參看 1 +參禮 1 +參贊 1 +參閱 1 +又廷 1 +又或 1 +及後 1 +及時 1 +友 1 +友情 1 +友邦 1 +反共 1 +反動 1 +反右 1 +反向 1 +反恐 1 +反省 1 +反綁 1 +反證 1 +反響 1 +反黨 1 +叔父 1 +取下 1 +取出 1 +取名 1 +取回 1 +取悅 1 +取液 1 +取物 1 +取用 1 +取而代之 1 +受命 1 +受孕 1 +受害 1 +受挫 1 +受洗 1 +受精 1 +受罰 1 +受襲 1 +受賄 1 +受阻 1 +受雇 1 +叛徒 1 +叛變 1 +叛軍 1 +叡 1 +叢刊 1 +叢書 1 +口供 1 +口信 1 +口吻 1 +口感 1 +口服 1 +口音 1 +古喙龍 1 +古堡 1 +古寺 1 +古廟 1 +古德諾 1 +古惑 1 +古斯塔夫 1 +古爾德 1 +古迹 1 +古都斯 1 +句子 1 +句點 1 +另加 1 +另娶 1 +另立 1 +另築 1 +另類 1 +只好 1 +只是 1 +只會 1 +只知 1 +只能 1 +叫作 1 +叫拜 1 +叫聲 1 +召 1 +召集 1 +可可 1 +可塑 1 +可愛 1 +可憐 1 +可樂 1 +可欣 1 +可西卡 1 +可靠 1 +可風 1 +台南 1 +台標 1 +台視 1 +台詞 1 +台長 1 +史前 1 +史坦貝克 1 +史官 1 +史帝芬 1 +史特勞斯 1 +史稱 1 +史記 1 +史跡 1 +史館 1 +右任 1 +右手 1 +右方 1 +右臂 1 +司可巴比妥 1 +司鐸 1 +吁宋 1 +吃上 1 +吃到 1 +吃掉 1 +吃法 1 +吃起 1 +各方 1 +各球 1 +各異 1 +各科 1 +各職 1 +各處 1 +各行各業 1 +各隊 1 +各項 1 +合共 1 +合力 1 +合和 1 +合唱 1 +合夥 1 +合奏 1 +合流 1 +合約 1 +合計 1 +合資 1 +合辦 1 +合適 1 +合陽 1 +合體 1 +吉利 1 +吉奧瓦尼 1 +吉姆 1 +吉布地 1 +吉拉德 1 +吉爾伯特 1 +吉祥 1 +吉米 1 +吉西 1 +吉隆坡 1 +吋 1 +同仁社 1 +同伴 1 +同僚 1 +同台 1 +同型 1 +同工 1 +同志 1 +同日 1 +同校 1 +同步 1 +同母 1 +同父 1 +同甘共苦 1 +同行 1 +同郷 1 +同食 1 +同飲 1 +名作 1 +名分 1 +名利雙收 1 +名城 1 +名帥 1 +名師 1 +名村 1 +名氣 1 +名流 1 +名聲 1 +名臣 1 +名茶 1 +名號 1 +名門 1 +名額 1 +后 1 +后妃 1 +吐 1 +吐嘈 1 +向前 1 +向滋 1 +君如 1 +君權 1 +君長 1 +吞下 1 
+吟唱 1 +否 1 +否決 1 +吧 1 +吩咐 1 +含 1 +含糖 1 +含量 1 +吳王 1 +吵醒 1 +吸塵 1 +吸毒 1 +吸菸 1 +吸附 1 +吸食 1 +吹來 1 +吹氣 1 +吹滅 1 +吻部 1 +呀 1 +呂宋 1 +呆 1 +呈交 1 +告戒 1 +告白 1 +周代 1 +周刊 1 +周敏 1 +周日 1 +周朝 1 +周期 1 +周迅 1 +周遭 1 +味道 1 +呼 1 +呼倫貝爾 1 +呼和浩特 1 +命題 1 +和夫 1 +和好 1 +和宜合 1 +和康 1 +和暖 1 +和會 1 +和林 1 +和樹 1 +和睦 1 +和美 1 +和衷 1 +和親 1 +和記 1 +和諧 1 +和議 1 +咧嘴 1 +咬弦 1 +咸平 1 +咸康 1 +咸淳 1 +咸美頓 1 +咸鏡 1 +咸陽 1 +哀悼 1 +品嘗 1 +品學兼優 1 +品德 1 +品源 1 +哈 1 +哈丹姆 1 +哈依拉爾 1 +哈剌旭烈 1 +哈吉 1 +哈布斯堡 1 +哈希姆 1 +哈恩 1 +哈拉帕那瓦 1 +哈索爾 1 +哈羅 1 +哈萊姆 1 +哈薩克 1 +哈達 1 +哈里斯堡 1 +哈里森 1 +哈默史密斯 1 +員佐 1 +員外 1 +哥利茲 1 +哥德堡 1 +哨所 1 +哪 1 +哭 1 +哲也 1 +哲元 1 +哲孟雄 1 +哲生 1 +哲蚌 1 +唇槍舌劍 1 +唐代 1 +售予 1 +售出 1 +售票 1 +唯 1 +唯獨 1 +唱戲 1 +唱法 1 +唸 1 +唸珠 1 +唾液 1 +商事 1 +商務 1 +商圈 1 +商城 1 +商埠 1 +商場 1 +商幫 1 +商朝 1 +商湯 1 +商用 1 +商羯羅 1 +商船 1 +商量 1 +啊 1 +問吧 1 +問話 1 +啟 1 +啟傑 1 +啟明 1 +啟發 1 +啟示 1 +啟程 1 +啟聯 1 +啟鑰 1 +啤酒 1 +喀什 1 +喀拉拉邦 1 +喀里多尼亞 1 +善事 1 +善作 1 +善待 1 +善後 1 +善惡 1 +善撲 1 +善良 1 +喇薩 1 +喊出 1 +喘息 1 +喙 1 +喙端 1 +喚 1 +喚回 1 +喚起 1 +喜 1 +喜好 1 +喝醉 1 +喝采 1 +喪失 1 +喬姆斯基 1 +喬木 1 +喬科維奇 1 +單獨 1 +單調 1 +單質 1 +單項 1 +嗅到 1 +嗎 1 +嗜酸 1 +嗜鹼 1 +嗣位 1 +嗣業 1 +嘉木揚 1 +嘉木樣 1 +嘉樂 1 +嘉許 1 +嘉道理 1 +嘉陵 1 +嘉靖 1 +嘔吐 1 +嘩然 1 +嘯林 1 +嘴 1 +噁心 1 +噁爆 1 +器具 1 +器械 1 +器蓋 1 +器身 1 +噴射 1 +噸位 1 +嚇人 1 +嚮導 1 +嚴 1 +嚴令 1 +嚴加 1 +嚴島 1 +嚴懲 1 +嚴斥 1 +嚴氏 1 +嚴肅 1 +嚴謹 1 +囊胚 1 +囑咐 1 +囚犯 1 +四十三 1 +四十多 1 +四十餘 1 +四周 1 +四平 1 +四方八面 1 +四牌 1 +四萬 1 +四郎 1 +回信 1 +回合 1 +回填 1 +回家 1 +回寺 1 +回彈 1 +回復 1 +回教 1 +回生 1 +回程 1 +回答 1 +因弗內斯 1 +因達農 1 +困 1 +困住 1 +困擾 1 +固 1 +固態 1 +固有 1 +固醇 1 +國中 1 +國主 1 +國光 1 +國公 1 +國共 1 +國史 1 +國名 1 +國君 1 +國土 1 +國奧 1 +國妃 1 +國安會 1 +國府 1 +國庫 1 +國情 1 +國慶 1 +國成 1 +國松 1 +國父 1 +國產 1 +國界 1 +國立 1 +國策 1 +國諱 1 +國雄 1 +圍坐 1 +圍棋 1 +圍牆 1 +圍魏救趙 1 +園丁 1 +園主 1 +園內 1 +園明園 1 +園林 1 +圓 1 +圓圓 1 +圓弧 1 +圓柱 1 +圓滑 1 +圓環 1 +圖取 1 +圖布丹 1 +圖形 1 +圖片 1 +圖示 1 +圖稿 1 +團圓 1 +團隊 1 +土匪 1 +土司 1 +土石 1 +土虱 1 +在崗 1 +在校 1 +在身 1 +地名 1 +地域 1 +地基 1 +地平 1 +地庫 1 +地政 1 +地板 1 +地標 1 +地盤 1 +地級 1 +地表 1 +地貌 1 +地質 1 +地道 1 +地震 1 +坂本 1 +均勻 1 +均衡 1 +坎特伯里 1 +坎貝爾 1 +坎農 1 +坐在 1 +坐監 1 +坐骨 1 +坡子 1 +坤玲 1 +坦 1 +坦克 1 +坦干伊喀 1 +坦然 1 +坦白 1 +型式 1 +垮台 1 +埃內韋塔克 1 +埃弗里 1 +埃米內斯庫 1 +埃米琳 1 +埃胡德 1 +埃雷拉 1 +埋怨 
1 +埋葬 1 +埋藏 1 +城主 1 +城光 1 +城內 1 +城南 1 +城址 1 +城巴 1 +城池 1 +城牆 1 +城西 1 +城隍 1 +埜堂 1 +埤 1 +執委 1 +執業 1 +執飛 1 +培元 1 +培育 1 +基層 1 +基希涅夫 1 +基平 1 +基徹 1 +基數 1 +基石 1 +基頻 1 +堂堂正正 1 +堅城 1 +堅定 1 +堅尼地 1 +堅拒 1 +堅蜥 1 +堆填 1 +堆積 1 +堈 1 +堪憐 1 +堪稱 1 +堪薩斯 1 +報仇 1 +報刊 1 +報名 1 +報復 1 +報讀 1 +場內 1 +場均 1 +場景 1 +塑像 1 +塑料 1 +塑有 1 +塑膠 1 +塔利班 1 +塔台 1 +塔吉克 1 +塔塔爾 1 +塔夫茨 1 +塔林 1 +塔樓 1 +塔西佗 1 +塗黑 1 +塚 1 +塞古拉 1 +塞德爾恰尼 1 +塞普提米烏斯 1 +塞法迪 1 +塞爾達 1 +塞琉古 1 +塞琉西 1 +塞維利亞 1 +塞維魯 1 +塞維魯敉 1 +塞隆 1 +塞音 1 +塞馬 1 +墓葬 1 +墓頂 1 +墜入 1 +墜落 1 +增殖 1 +增生 1 +增祥 1 +增進 1 +增額 1 +墟 1 +墟內 1 +墨 1 +墨客 1 +墨色 1 +墳 1 +墾田 1 +壓 1 +壓縮 1 +壞球 1 +壩上 1 +壩下 1 +士珍 1 +士禛 1 +士評 1 +壯漢 1 +壯烈 1 +壹 1 +壺 1 +壺中仙 1 +壽命 1 +壽宴 1 +壽星 1 +夏威夷 1 +夏愨 1 +夏秋季 1 +夏至 1 +夏茸切哇 1 +夏茸穹哇 1 +夏荷林 1 +夏默 1 +外借 1 +外力 1 +外加 1 +外務 1 +外匯 1 +外地 1 +外壁 1 +外套 1 +外層 1 +外形 1 +外殼 1 +外甥 1 +外甥女 1 +外省 1 +外管 1 +外表 1 +外褂 1 +外訪 1 +外語 1 +外銷 1 +多倫 1 +多元 1 +多汁 1 +多納德 1 +多謝 1 +多雨 1 +夜夜 1 +夜戰 1 +夠大 1 +夢中 1 +夢境 1 +夢幻 1 +夢想 1 +夢雲 1 +夢鴿 1 +夥兒 1 +大不了 1 +大乘 1 +大事 1 +大二 1 +大儒 1 +大區 1 +大友 1 +大受 1 +大吉 1 +大名 1 +大君 1 +大和 1 +大喊 1 +大國 1 +大圍 1 +大城 1 +大堆 1 +大堤 1 +大增 1 +大士 1 +大失所望 1 +大島 1 +大嶼 1 +大幅 1 +大怒 1 +大悟 1 +大敵 1 +大新 1 +大校 1 +大概 1 +大正 1 +大殿 1 +大汗 1 +大河 1 +大洋 1 +大湖 1 +大溪 1 +大漠 1 +大獲 1 +大理 1 +大發 1 +大窘 1 +大紅 1 +大經 1 +大綱 1 +大腦 1 +大腸 1 +大膽 1 +大舉 1 +大艇 1 +大華 1 +大蒜 1 +大街小巷 1 +大跌 1 +大路 1 +大辦 1 +大通 1 +大進 1 +大郎 1 +大部 1 +大都 1 +大釗 1 +大銘 1 +大門 1 +大雄 1 +大韓 1 +大馬 1 +大驚 1 +大體 1 +大鬧 1 +大黨 1 +大鼠 1 +天份 1 +天佐 1 +天使 1 +天倫之樂 1 +天元 1 +天安 1 +天寶樓 1 +天差地遠 1 +天性 1 +天悅 1 +天慶 1 +天才 1 +天母 1 +天河 1 +天涯 1 +天球 1 +天祐 1 +天窗 1 +天紀 1 +天翔 1 +天賜 1 +天賦 1 +天馬 1 +太傅 1 +太元 1 +太冷 1 +太初 1 +太后 1 +太宗 1 +太宰 1 +太尉 1 +太常 1 +太極 1 +太湖 1 +太炎 1 +太監 1 +太行 1 +太近 1 +太遠 1 +太郎 1 +夫仇 1 +夫妻 1 +央行 1 +失利 1 +失地 1 +失效 1 +失職 1 +失能 1 +失落 1 +失誤 1 +失蹤 1 +夷昧 1 +夾 1 +夾狀 1 +奇俠 1 +奇幻 1 +奇怪 1 +奇缺 1 +奈葉 1 +奉 1 +奉命 1 +奉安 1 +奉律 1 +奉新 1 +奉系 1 +奎德林堡 1 +奏 1 +奏鳴 1 +奕 1 +奕詝 1 +套出 1 +套用 1 +奢華 1 +奧伊 1 +奧克尼 1 +奧克蘭 1 +奧古斯丁 1 +奧姆 1 +奧得 1 +奧托 1 +奧斯卡 1 +奧斯威爾 1 +奧斯汀 1 +奧林匹亞絲 1 +奧林匹斯 1 +奧格斯堡 1 +奧爾滕 1 +奧爾登堡 1 +奧爾良 1 +奧特 1 +奧特伊 1 +奧的斯 1 +奧米加 1 +奧羽 1 +奧蒂洛 1 +奪去 1 +奬懲 1 +女人 1 +女傭 1 +女僕 1 +女優 1 +女友 1 
+女嬰 1 +女水 1 +女版 1 +女生 1 +女眷 1 +奴役 1 +奶爸 1 +奸 1 +她倆 1 +好上 1 +好奇 1 +好手 1 +好氧 1 +好色 1 +如數 1 +妄圖 1 +妊娠 1 +妖怪 1 +妙 1 +妮科爾 1 +妮綺 1 +妳 1 +妹 1 +妹夫 1 +妻妹 1 +妻姐 1 +妻室 1 +姊姊 1 +始發 1 +始祖 1 +始稱 1 +始興 1 +姑娘 1 +姑母 1 +委內瑞拉 1 +委身 1 +姚里 1 +姥姥 1 +姦情 1 +姪女 1 +姿色 1 +威 1 +威光 1 +威嚇 1 +威塞克斯 1 +威斯特米思從 1 +威格莫爾 1 +威權 1 +威爾伯 1 +威爾歇 1 +威特 1 +威舍 1 +威靈頓 1 +娘 1 +娘家 1 +娜塔莉 1 +婁 1 +婆 1 +婆羅 1 +婚 1 +婚事 1 +婚宴 1 +婚禮 1 +婢女 1 +婦 1 +婷婷 1 +媒介 1 +媚娘 1 +嫁與 1 +嫘縈 1 +嫣然 1 +嬰孩 1 +子孫 1 +子文 1 +子球 1 +子程 1 +孕育 1 +孕酮 1 +字喃 1 +字幕 1 +字模 1 +字號 1 +存世 1 +存取 1 +存放 1 +孝感 1 +孝次 1 +孟 1 +孟加拉 1 +孟德爾 1 +季後 1 +季惟 1 +季風 1 +季龍 1 +孤島 1 +孤芳自賞 1 +孤身 1 +孩提 1 +學到 1 +學前 1 +學家 1 +學府二道 1 +學業 1 +學民 1 +學津 1 +學社 1 +學聯 1 +學苑 1 +宇航 1 +守備 1 +守孝 1 +守文 1 +守法 1 +守臣 1 +守謙 1 +守齋 1 +安二郎 1 +安妮 1 +安安 1 +安岳 1 +安徒生 1 +安得拉 1 +安得拉邦 1 +安德魯 1 +安托瓦內特 1 +安撫 1 +安放 1 +安東 1 +安樂 1 +安正 1 +安民 1 +安汶 1 +安然 1 +安營 1 +安理 1 +安納 1 +安聯 1 +安葬 1 +安蘭 1 +安達信 1 +安那瑞安 1 +安那罕 1 +宋國 1 +完好 1 +完畢 1 +宏偉 1 +宏坤 1 +宏聲 1 +宏道 1 +宏量 1 +宗偉 1 +宗憲 1 +宗谷 1 +宗龍 1 +官兵 1 +官司 1 +官府 1 +官服 1 +官腔 1 +官話 1 +官邸 1 +官長 1 +宙域 1 +定位 1 +定價 1 +定向 1 +定影 1 +定性 1 +定案 1 +定理 1 +定量 1 +宛城 1 +宜興 1 +客場 1 +客家 1 +客觀 1 +客貨運 1 +客輪 1 +客量 1 +宣 1 +宣判 1 +宣化 1 +宣帝 1 +宣誓 1 +室外 1 +室溫 1 +宦官 1 +宮人 1 +宮崎 1 +宰李 1 +宴席 1 +宴會 1 +家光 1 +家勁 1 +家務 1 +家外 1 +家奴 1 +家干 1 +家用 1 +家立 1 +家道中落 1 +家驤 1 +容 1 +容器 1 +容忍 1 +容許 1 +容量 1 +宿敵 1 +宿根 1 +寄存 1 +寄送 1 +寅成 1 +密 1 +密山 1 +密文 1 +密歇根 1 +密西西比 1 +密集 1 +富商 1 +富恩特德奧羅 1 +富翁 1 +富蘭克林 1 +富裕 1 +富豪 1 +富貴 1 +富邦 1 +察合台 1 +察哈爾 1 +察沃 1 +寡尿 1 +實 1 +實則 1 +實屬 1 +實情 1 +實戰 1 +實收 1 +實權 1 +實況 1 +實踐 1 +寧波 1 +審批 1 +審理 1 +審計 1 +審評 1 +審議 1 +寫下 1 +寫信 1 +寫入 1 +寫出 1 +寫字 1 +寫成 1 +寫進 1 +寬容 1 +寬度 1 +寬敞 1 +寬條 1 +寬順 1 +寮國 1 +寵物 1 +寵臣 1 +寶光 1 +寶劍 1 +寶如 1 +寶應 1 +寶殿 1 +寶玉 1 +寶田 1 +寶血 1 +寶雞 1 +寶雲 1 +寶麗金 1 +寺前 1 +封土 1 +封為 1 +封爵 1 +封穴 1 +封號 1 +封裝 1 +封路 1 +射失 1 +射程 1 +射箭 1 +射線 1 +射鵰 1 +將來 1 +將領 1 +專 1 +專任 1 +專制 1 +專吃 1 +專指 1 +專政 1 +專機 1 +專橫 1 +專欄 1 +專權 1 +專款 1 +專注 1 +專線 1 +專註 1 +專賣 1 +專長 1 +專項 1 +尊崇 1 +尊敬 1 +尊稱 1 +尋回 1 +尋親 1 +對上 1 +對付 1 +對撞 1 +對準 1 +對照 1 +對生 1 +對白 1 +對稱 1 +對立 1 +對簿公堂 1 +對話 1 +對面 1 +對飛 1 +導 1 +導入 1 +導出 1 +導向 1 +導彈 1 +導播 1 +導正 
1 +導體 1 +小人 1 +小兔 1 +小刀 1 +小南 1 +小國 1 +小小 1 +小島 1 +小息 1 +小數 1 +小書 1 +小欖 1 +小水鴨 1 +小河兒 1 +小津 1 +小浪底 1 +小澤 1 +小片 1 +小生 1 +小田急 1 +小知 1 +小石 1 +小童 1 +小舖 1 +小虎 1 +小街 1 +小輪 1 +小野 1 +小隊 1 +小順 1 +小顏 1 +小風 1 +小體 1 +少兒 1 +少將 1 +少年 1 +少懷 1 +少林 1 +少見 1 +少許 1 +少量 1 +尖端 1 +尖酸 1 +尖頂 1 +尚州 1 +尚德 1 +尚方 1 +尚書 1 +尤利烏斯 1 +尤勒 1 +尤指 1 +尤里卡 1 +就此 1 +就熟 1 +就職 1 +尷尬 1 +尹 1 +尹氏 1 +尼克貝 1 +尼古丁 1 +尼古拉 1 +尼奧爾德 1 +尼師今 1 +尼庫瑙 1 +尼歐斯 1 +尼比魯 1 +尼爾 1 +尼爾斯 1 +尼爾馬爾 1 +尾 1 +尾巴 1 +尾柄 1 +尾隨 1 +尾鰭 1 +尾龍 1 +局勢 1 +局間 1 +居家 1 +居所 1 +居留 1 +居禮 1 +屆滿 1 +屋 1 +屋大薇 1 +屋宇 1 +屋頂 1 +屍 1 +屍體 1 +屏山 1 +屏東 1 +屏風 1 +展品 1 +展望 1 +展貿 1 +屠村 1 +屠龍 1 +層壓 1 +層次 1 +層疊 1 +層級 1 +層面 1 +履行 1 +屬國 1 +屬於 1 +屬靈 1 +屯南 1 +山下 1 +山內 1 +山口 1 +山地 1 +山姆 1 +山峰 1 +山崖 1 +山手 1 +山月 1 +山村 1 +山楂 1 +山猿 1 +山田 1 +山胞 1 +山葉 1 +山陵 1 +山麓 1 +山龍眼 1 +岐女短 1 +岐阜 1 +岐陽 1 +岑 1 +岔江 1 +岡恩 1 +岡本 1 +岩屋 1 +岩心 1 +岩手 1 +岩漿 1 +岳 1 +岳泰 1 +岷江 1 +岸川 1 +岸賈 1 +岸邊 1 +峯崎 1 +峰倉 1 +峰景 1 +島內 1 +島國 1 +島蚺 1 +峽 1 +峽灣 1 +峽谷 1 +崇善 1 +崇尚 1 +崇敬 1 +崎頭 1 +崔 1 +崔陂 1 +崗 1 +崗斜 1 +崙頂 1 +崞縣 1 +崩坍 1 +崩潰 1 +嵩祝 1 +巔峰 1 +川南 1 +川村 1 +川邊 1 +州界 1 +州舞 1 +巡査 1 +巢 1 +工事 1 +工務 1 +工序 1 +工廠 1 +工會 1 +工法 1 +工潮 1 +左右神策軍 1 +左岸 1 +左拉 1 +左派 1 +左膀 1 +左轉 1 +巨作 1 +巨像 1 +巨冊 1 +巨型 1 +巨石 1 +巨賈 1 +巨野 1 +巫師 1 +差 1 +差分 1 +差別 1 +差勁 1 +差會 1 +己二胺 1 +己巳 1 +己酉 1 +已故 1 +已晚 1 +已死 1 +巴 1 +巴亞莫 1 +巴克 1 +巴克禮 1 +巴列姆 1 +巴列斯特爾 1 +巴卑爾 1 +巴喬 1 +巴城 1 +巴塞 1 +巴塞羅那 1 +巴塞隆拿 1 +巴塞隆納 1 +巴孛許諾 1 +巴巴克 1 +巴庫 1 +巴思缽 1 +巴恩斯 1 +巴拉克 1 +巴拉尼 1 +巴斯克 1 +巴斯德 1 +巴斯蒂亞 1 +巴比 1 +巴爾虎 1 +巴爾齊蒂斯 1 +巴納夫 1 +巴納巴 1 +巴羅爾 1 +巴英額 1 +巴莫鱷 1 +巴蒂斯塔 1 +巴西利卡 1 +巴西班讓 1 +巴諾 1 +巴賽 1 +巴赫 1 +巴頓 1 +市售 1 +市縣 1 +市轄 1 +市面 1 +布 1 +布伯 1 +布倫努斯 1 +布列塔尼 1 +布哈林 1 +布宜諾斯艾利斯 1 +布拉亞斯 1 +布拉德 1 +布政 1 +布料 1 +布林 1 +布氏奇非鯽 1 +布爾 1 +布置 1 +布萊姆 1 +布蘭特福德 1 +布蘭登堡 1 +布賴滕費爾德 1 +布里奇曼 1 +布里斯托 1 +布里斯班 1 +布雷克 1 +布雷西亞 1 +布魯克林 1 +布魯斯 1 +帆布 1 +帆船 1 +希伯來 1 +希克森 1 +希爾曼 1 +希特勒 1 +希皮奧內 1 +希鵬 1 +帕克 1 +帕內爾 1 +帕搏 1 +帕爾曼 1 +帕特羅克洛斯 1 +帕米爾 1 +帕納辛奈克斯 1 +帕納辛納克斯 1 +帕維亞 1 +帕薩迪納 1 +帕西奧利 1 +帕迪恩 1 +帕金森 1 +帝王 1 +帝都 1 +師團 1 +師徒 1 +師從 1 +師父 1 +師生 1 +席勒 1 +帳目 1 +帶上 1 +帶出 1 +帶子 1 +帶少 1 +帶水 1 +常住 1 +常勝 1 +常客 1 +常態 1 +常春 1 +常春藤 1 +常盛 1 +常識 1 
+常量 1 +常青 1 +常駐 1 +幀 1 +幅 1 +幅員遼闊 1 +幕 1 +幕府 1 +幕後 1 +幢 1 +幣原 1 +幪面 1 +幫主 1 +干王 1 +平反 1 +平和 1 +平地 1 +平坦 1 +平帝 1 +平常 1 +平手 1 +平日 1 +平林 1 +平沼 1 +平滑 1 +平臺 1 +平行 1 +平陵 1 +平陽 1 +年中 1 +年份 1 +年幼 1 +年息 1 +年第 1 +年老 1 +年號 1 +年資 1 +年青 1 +并行 1 +幸一 1 +幸好 1 +幸運 1 +幹 1 +幹事 1 +幹掉 1 +幹流 1 +幹道 1 +幼子 1 +幼年 1 +幼弟 1 +幼發拉底 1 +幼稚 1 +幼貓 1 +幼魚 1 +幼鯨 1 +幼鳥 1 +幽閣 1 +幾內亞 1 +幾十 1 +幾千 1 +幾多 1 +幾百 1 +床 1 +床鋪 1 +底冊 1 +底格里斯 1 +底比斯 1 +底片 1 +底特律 1 +底稿 1 +底質 1 +店家 1 +庚戌 1 +府中 1 +府城 1 +府尹 1 +府第 1 +度宗 1 +座位 1 +座右 1 +座座 1 +座椅 1 +座西 1 +座談 1 +庫伊瓦 1 +庫伊瓦涅米 1 +庫哈斯 1 +庫柏力克 1 +庫欣 1 +庫爾特 1 +庫賽 1 +庫赫莫 1 +庫迪尼奧 1 +庫頁 1 +庭園 1 +庭薺 1 +庭長 1 +康乃狄克 1 +康史 1 +康奈爾 1 +康子 1 +康寧 1 +康樂 1 +康濟鼐 1 +康福 1 +康科德 1 +康羅伊 1 +廂 1 +廉潔 1 +廚師 1 +廝守 1 +廟倉 1 +廟方 1 +廟橋 1 +廟鎮 1 +廢棄 1 +廢熱 1 +廢舊 1 +廣受 1 +廣大 1 +廣大興 1 +廣權 1 +廣澳 1 +廣稱 1 +廣金 1 +廬山 1 +廳局 1 +廳長 1 +延安 1 +延年益壽 1 +延音 1 +廷和 1 +廷尉 1 +建好 1 +建威 1 +建市 1 +建御名方 1 +建御雷 1 +建構 1 +建武 1 +建置 1 +建華 1 +建超 1 +廿五 1 +廿六 1 +弄到 1 +弄清 1 +弊案 1 +式微 1 +弓尾 1 +弓弦 1 +弓箭 1 +引來 1 +引咎 1 +引導 1 +引江 1 +引渡 1 +引申 1 +引資 1 +弗拉格斯塔夫 1 +弗朗丹 1 +弗朗恰 1 +弗朗索 1 +弗朗西絲 1 +弗格森 1 +弗洛伊德 1 +弗特 1 +弗蘭克 1 +弗里德里希 1 +弗里施 1 +弗里茨 1 +弘 1 +弘前 1 +弘宣 1 +弭兵 1 +弱 1 +張家口 1 +張氏 1 +強勁 1 +強化 1 +強拍 1 +強暴 1 +強權 1 +強求 1 +強盜 1 +強迫 1 +強韌 1 +強項 1 +彈劾 1 +彈塗魚 1 +彈撥 1 +彈盡糧絕 1 +彌撒 1 +彌補 1 +彌賽亞 1 +彎曲 1 +彗差 1 +彗星 1 +彙編 1 +彝 1 +形像 1 +形同 1 +形體 1 +彥根 1 +彥直 1 +彩 1 +彩畫 1 +彩繪 1 +彩雲 1 +彩鳳 1 +彪馬 1 +彭劉楊 1 +彭博倫 1 +彭古魯 1 +彭定康 1 +彭拿路 1 +彰信 1 +影帝 1 +影線 1 +影評 1 +影迷 1 +影集 1 +影音 1 +彷彿 1 +役 1 +彼特 1 +往上 1 +往世 1 +往日 1 +征西 1 +待到 1 +很小 1 +很強 1 +很忙 1 +很懶 1 +很是 1 +很深 1 +很遠 1 +很重 1 +很長 1 +律定 1 +後世 1 +後代 1 +後勤 1 +後南 1 +後周 1 +後宮 1 +後庄 1 +後悔 1 +後援 1 +後梁 1 +後段 1 +後母 1 +後稱 1 +後續 1 +後置 1 +後藤 1 +後送 1 +後防 1 +後齒 1 +徒具 1 +徒手 1 +得克薩斯 1 +得心應手 1 +得悉 1 +得獎 1 +得益 1 +從來 1 +從句 1 +從周 1 +從善如流 1 +從政 1 +御史 1 +御墨 1 +御宅 1 +御窯 1 +復健 1 +復合 1 +復寫 1 +復甦 1 +循道 1 +微型 1 +微妙 1 +微小 1 +微波 1 +微粒 1 +微粒體 1 +微觀 1 +微量 1 +徵兆 1 +徵招 1 +徵祥 1 +德勝 1 +德國牧羊犬 1 +德妃 1 +德宏德特 1 +德富卡 1 +德干 1 +德愛 1 +德懷 1 +德拉瓦 1 +德文 1 +德比 1 +德江 1 +德爾 1 +德爾加多 1 +德爾斐 1 +德甲 1 +德高 1 +德魯茲 1 +徽 1 +徽章 1 +心境 1 +心宿 1 +心意 1 +心智 1 +心目 1 +心肌 1 +必和必拓 1 +必走 1 +必需 1 +忍心 1 
+忍氣吞聲 1 +志 1 +志摩 1 +志明 1 +志道 1 +忘 1 +忘記 1 +忙 1 +忠 1 +忠於 1 +忠誠 1 +快上 1 +快捷 1 +快綫 1 +忽 1 +忽視 1 +怎 1 +怒 1 +怕 1 +思侯 1 +思成 1 +思維 1 +思考 1 +怡 1 +急劇 1 +急忙 1 +急救 1 +急於 1 +急流 1 +急症 1 +急行 1 +性向 1 +性命 1 +性情 1 +性腺 1 +怪 1 +怪圈 1 +怪聲 1 +恆 1 +恆大 1 +恆德 1 +恆河 1 +恐嚇 1 +恐懼 1 +恢豐 1 +恣意 1 +恤 1 +恨 1 +恩南伽 1 +恩慈 1 +恩秀 1 +恩贈 1 +恭子 1 +息率 1 +悉心 1 +悉達多 1 +悟到 1 +悟空 1 +患 1 +患得患失 1 +患病 1 +您 1 +悲傷 1 +悲劇 1 +悲嘆 1 +悲慘 1 +悲痛 1 +悲痛欲絕 1 +悲鴻 1 +悼念 1 +情 1 +情不自禁 1 +情人 1 +情勢 1 +情愁 1 +情愛 1 +情景 1 +情結 1 +情誼 1 +情資 1 +情陷 1 +情願 1 +惇曧 1 +惟 1 +惠亞 1 +惠梨香 1 +惠特蘭 1 +惡 1 +惡人 1 +惡化 1 +惡夢 1 +惡性 1 +惡搞 1 +惡臭 1 +惡靈 1 +惡魔 1 +想必 1 +想起 1 +愈加 1 +愈大 1 +愈高 1 +愉快 1 +意圖 1 +意念 1 +意料 1 +意甲 1 +意魔 1 +愙威 1 +愚園 1 +愚昧 1 +愛好 1 +愛娜 1 +愛娜茲薇 1 +愛思德 1 +愛恨 1 +愛意 1 +愛慕 1 +愛明內斯庫 1 +愛樂 1 +愛河 1 +愛莎尼亞 1 +愛迪生 1 +愛默生 1 +感冒 1 +感謝 1 +慈湖 1 +慈濟 1 +慌亂 1 +慎 1 +慎太郎 1 +慕容 1 +慕肯 1 +慘叫 1 +慘重 1 +慚愧 1 +慢行 1 +慢駛 1 +慧嫻 1 +慰安 1 +慶 1 +慶典 1 +慶曆 1 +慶貽 1 +慶黎 1 +慷慨 1 +憂 1 +憂憤 1 +憲政 1 +憲民 1 +憲法 1 +憶蓮 1 +懂 1 +應付 1 +應允 1 +應屆 1 +應戰 1 +應昌 1 +應當 1 +應許 1 +應邀 1 +懲罰 1 +懶爪龍 1 +懷 1 +懷仁 1 +懷克里夫 1 +懷念 1 +懷慶 1 +懷抱 1 +懷水 1 +懷聖 1 +懸掛 1 +懼高 1 +懿 1 +戀人 1 +戀屍 1 +戀童 1 +戈德曼 1 +戈爾 1 +戈登 1 +戈矛 1 +戈蘭 1 +成事 1 +成仁 1 +成化 1 +成名 1 +成品 1 +成套 1 +成對 1 +成形 1 +成梁 1 +成行 1 +成語 1 +我國 1 +截 1 +截然不同 1 +截至 1 +截頜鯉 1 +戰事 1 +戰力 1 +戰勝 1 +戰地 1 +戰平 1 +戰情 1 +戰船 1 +戲子 1 +戲曲 1 +戲法 1 +戲碼 1 +戲謔 1 +戲院 1 +戴上 1 +戴克里先 1 +戴斯德 1 +戴爾馬 1 +戴維斯 1 +戴蒙 1 +戴頓 1 +戶田 1 +戶籍 1 +房東 1 +所為 1 +所長 1 +手上 1 +手工 1 +手感 1 +手抄 1 +手指 1 +手提 1 +手槍 1 +手稿 1 +手筆 1 +手腳 1 +手邊 1 +手風 1 +才子 1 +才是 1 +才智 1 +扎什倫布 1 +打亂 1 +打人 1 +打包 1 +打撈 1 +打死 1 +打水 1 +打牌 1 +打碎 1 +打造 1 +打響 1 +扔出 1 +托倫 1 +托加下 1 +托洛洛 1 +托盤 1 +托米 1 +托茂 1 +扣上 1 +批次 1 +扼止 1 +找來 1 +找續 1 +承天 1 +承德 1 +承接 1 +承斌 1 +承租 1 +技師 1 +技戰術 1 +技法 1 +抑制 1 +抑鬱 1 +抒解 1 +抓到 1 +投交 1 +投奔 1 +投標 1 +投球 1 +投身 1 +投靠 1 +抗大 1 +抗拒 1 +抗衡 1 +抗體 1 +折射 1 +折斷 1 +折衷 1 +抨擊 1 +披覆 1 +披頭士 1 +抬昇 1 +抱 1 +抱持 1 +抵受 1 +抵禦 1 +押韻 1 +抽檢 1 +抽煙 1 +抽象 1 +抽走 1 +拆分 1 +拆卸 1 +拆掉 1 +拆遷 1 +拉 1 +拉什沃思 1 +拉卜楞 1 +拉塞爾 1 +拉多加 1 +拉奏 1 +拉姆齊 1 +拉差諾 1 +拉布 1 +拉彼魯茲 1 +拉日色布 1 +拉林 1 +拉森 1 +拉爾夫 1 +拉特蘭 1 +拉珀斯維爾 1 +拉瑙 1 +拉籌伯 1 +拉美西斯 1 +拉薩 1 +拉西拉 1 +拉赫曼尼諾夫 1 +拋棄 1 +拋物 1 +拍 1 
+拍照 1 +拍賣 1 +拒不 1 +拓務 1 +拓建 1 +拓撲 1 +拔刀 1 +拖進 1 +拖鞋 1 +拙劣 1 +招 1 +招潮蟹 1 +招生 1 +招聘 1 +招降 1 +拜仁慕尼黑 1 +拜拜 1 +括弧 1 +拱廊 1 +拱橋 1 +拳一 1 +拳擊 1 +拳賽 1 +拷問 1 +拼寫 1 +拾糞 1 +拿來 1 +持久 1 +持球 1 +指使 1 +指標 1 +指派 1 +指稱 1 +指責 1 +挑選 1 +挖 1 +挖子 1 +挖掘 1 +挪動 1 +挪用 1 +振 1 +振動 1 +振幅 1 +振林 1 +挹江 1 +挺身而出 1 +挽回 1 +挾持 1 +捉弄 1 +捉拿 1 +捉襟見肘 1 +捍衛 1 +捐 1 +捐款 1 +捐獻 1 +捕撈 1 +捕殺 1 +捕獵 1 +捕魚 1 +捕鼠 1 +捲入 1 +捷徑 1 +授勳 1 +授意 1 +授權 1 +授與 1 +掉頭 1 +掌 1 +掌控 1 +掌摑 1 +掌權 1 +掌鏡 1 +排場 1 +排外 1 +排序 1 +掙扎 1 +掛 1 +掛果 1 +掛牌 1 +掛鉤 1 +掠奪 1 +採 1 +採信 1 +採摘 1 +採樣 1 +採納 1 +採購 1 +採集 1 +採食 1 +探明 1 +探望 1 +探求 1 +探究 1 +探險 1 +接到 1 +接力 1 +接班 1 +接納 1 +接聽 1 +接見 1 +接辦 1 +接送 1 +接連 1 +控告 1 +控訴 1 +推介 1 +推免生 1 +推前 1 +推力 1 +推導 1 +推斷 1 +推測 1 +推演 1 +推特 1 +推理 1 +推舉 1 +推論 1 +推遲 1 +掩 1 +掩蓋 1 +描摹 1 +描繪 1 +提前 1 +提問 1 +提子 1 +提康德羅加 1 +提拔 1 +提攜 1 +提昇 1 +提煉 1 +提督 1 +提籃 1 +提醒 1 +插手 1 +插曲 1 +揚言 1 +換成 1 +換算 1 +握帶 1 +握持 1 +揭曉 1 +揭發 1 +揭開 1 +揮舞 1 +援 1 +援助 1 +援外 1 +援引 1 +援手 1 +援救 1 +搜尋 1 +搜狐 1 +搜羅 1 +搜集 1 +搞垮 1 +搞錯 1 +搬動 1 +搬往 1 +搬移 1 +搬遷 1 +搭乘 1 +搭配 1 +搶 1 +搶先 1 +搶劫 1 +搶奪 1 +搶救 1 +摒棄 1 +摔 1 +摘下 1 +摘星 1 +摘錄 1 +摧毀 1 +摩加迪沙 1 +摩天 1 +摩崖 1 +摩托 1 +摩擦 1 +摩爾多瓦 1 +摩登 1 +摩納哥 1 +摩西 1 +摯友 1 +摸摸 1 +撒拉 1 +撒營盤 1 +撞入 1 +撞死 1 +撤回 1 +撤職 1 +撤退 1 +撤除 1 +撥 1 +撥出 1 +撥號 1 +撫養 1 +播種 1 +撮合 1 +撰述 1 +撲克 1 +撿 1 +撿起 1 +擁 1 +擁堵 1 +擁戴 1 +擁擠 1 +擁護 1 +擂台 1 +擊中 1 +擊劍 1 +擊斃 1 +擊毀 1 +擊潰 1 +擊破 1 +擋住 1 +操 1 +操控 1 +操縱 1 +擒拿 1 +擔憂 1 +擔竿 1 +擔綱 1 +據傳 1 +據此 1 +據稱 1 +據點 1 +擠塞 1 +擠壓 1 +擠奶 1 +擠眉弄眼 1 +擠迫 1 +擢升 1 +擬 1 +擬桿菌 1 +擬訂 1 +擬議 1 +擴散 1 +擴編 1 +擺弄 1 +擺渡 1 +擾亂 1 +攀爬 1 +攔截 1 +攝像 1 +攝取 1 +攪拌 1 +支取 1 +支廳 1 +支派 1 +支那 1 +支隊 1 +收場 1 +收容 1 +收市 1 +收支 1 +收生 1 +收益 1 +收租 1 +收緊 1 +收聽 1 +收買 1 +收費 1 +收養 1 +攸之 1 +改作 1 +改屬 1 +改投 1 +改採 1 +改換 1 +改派 1 +改發 1 +改穿 1 +改組 1 +改選 1 +改隸 1 +攻下 1 +攻勢 1 +攻堅 1 +攻方 1 +攻殺 1 +攻訐 1 +攻讀 1 +放任 1 +放入 1 +放出 1 +放到 1 +放大 1 +放榜 1 +放牧 1 +放緩 1 +放送 1 +放逐 1 +放開 1 +放鬆 1 +政團 1 +政委 1 +政局 1 +政廳 1 +政敵 1 +政樞 1 +政法 1 +政爭 1 +政界 1 +故郷 1 +效尤 1 +效能 1 +敏銳 1 +救人 1 +救出 1 +救助 1 +救國 1 +救援 1 +救星 1 +救災 1 +救生 1 +救贖 1 +敕 1 +敕令 1 +敕書 1 +敗 1 +敗局 1 +敗死 1 +敗瓦 1 +敗退 1 +教務 1 +教士 1 +教室 1 +教席 1 +教材 1 +教案 1 +教科 1 +教籍 1 +教總 
1 +教義 1 +教職員 1 +散射 1 +敦 1 +敦煌 1 +敬仰 1 +敬堯 1 +敬請 1 +敲擊 1 +敲訂 1 +整 1 +整塊 1 +整所 1 +整架 1 +整片 1 +整篇 1 +整軍 1 +整顆 1 +整齊 1 +敵兵 1 +敵方 1 +數以千計 1 +數值 1 +數十億 1 +數十萬 1 +數澤 1 +數百 1 +數碼 1 +數萬 1 +數論 1 +文哲 1 +文姬 1 +文岳 1 +文巨 1 +文德 1 +文摘 1 +文政 1 +文書 1 +文本 1 +文楷 1 +文武 1 +文法 1 +文清 1 +文職 1 +文賢 1 +文集 1 +文飾曲口魚 1 +文體 1 +文體教 1 +斑塊 1 +斑點 1 +斗貴子 1 +料 1 +斜 1 +斜坡 1 +斥教 1 +斬落 1 +斯佩克特 1 +斯凱勒 1 +斯哥特 1 +斯坦利 1 +斯坦福 1 +斯坦頓 1 +斯基龍 1 +斯塔茨門 1 +斯尼夫魯 1 +斯德哥爾摩 1 +斯托克 1 +斯氏亞冠龍 1 +斯洛伐克 1 +斯洛特 1 +斯特奇斯 1 +斯特萊默 1 +斯瓦爾恩 1 +斯科特 1 +斯維亞托斯拉夫 1 +斯里賽拉姆古德姆德瓦斯塔納姆 1 +新任 1 +新修 1 +新址 1 +新埔 1 +新太郎 1 +新奧爾良 1 +新字 1 +新寧 1 +新屋 1 +新巴 1 +新思 1 +新昌 1 +新明 1 +新春 1 +新月 1 +新核 1 +新榮 1 +新民 1 +新浪 1 +新版 1 +新生 1 +新秀 1 +新篇 1 +新編 1 +新罕布夏 1 +新罕布希爾 1 +新義 1 +新舊 1 +新製 1 +新開 1 +新飛 1 +新馬 1 +新高 1 +新鴻基 1 +新黨 1 +斷後 1 +斷盡 1 +斷言 1 +方丈 1 +方尖 1 +方正 1 +方田 1 +方石 1 +方程 1 +方蓋 1 +方蟹 1 +於維西 1 +施奈德 1 +施文 1 +施瓦本 1 +施用 1 +施韋比施哈爾 1 +旅 1 +旅居 1 +旅程 1 +旋渦 1 +旋轉 1 +族雄 1 +族頭 1 +旗艦 1 +旗面 1 +既得 1 +既是 1 +既然 1 +日出 1 +日向 1 +日夜 1 +日子 1 +日日 1 +日照 1 +日用 1 +日落 1 +日誌 1 +日賜 1 +旦增 1 +早有 1 +早餐 1 +旭 1 +旱災 1 +旻寧 1 +昆丁 1 +昆蟲 1 +昌吉 1 +昌都 1 +明中 1 +明亞 1 +明亮 1 +明代 1 +明宗 1 +明尼蘇達 1 +明憲 1 +明昌 1 +明智 1 +明正 1 +明潭 1 +明白 1 +明碁 1 +明視 1 +易卜拉欣 1 +易守 1 +易幟 1 +易斯 1 +易水 1 +易燃 1 +易經 1 +昔蘭尼 1 +星團 1 +星塵 1 +星展 1 +星崎 1 +星系 1 +映像 1 +春 1 +春丕 1 +春季 1 +春日井 1 +春會 1 +春田 1 +春節 1 +春緋 1 +春耕 1 +昨日 1 +昭侯 1 +昭儀 1 +昭宗 1 +昭禮 1 +昭通 1 +是年 1 +是方 1 +是次 1 +時事 1 +時份 1 +時值 1 +時光 1 +時刻 1 +時報 1 +時弊 1 +時稱 1 +時舉 1 +時針 1 +晃動 1 +晉 1 +晉北 1 +晉哲 1 +晉江 1 +晉級 1 +晒乾 1 +晨間 1 +普世 1 +普什圖 1 +普伊瑪諾娃 1 +普利茅斯 1 +普朗克 1 +普爾塔龍 1 +景泰 1 +晴神 1 +晶 1 +晶瑩 1 +晶閘 1 +智伯 1 +智利 1 +智趣 1 +暑期 1 +暖 1 +暗中 1 +暗喻 1 +暗影 1 +暗房 1 +暗指 1 +暗礁 1 +暗紅 1 +暗號 1 +暫 1 +暫別 1 +暫無 1 +暮光 1 +暱稱 1 +暴亂 1 +暴斂 1 +暴死 1 +暴風雪 1 +暹羅 1 +曄之 1 +曉彬 1 +曉得 1 +曉聲 1 +曉舟 1 +曖昧 1 +曬相 1 +曬衣 1 +曲張 1 +曲率 1 +曲目 1 +曲線 1 +曲藝 1 +曲阜 1 +曲頜形翼龍 1 +更低 1 +更佳 1 +更大 1 +更審 1 +更小 1 +更強 1 +更快 1 +更新世 1 +更是 1 +更硬 1 +更衣 1 +更輕 1 +更長 1 +曷懶甸 1 +書本 1 +書裡 1 +書迷 1 +書面 1 +書香世家 1 +曹家 1 +曹甸 1 +曹記 1 +曼切華 1 +曼哈頓 1 +曼城 1 +曼寧 1 +曼徹斯特 1 +曼成 1 +曼斯菲爾德 1 +曼海姆 1 +曼涅托 1 +曼玉 1 +曼科 1 +曾任 1 +曾孫 1 +曾愛 1 +曾祖父母 1 +替人 1 +最內 1 +最前 1 +最受 1 +最外 1 +最強 1 
+最旺 1 +最最 1 +最末 1 +最東 1 +最純 1 +最遠 1 +會上 1 +會址 1 +會師 1 +會戰 1 +會所 1 +會晤 1 +會章 1 +會見 1 +會計 1 +月色 1 +月薪 1 +有份 1 +有別 1 +有力 1 +有名 1 +有愛 1 +有方 1 +有期 1 +有染 1 +有條不紊 1 +有異 1 +有病 1 +有稱 1 +有花 1 +有點 1 +服刑 1 +朔 1 +朗豪 1 +朗頓 1 +望族 1 +朝下 1 +朝元 1 +朝政 1 +朝散 1 +朝東 1 +朝聖 1 +朝覲 1 +朝貢 1 +朝陽 1 +期刊 1 +木中 1 +木乃伊 1 +木刻 1 +木卡姆 1 +木城 1 +木尼 1 +木屋 1 +木工 1 +木戶 1 +木斯塘 1 +木村 1 +木櫾 1 +木蘭 1 +木造 1 +未入 1 +未敢 1 +未有 1 +未深 1 +未滿 1 +末端 1 +本劇 1 +本名 1 +本城 1 +本始 1 +本季 1 +本島 1 +本市 1 +本德 1 +本書 1 +本營 1 +本目 1 +本省 1 +本社 1 +本縣 1 +本能 1 +本著 1 +本郡 1 +本部 1 +本鄉 1 +本集 1 +本領 1 +札幌 1 +朱里 1 +朴次茅斯 1 +杉並 1 +李察 1 +杏子 1 +材 1 +材官 1 +材質 1 +村旁 1 +杖責 1 +杜乃爾 1 +杜伊 1 +杜利華 1 +杜成 1 +杜浦 1 +杜甫 1 +杜蘭戈維多利亞 1 +杜隆坦 1 +束 1 +杯賽 1 +杰仔 1 +東主 1 +東加 1 +東勝 1 +東南亞 1 +東坡 1 +東姑 1 +東宮 1 +東尼 1 +東岸 1 +東巡 1 +東急 1 +東支 1 +東昇 1 +東映 1 +東桑 1 +東條 1 +東武 1 +東涌 1 +東渡 1 +東直 1 +東站 1 +東興 1 +東華 1 +東西向 1 +東距 1 +東道 1 +東邊 1 +東郊 1 +東鄉 1 +東鐵 1 +東隧 1 +東風 1 +松下 1 +松坂 1 +松山 1 +松島 1 +松州 1 +松翔 1 +松花 1 +松鼠 1 +板 1 +板式 1 +林克 1 +林地 1 +林場 1 +林業 1 +林檎 1 +林翼 1 +林胡 1 +果然 1 +果真 1 +果酒 1 +枝葉 1 +架次 1 +枸杞 1 +柏 1 +柏加 1 +柏村 1 +柏松 1 +柏臣 1 +染手 1 +染病 1 +柔道 1 +柚木 1 +柝聲 1 +查找 1 +查普曼 1 +查氏 1 +查爾頓 1 +查理曼 1 +柬 1 +柬埔寨 1 +柯克伍德 1 +柯林斯 1 +柯爾 1 +柯爾克孜 1 +柯爾貝爾 1 +柱銘 1 +柳川 1 +柳州 1 +柳德米拉 1 +柳葉魚 1 +柴電 1 +柿本 1 +栗橋 1 +校呔 1 +校簿 1 +校門 1 +栩栩如生 1 +株 1 +株式 1 +核孔 1 +核實 1 +核工 1 +核彈 1 +核發 1 +核研 1 +核算 1 +根 1 +根培烏孜 1 +根深柢固 1 +根生 1 +根莖 1 +根部 1 +格丁尼亞 1 +格仔 1 +格但斯克 1 +格來 1 +格勞庇烏 1 +格勞賓登 1 +格奧爾格 1 +格子 1 +格式塔 1 +格拉博夫斯基 1 +格拉漢姆 1 +格林威治 1 +格林布希 1 +格羅先 1 +格羅夫納 1 +格羅希 1 +格蘭特 1 +格陵蘭 1 +格魯 1 +格魯瓊茲與姆瓦瓦 1 +桂陵 1 +桃子 1 +框架 1 +框線 1 +案例 1 +案達羅 1 +桐生 1 +桑德威斯狸藻 1 +桑托斯 1 +桓子 1 +桓玄 1 +梁贊諾夫 1 +梁龍 1 +梅園 1 +梅塔 1 +梅塔拉 1 +梅帕器 1 +梅里納 1 +梓里 1 +條款 1 +條紋 1 +梧州 1 +梨花 1 +梭羅 1 +梯隊 1 +梳 1 +梳頜翼龍 1 +梵安 1 +棉條 1 +棋局 1 +棋盤 1 +棋聖 1 +棋院 1 +棋類 1 +棒 1 +棒錘樹 1 +棕色 1 +棕褐 1 +森德靈 1 +棲地 1 +棲身 1 +棵 1 +植株 1 +椎名 1 +椰林 1 +楓樹 1 +楚克 1 +楚瑜 1 +楚紅 1 +楠桂 1 +楠溪 1 +業主 1 +業餘 1 +極北 1 +極區 1 +極少 1 +極為 1 +極矮 1 +極長 1 +極闊 1 +極限 1 +楷書 1 +楷模 1 +概要 1 +榆林 1 +榔頭 1 +榕樹 1 +榜羅 1 +榨出 1 +榫眼 1 +榮廷 1 +榮洲 1 +榮茂 1 +榴彈 1 +構思 1 +構造 1 +槍尖 1 +槍尾 1 +槍殺 1 +槍術 1 +槳 1 +樂園 1 +樂安 1 +樂官 1 +樂山 1 +樂師 1 +樂手 1 +樂敏錠 1 
+樂樂 1 +樂活 1 +樂翠 1 +樂觀 1 +樂趣 1 +樓宇 1 +樓層 1 +樓底 1 +樓煩 1 +樓盤 1 +樓面 1 +樓高 1 +標 1 +標售 1 +標志 1 +標明 1 +標有 1 +標示 1 +標籤 1 +標記 1 +標註 1 +標高 1 +樞密 1 +模里西斯 1 +樣 1 +樣品 1 +樣式 1 +樣貌 1 +樸實 1 +樹上 1 +樹幹 1 +樹枝 1 +橈腳 1 +橋上 1 +橋樑 1 +橋面 1 +機上 1 +機位 1 +機型 1 +機密 1 +機師 1 +機床 1 +機敏 1 +機械 1 +機理 1 +機種 1 +機能 1 +機製 1 +機遇 1 +橡樹 1 +橡樹龍 1 +橢 1 +橫 1 +橫帶 1 +橫徵 1 +橫渡 1 +橫線 1 +檔案 1 +檔次 1 +檜山 1 +檢驗 1 +檨仔林 1 +檳榔 1 +檸七 1 +櫃 1 +櫃檯 1 +櫟社 1 +欄目 1 +權氏 1 +權限 1 +次席 1 +次月 1 +次生 1 +次程 1 +欣快 1 +欺 1 +欽 1 +款式 1 +歆 1 +歌人 1 +歌壇 1 +歌星 1 +歌舞 1 +歌詞 1 +歌頌 1 +歐律狄刻 1 +歐斯巴特 1 +歐盟 1 +歐羅巴 1 +歐青 1 +歐麥爾 1 +歡 1 +歡慶 1 +歡樂 1 +正值 1 +正傳 1 +正夫 1 +正子 1 +正宇 1 +正巧 1 +正平 1 +正比 1 +正派 1 +正版 1 +正當 1 +正經 1 +正負粒子 1 +正配 1 +正陽 1 +此事 1 +此地 1 +此夢 1 +此書 1 +此樓 1 +此橋 1 +此片 1 +此處 1 +此語 1 +此起彼落 1 +此路 1 +此陵 1 +此項 1 +此魚 1 +步伐 1 +步蟾 1 +步行 1 +步驟 1 +武克希 1 +武力 1 +武威 1 +武帝 1 +武廟 1 +武廠 1 +武德 1 +武打 1 +武王 1 +武略 1 +武皇 1 +武者 1 +武藏 1 +歩 1 +歲月 1 +歷代 1 +歷來 1 +歷屬 1 +歷程 1 +歸來 1 +歸入 1 +歸到 1 +歸功 1 +歸咎 1 +歸案 1 +歸還 1 +歸附 1 +死刑 1 +死因 1 +死地 1 +死戰 1 +死期 1 +死板 1 +死狀 1 +死而復生 1 +死黨 1 +殉教 1 +殉爆 1 +殉職 1 +殊榮 1 +殘疾 1 +殘破 1 +殘遺 1 +殘部 1 +殲滅 1 +殺人 1 +殺手 1 +殺機 1 +殼層 1 +殼體 1 +殿堂 1 +毀壞 1 +毀容 1 +毅 1 +毅仁 1 +毅然 1 +母會 1 +母校 1 +母狼 1 +母猴 1 +母艦 1 +母語 1 +母貓 1 +毎年 1 +每元 1 +每座 1 +每戶 1 +每所 1 +每枚 1 +每每 1 +每股 1 +每邊 1 +每集 1 +每鼎 1 +毒​​物 1 +毒品 1 +毒死 1 +毒癮 1 +毒舌 1 +毓林 1 +毓楓 1 +毓芳 1 +比亞迪 1 +比亞韋斯托克 1 +比利 1 +比利牛斯 1 +比哈爾 1 +比喻 1 +比得哥什 1 +比方 1 +比武 1 +比薩 1 +比袍 1 +比褂 1 +毛色 1 +毛髮 1 +毫安 1 +毫無 1 +毯子 1 +氈幕 1 +民事 1 +民俗 1 +民力 1 +民居 1 +民工 1 +民心 1 +民意 1 +民房 1 +民柬 1 +民權 1 +民法 1 +民盟 1 +民答那峨 1 +民航 1 +民英 1 +民謠 1 +民豐 1 +民選 1 +民鐸 1 +民防 1 +氘 1 +氚 1 +氣息 1 +氣態 1 +氣憤 1 +氣旋 1 +氣槍 1 +氣死 1 +氣溫 1 +氣燄 1 +氣胸 1 +氣象 1 +氦 1 +氧化鐵 1 +氨基酸 1 +氫 1 +氫化氦 1 +氫氣 1 +氫鍵 1 +氮 1 +氮素 1 +氯化 1 +氯化氫 1 +氯化銠 1 +氯化鋁 1 +氯雷他定 1 +水世 1 +水份 1 +水圈 1 +水壓 1 +水床 1 +水扁 1 +水攻 1 +水晶 1 +水汽 1 +水流 1 +水火不容 1 +水球 1 +水產 1 +水療 1 +水翼 1 +水能 1 +水警 1 +水面 1 +水鳥 1 +永久 1 +永元 1 +永升 1 +永吉 1 +永和 1 +永壽 1 +永平 1 +永成 1 +永昌 1 +永樂 1 +永樂環 1 +永權 1 +永續 1 +永輝 1 +永靖 1 +汁液 1 +求 1 +求偶 1 +求出 1 +求助 1 +求問 1 +求婚 1 +求情 1 +求援 1 +求籤 1 +求醫 1 +汝寧 1 +汞柱 1 +江協 1 +江口 1 +江浙 1 +江海 1 +江源 1 +江漢 1 +江灣 1 +江谷 1 +江都 1 
+江閣 1 +江魚 1 +池塘 1 +池田 1 +污損 1 +污點 1 +汪 1 +汪達 1 +汪達爾 1 +汲及 1 +決意 1 +決擇 1 +決然 1 +決裂 1 +汽油 1 +汽船 1 +沃奎茲 1 +沃季采 1 +沃州 1 +沃思 1 +沃斯托克 1 +沃爾 1 +沃羅涅日 1 +沈氏 1 +沉水 1 +沉迷 1 +沉重 1 +沉降 1 +沒能 1 +沒落 1 +沒錯 1 +沖之 1 +沖片 1 +沖走 1 +沙丘 1 +沙依 1 +沙崙 1 +沙巴 1 +沙普爾 1 +沙梁伐 1 +沙池 1 +沙洛蒙 1 +沙漠 1 +沙瓦納 1 +沙田 1 +沙畹 1 +沙蠶 1 +沙迦罕 1 +沙邦 1 +沙里亞 1 +河卡 1 +河圖 1 +河岸 1 +河心 1 +河段 1 +河漫 1 +河西 1 +油煙 1 +油田 1 +油菜 1 +油量 1 +油電 1 +治中 1 +治勲 1 +治勳 1 +治喪 1 +治國 1 +治學 1 +治水 1 +治理 1 +治軍 1 +沼 1 +沽渚 1 +沾解 1 +沿 1 +沿線 1 +沿襲 1 +沿途 1 +泉 1 +法令 1 +法師 1 +法拉利 1 +法拉龍 1 +法政 1 +法斯塔夫 1 +法格拿 1 +法比恩 1 +法海 1 +法登 1 +法羅 1 +法老 1 +法蘭克尼亞 1 +法西斯 1 +法輪 1 +泛濫 1 +泠 1 +波包 1 +波卡特洛 1 +波及 1 +波因 1 +波圖 1 +波城 1 +波塞冬 1 +波形 1 +波恩 1 +波折 1 +波普 1 +波森 1 +波爾 1 +波特威瑟 1 +波特蘭 1 +波瓦坦 1 +波的尼亞 1 +波西斯 1 +波錠 1 +波黑 1 +泥土 1 +泥潭 1 +注資 1 +泰 1 +泰共 1 +泰勒 1 +泰北 1 +泰始 1 +泰姬 1 +泰姬瑪哈 1 +泰州 1 +泰曾 1 +泰然 1 +泰琳達 1 +泰米爾納德 1 +泰興 1 +泳屋 1 +泳灘 1 +洋介 1 +洗劫 1 +洗衣 1 +洛佩斯 1 +洛加尼斯 1 +洛城 1 +洛夫喬伊 1 +洛夫森 1 +洛布尼亞 1 +洛恩 1 +洛書 1 +洛珊 1 +洛維爾 1 +洛茲 1 +洛雷托 1 +洞子 1 +洞穴 1 +洞窟 1 +津 1 +津貼 1 +洩慾 1 +洩漏 1 +洪堡 1 +洪家 1 +洪橋 1 +洵 1 +洵美 1 +活出 1 +活化 1 +活埋 1 +活水 1 +活潑 1 +活用 1 +活躍 1 +活靈活現 1 +派對 1 +派往 1 +流 1 +流下 1 +流亡 1 +流入 1 +流出 1 +流嶼 1 +流放 1 +流星 1 +流標 1 +流民 1 +流水 1 +流浪 1 +流產 1 +流程 1 +流言 1 +流逝 1 +流露 1 +浚稽 1 +浦市 1 +浦那 1 +浦鎮 1 +浪 1 +浪漫 1 +浪潮 1 +浪費 1 +浪跡 1 +浮 1 +浮動 1 +浴場 1 +海事 1 +海光 1 +海因茨 1 +海地 1 +海峰 1 +海布隆 1 +海平 1 +海廷 1 +海德克 1 +海怡 1 +海昌 1 +海景 1 +海淀 1 +海港 1 +海濱 1 +海灘 1 +海爾賽 1 +海神 1 +海秀 1 +海老名 1 +海航 1 +海藍 1 +海螺 1 +海豐 1 +海陸 1 +海風 1 +海鷗 1 +浸染 1 +浸泡 1 +涅爾皮奇耶 1 +涇波 1 +涇陽 1 +消極 1 +消耗 1 +消退 1 +消除 1 +涉世 1 +涉嫌 1 +涉足 1 +涪江 1 +涮煮 1 +液 1 +液化 1 +液壓 1 +涵蓋 1 +淄川 1 +淑妃 1 +淑怡 1 +淘寶 1 +淘金 1 +淡 1 +淡定 1 +淡色 1 +淨土 1 +淪 1 +淪落 1 +淪陷 1 +淫蕩 1 +淮南 1 +淮許 1 +深受 1 +深埋 1 +深層 1 +深度 1 +深感 1 +深有 1 +深海 1 +深港 1 +深溪 1 +深紅 1 +深綠 1 +深色 1 +深處 1 +深造 1 +淵源 1 +混 1 +混亂 1 +混凝 1 +混沌 1 +混為一談 1 +混燃 1 +淹浸 1 +淺 1 +淺水 1 +淺綠 1 +添丁 1 +清償 1 +清凈 1 +清單 1 +清帝 1 +清拆 1 +清教 1 +清文 1 +清明 1 +清潔 1 +清理 1 +清道 1 +清遠 1 +清還 1 +清鄉 1 +減低 1 +減刑 1 +減小 1 +減退 1 +渠子 1 +渡 1 +渣打 1 +渤海 1 +測繪 1 +渭州 1 +港交 1 +港區 1 +港府 1 +渴求 1 +游 1 +游標 1 +游說 1 +渾 1 +湄洲 1 +湖上 1 +湖人 1 +湖名 1 +湖畔 1 +湘南 1 +湘西 1 +湘陰 1 
+湛恩 1 +湧現 1 +湮滅 1 +湯姆萊利 1 +湯料 1 +源於 1 +源田 1 +準 1 +準基 1 +準將 1 +準確 1 +溝 1 +溝壑 1 +溝齒鼩 1 +溢漏 1 +溪 1 +溪水 1 +溪美 1 +溪鱂 1 +溫哥華 1 +溫坡 1 +溫布萊 1 +溫布頓 1 +溫徹斯特 1 +溫斯頓 1 +溫柔 1 +溫特夸特斯 1 +溫特斯 1 +溶劑 1 +溶氣 1 +滅 1 +滑板 1 +滑稽 1 +滑鼠 1 +滕氏 1 +滙業 1 +滬江 1 +滯洪 1 +滲出 1 +滴下 1 +滾動 1 +滾石 1 +滿意 1 +滿清 1 +滿載 1 +漁村 1 +漁梁 1 +漁船 1 +漂浮 1 +漆器 1 +演 1 +演成 1 +演戲 1 +演技 1 +演繹 1 +演義 1 +演講 1 +漢中 1 +漢娜 1 +漢字 1 +漢桓 1 +漫漶 1 +漫長 1 +漬 1 +漱芳 1 +漲幅 1 +漸變 1 +漸趨 1 +潑 1 +潔瑩 1 +潘丘 1 +潘恩 1 +潛伏 1 +潛力 1 +潛望 1 +潛水 1 +潛游 1 +潟湖 1 +潢川 1 +潭村 1 +潭東 1 +潭陽 1 +潰散 1 +澀谷 1 +澤尻 1 +激勵 1 +激發 1 +激素 1 +激進 1 +濁 1 +濃 1 +濃厚 1 +濃煙 1 +濕地 1 +濟 1 +濟世 1 +濟科 1 +濟邦 1 +濤 1 +濫用 1 +濱海 1 +濾掉 1 +瀏陽 1 +瀕危 1 +瀘溪 1 +瀝泗 1 +瀟洒 1 +火上加薪 1 +火候 1 +火喉 1 +火山 1 +火心 1 +火掌 1 +火炮 1 +火爆 1 +火鍋 1 +灰棕 1 +灰雲 1 +灰黑 1 +災禍 1 +炎熱 1 +炙手可熱 1 +炭疽 1 +炮 1 +炸彈 1 +炸死 1 +炸毀 1 +炸糕 1 +為時 1 +烈格司 1 +烏代 1 +烏來杜鵑 1 +烏孜別克 1 +烏宗哈珊 1 +烏干達 1 +烏德特 1 +烏扎 1 +烏拉圭 1 +烏普薩拉 1 +烏腳 1 +烏魯木齊 1 +烴 1 +烹煮 1 +焊接 1 +焗豆 1 +焚 1 +焚屍 1 +焚燒 1 +焜耀 1 +無俚頭 1 +無危 1 +無厭 1 +無子 1 +無家可歸 1 +無心 1 +無忌 1 +無所不能 1 +無暇 1 +無有 1 +無機 1 +無氧 1 +無水氯化鋁 1 +無派 1 +無產 1 +無疑 1 +無盡 1 +無罪 1 +無能為力 1 +無與倫比 1 +無色 1 +無處 1 +無視 1 +無誤 1 +無過 1 +無量壽 1 +無關緊要 1 +無限 1 +無雙 1 +無頭 1 +無點 1 +無齒龍 1 +焦尼 1 +焦點 1 +煉油 1 +煉金 1 +煙 1 +煙囪 1 +煙槍 1 +煙霧 1 +煜全 1 +煤建 1 +煤氣 1 +煥 1 +煦 1 +照射 1 +煮 1 +煮食 1 +煽動 1 +熄匙 1 +熊族 1 +熊本 1 +熊隊 1 +熏烤 1 +熏陶 1 +熔化 1 +熔岩 1 +熟知 1 +熟釜 1 +熱值 1 +熱刺 1 +熱力 1 +熱心 1 +熱愛 1 +熱羅姆 1 +熱身 1 +熱量 1 +熱電 1 +熱鬧 1 +熾熱 1 +燁 1 +燃氣 1 +燈謎 1 +燒灼 1 +燒荒 1 +燕 1 +燕窩 1 +營口 1 +營團 1 +營地 1 +營寨 1 +營帳 1 +營火 1 +營造 1 +營長 1 +營養 1 +燦爛 1 +燭光 1 +燾 1 +爐 1 +爪部 1 +爬到 1 +爬山 1 +爬梯 1 +爭冠 1 +爭占 1 +爭吵 1 +爭奪 1 +爭寵 1 +爭得 1 +爭界 1 +爭相 1 +爭端 1 +爭競 1 +爭論 1 +爭鬥 1 +父風 1 +爸爸 1 +爺 1 +爺爺 1 +爽文 1 +爾炘 1 +牆 1 +牆上 1 +牆身 1 +牆面 1 +片劑 1 +片尾 1 +片斷 1 +片頭 1 +版主 1 +版畫 1 +牌照 1 +牙籤 1 +牙線 1 +牙薩克 1 +牙醫 1 +牛池 1 +牛潭尾 1 +牛石 1 +牛首 1 +牛鼻栓 1 +牟 1 +牟利 1 +牟合 1 +牠 1 +牡蠣 1 +牧 1 +牧區 1 +牧民 1 +牧羊 1 +牧谷 1 +物件 1 +物產 1 +物象 1 +物鏡 1 +物阜 1 +牲畜 1 +特備 1 +特優 1 +特務 1 +特區 1 +特工 1 +特快 1 +特意 1 +特拉華 1 +特攝 1 +特派 1 +特爾瑪 1 +特瓦史塔 1 +特產 1 +特異 1 +特菲爾 1 +特重 1 +特隆赫姆 1 +特雷格羅恩 1 +牽引 1 +牽牛花 1 +犧牲 1 +犬科 1 +犬種 1 +犬髖 1 +犯人 1 +狂亂 1 +狄 1 +狄拉克 1 +狐 1 +狐庸 1 +狡猾 
1 +狸藻 1 +狹小 1 +狼人 1 +狼堡 1 +狼影 1 +狼群 1 +猜忌 1 +猜想 1 +猝死 1 +猴年 1 +猴群 1 +猶大 1 +獅子 1 +獎牌 1 +獎盃 1 +獨一無二 1 +獨具 1 +獨唱 1 +獨孤 1 +獨家 1 +獨有 1 +獨眠 1 +獨行 1 +獨資 1 +獲准 1 +獲判 1 +獲勳 1 +獲召 1 +獲悉 1 +獲授 1 +獲獎 1 +獲益 1 +獲薦 1 +獲選 1 +獲頒 1 +獵物 1 +獸人 1 +獸族 1 +獻 1 +獻上 1 +獻堂 1 +獻策 1 +獻議 1 +玄天 1 +玄宗 1 +玄武 1 +玄策 1 +玄貓 1 +玉柴 1 +玉純 1 +玉魔 1 +玉鳳花 1 +玉麟 1 +王儲 1 +王冠 1 +王墓 1 +王宮 1 +王座 1 +王爾德 1 +王蓮 1 +玩伴 1 +玩弄 1 +玩法 1 +玩笑 1 +玫瑰 1 +玲玲 1 +玷染 1 +珀斯 1 +珍寶 1 +珠 1 +珠璣 1 +珠鋼 1 +班克斯 1 +班卓 1 +班子 1 +班布里奇 1 +班機 1 +班次 1 +班禪 1 +班級 1 +現役 1 +現身 1 +球壇 1 +球差 1 +球星 1 +球根 1 +球狀 1 +球道 1 +球面 1 +琅 1 +理性 1 +理由 1 +琦 1 +琬 1 +琳 1 +琳達 1 +琴弓 1 +琺琅 1 +瑋 1 +瑛 1 +瑜伽 1 +瑞普肯 1 +瑞欽 1 +瑞霖 1 +瑟洛 1 +瑣法 1 +瑪 1 +瑪利 1 +瑪利亞路易莎 1 +瑪利歐 1 +瑪君龍 1 +瑪莉安 1 +瑪莎 1 +瑪麗特 1 +瑾 1 +環保 1 +環帶 1 +環狀 1 +環節 1 +環繞 1 +瓊斯 1 +瓊珊 1 +瓘 1 +瓜里利亞 1 +瓦伊什維爾卡斯 1 +瓦伊杜 1 +瓦卡加 1 +瓦德 1 +瓦拉 1 +瓦薩 1 +瓦解 1 +瓦里奧 1 +甄別 1 +甘草 1 +甚厚 1 +甚嚴 1 +甚多 1 +甚小 1 +甚深 1 +甚篤 1 +甚至是 1 +甜兒 1 +甜度 1 +生主 1 +生出 1 +生動 1 +生天 1 +生子 1 +生平 1 +生性 1 +生效 1 +生機 1 +生殺 1 +生氣 1 +生火 1 +生肖 1 +生財之道 1 +生還 1 +產 1 +產出 1 +產經 1 +甦醒 1 +用人 1 +用來 1 +用光 1 +用兵 1 +用字 1 +用完 1 +用手 1 +用有 1 +用水 1 +用藥 1 +用計 1 +用詞 1 +甬 1 +田園 1 +田地 1 +田心 1 +田納西 1 +田野 1 +田頭 1 +甲山 1 +甲殼 1 +申辦 1 +男人 1 +男士 1 +男嬰 1 +男方 1 +男童 1 +界定 1 +界限 1 +畔 1 +留傳 1 +留哥 1 +留待 1 +留空 1 +留聲 1 +留良 1 +畜牧 1 +畜養 1 +畢打 1 +畢氏 1 +畢蘭德拉 1 +畢馬威 1 +略帶 1 +略有 1 +略為 1 +畫下 1 +畫中 1 +畫分 1 +畫會 1 +畫畫 1 +畫面 1 +異事 1 +異姓 1 +異度 1 +異形 1 +異曲同工 1 +異母 1 +異端 1 +當上 1 +當下 1 +當值 1 +當官 1 +當屆 1 +當政 1 +當晚 1 +當期 1 +當歸 1 +當面 1 +疆域 1 +疏浚 1 +疏遠 1 +疑 1 +疑點 1 +疙瘩 1 +疲勞 1 +疲弱 1 +疼痛 1 +病原 1 +病患 1 +病情 1 +病歷 1 +病死 1 +病重 1 +症候 1 +症狀 1 +痕跡 1 +痙攣 1 +痛心疾首 1 +痢疾 1 +痰 1 +瘦 1 +瘧疾 1 +癌 1 +癖 1 +癥狀 1 +登 1 +登丹 1 +發 1 +發佈 1 +發作 1 +發兵 1 +發呆 1 +發奮 1 +發揚光大 1 +發改委 1 +發放 1 +發洩 1 +發炎 1 +發燒 1 +發牌 1 +發球 1 +發病 1 +發聲 1 +發財 1 +發車 1 +發配 1 +白丁 1 +白井 1 +白公 1 +白利南 1 +白化 1 +白堊 1 +白天 1 +白宮 1 +白砂 1 +白蓮 1 +白蛇 1 +白軍 1 +白金 1 +白銅 1 +白陵 1 +白雲 1 +白面 1 +白頸長尾雉 1 +白鹿 1 +白麗 1 +百事 1 +百代 1 +百億 1 +百兆 1 +百帕斯卡 1 +百廢待舉 1 +百濟 1 +百無聊賴 1 +百老匯 1 +百花齊放 1 +百萬 1 +百貨 1 +百餘 1 +百鳴 1 +的士 1 +的確 1 +的黎波里 1 +皇位 1 +皇冠 1 +皇城 1 +皇太極 1 +皇妃 1 +皇廷 1 +皇權 1 +皇發 1 +皈依 1 +皋 1 +皓 1 +皓若 1 
+皮亞韋 1 +皮克爾 1 +皮內羅洛 1 +皮特 1 +皮特凱恩 1 +皮耶特普拉桑克穆斯特魯 1 +皮雅福斯 1 +皰疹 1 +盆地 1 +盈盈 1 +益 1 +益城 1 +益新 1 +益處 1 +盔甲 1 +盛事 1 +盛大 1 +盛妝 1 +盛揮 1 +盛產 1 +盛行 1 +盜用 1 +盟 1 +盟軍 1 +盡到 1 +盡喪 1 +盡情 1 +盡頭 1 +監工 1 +監控 1 +監測 1 +監禁 1 +監聽 1 +盤踞 1 +盧 1 +盧加 1 +盧溝 1 +盧瓦斯 1 +盧甘斯克 1 +盧福瓦 1 +盪 1 +目睹 1 +目鏡 1 +直勉 1 +直屬 1 +直覺 1 +直言 1 +直說 1 +直間 1 +相位 1 +相傳 1 +相容 1 +相差無幾 1 +相悖 1 +相應 1 +相挺 1 +相異 1 +相稱 1 +相約 1 +相繼 1 +相聲 1 +相若 1 +相處 1 +相見 1 +相較 1 +相通 1 +相速 1 +相鄰 1 +相間 1 +盾座苣苔 1 +盾系 1 +省務 1 +省思 1 +省油 1 +眉山 1 +看中 1 +看出 1 +看台 1 +看得 1 +看看 1 +看管 1 +看見 1 +看透 1 +看重 1 +真 1 +真光 1 +真北 1 +真名 1 +真好 1 +真希 1 +真木 1 +真核 1 +真相大白 1 +眯眼 1 +眷村 1 +眼下 1 +眼淚 1 +眼狀 1 +眼球 1 +眼皮 1 +眼神 1 +眾經 1 +眾說紛紜 1 +睡 1 +睡眠 1 +睡覺 1 +督撫 1 +睾丁蛋白 1 +睿 1 +睿智 1 +瞪羚 1 +瞬時 1 +瞭如指掌 1 +矗立 1 +矛 1 +矢口否認 1 +知府 1 +知曉 1 +知足 1 +短少 1 +短期 1 +短草 1 +短裙 1 +短詩 1 +短語 1 +短音 1 +短髮 1 +矮星 1 +石像 1 +石器 1 +石塊 1 +石材 1 +石湖 1 +石灰 1 +石牆 1 +石牌 1 +石頭門坎 1 +砂拉越 1 +砂漿 1 +砂紙 1 +砍伐 1 +砒霜 1 +研磨 1 +砝碼 1 +破損 1 +破滅 1 +破舊 1 +破落 1 +硝庫爾 1 +硝酸甘油片 1 +硫 1 +硫化氫 1 +硫化鉛 1 +硫酸銨 1 +硬幣 1 +碑亭 1 +碑刻 1 +碧波 1 +碧琴 1 +碰撞 1 +碳紙 1 +碳酸鎂 1 +確知 1 +確診 1 +碼 1 +磁性 1 +磐田 1 +磚室 1 +磨坊 1 +磨折 1 +磨槽 1 +磷化 1 +磷素 1 +磷酸 1 +礙 1 +礦場 1 +礦物 1 +礦石 1 +礦藏 1 +示人 1 +示愛 1 +社皮 1 +社論 1 +社長 1 +祁鏞 1 +祈願 1 +祐希 1 +祖 1 +祖上 1 +祖圭 1 +祖外公 1 +祖外婆 1 +祖宗 1 +祖籍 1 +神仙 1 +神偷 1 +神器 1 +神明 1 +神殿 1 +神社 1 +神秘果 1 +神籤 1 +神魔 1 +祠 1 +祥子 1 +票據 1 +票數 1 +祭司 1 +祭壇 1 +祭師 1 +祭物 1 +祭祀 1 +祭酒 1 +祿勸 1 +祿山 1 +禁煙 1 +禁用 1 +禁藥 1 +禁賽 1 +禍 1 +福克沙尼 1 +福安 1 +福康安 1 +福慧 1 +福池 1 +福清 1 +禕 1 +禪師 1 +禮堂 1 +禮濤 1 +禮炮 1 +禮物 1 +禱文 1 +禽流感 1 +秀實 1 +秀康 1 +秀怡 1 +秀珠 1 +私下 1 +私交 1 +私奔 1 +私宅 1 +私家 1 +私立 1 +私財 1 +秉國 1 +秋人 1 +秋山 1 +秋爽 1 +秋興 1 +秋香 1 +科多爾 1 +科屬 1 +科恩 1 +科教 1 +科朗 1 +科爾基斯 1 +科特 1 +科目 1 +秘指 1 +租予 1 +租務 1 +租地 1 +租戶 1 +租用 1 +秦城 1 +秦州 1 +秦晉之好 1 +秦朝 1 +秦石 1 +秩序 1 +移交 1 +移往 1 +移植 1 +移至 1 +移送 1 +稀釋 1 +稅項 1 +稍為 1 +稗官野史 1 +種內 1 +種名 1 +種子 1 +種屬 1 +稱海 1 +稱病 1 +稱銜 1 +稻子 1 +稻草 1 +稼祥 1 +穀 1 +穀物 1 +穆宗 1 +穆拉 1 +穆斯塔法凱馬爾帕沙 1 +穆爾西亞 1 +穆薩 1 +積山 1 +積良 1 +穩 1 +穩固 1 +穩妥 1 +究竟 1 +空出 1 +空前 1 +空名 1 +空客 1 +空戰 1 +空隙 1 +空難 1 +穿幫 1 +穿戴 1 +穿甲 1 +穿行 1 +穿過 1 +突尼西亞 1 +突感 1 +突現 1 +窄袖 1 +窗口 1 +窗外 1 +窘境 1 +窟檐 1 +窮苦 1 
+窮追 1 +窯 1 +窯洞 1 +竄紅 1 +竊聽 1 +立交 1 +立國 1 +立村 1 +立營 1 +立花 1 +立蒙 1 +立面 1 +立體 1 +站內 1 +站名 1 +站坪 1 +站廳 1 +站點 1 +竟 1 +章回 1 +章斐 1 +童女 1 +童男 1 +端川 1 +競相 1 +竹 1 +竹器 1 +竹治 1 +竹溪 1 +竹片 1 +笛 1 +符 1 +符桐 1 +第 1 +第999 1 +第三十三 1 +第十七 1 +第十五 1 +第十四 1 +第廿 1 +第比利斯 1 +第谷 1 +笳冬 1 +等位 1 +等客 1 +等號 1 +筐仔沙 1 +筒狀 1 +答應 1 +箏 1 +算出 1 +算術 1 +管制 1 +管子 1 +箬松 1 +箱型 1 +箴言 1 +節度 1 +節節 1 +範疇 1 +篡位 1 +篡國 1 +篡地 1 +簡化 1 +簡約 1 +簡訊 1 +簧 1 +簽名 1 +簽定 1 +簽認 1 +簽證 1 +簽賬 1 +籃筐 1 +籌備 1 +籌措 1 +籌款 1 +籌資 1 +籌辦 1 +籍貫 1 +籠式 1 +米南加保 1 +米古 1 +米哈伊 1 +米拉麥克斯 1 +米沙鄢 1 +米洛塞維奇 1 +米特斯 1 +米線 1 +米酒 1 +米高梅 1 +粉 1 +粉碎 1 +粉紅 1 +粉絲 1 +粗壯 1 +粗鱗蟒 1 +粵明 1 +粽子 1 +精 1 +精力 1 +精子 1 +精密 1 +精心 1 +精湛 1 +精算 1 +精索 1 +精裝 1 +糖尿 1 +糖蒜 1 +糞 1 +糟糕 1 +糧儲 1 +糧餉 1 +系數 1 +糾正 1 +糾紛 1 +紀元 1 +紂 1 +約定 1 +約熱夫 1 +約瑟芬 1 +約翰內斯堡 1 +約翰麥克連 1 +約長 1 +紅旗 1 +紅日 1 +紅杏出牆 1 +紅樓 1 +紅樓夢 1 +紅樹 1 +紅玉 1 +紅磨 1 +紅茶 1 +紅襪 1 +紅遍 1 +紅酒 1 +紅點 1 +紈 1 +紋路 1 +紋飾 1 +納入 1 +納塔爾 1 +納爾西斯 1 +納爾遜 1 +納瓦拉 1 +納蘇爾 1 +紐國 1 +紐澤西 1 +紐約尼克斯 1 +紐芬蘭 1 +紐華克 1 +紐黑文 1 +純一 1 +純凈 1 +純樸 1 +純陽 1 +紙上 1 +紙條 1 +紙盒 1 +級數 1 +素包 1 +素食 1 +素餡 1 +索倫 1 +索尼 1 +索溪峪 1 +索維克 1 +索菲 1 +索菲亞 1 +索西納 1 +索賠 1 +索馬里 1 +紮實 1 +累計 1 +細 1 +細岡 1 +細窄 1 +細菌 1 +細部 1 +細長 1 +紳士 1 +紹 1 +紹儀 1 +紹榮 1 +紺三郎 1 +終審 1 +終身大事 1 +組件 1 +組像 1 +組別 1 +組口 1 +組態 1 +組織胺 1 +組隊 1 +結交 1 +結冰 1 +結尾 1 +結雅 1 +絕壁 1 +絕大 1 +絕後 1 +絕版 1 +絕罰 1 +絞刑 1 +絞死 1 +絞痛 1 +給定 1 +給職 1 +給藥 1 +給體 1 +統 1 +統帥 1 +統籌 1 +絲山 1 +絲帶 1 +絶 1 +綁 1 +綉 1 +綏遠 1 +經國 1 +經意 1 +經文 1 +經昌 1 +經期 1 +經由 1 +經界 1 +綜 1 +綜理 1 +綜錄 1 +綠化 1 +綠帶 1 +綠滙 1 +綠燈 1 +綠社 1 +綠黨 1 +維健 1 +維克托 1 +維利爾斯 1 +維埃拉 1 +維多莉亞 1 +維希 1 +維德 1 +維景灣 1 +維爾紐斯 1 +維生 1 +維祀 1 +維羅納 1 +維記 1 +維護 1 +維迪斯 1 +維迪爾 1 +綱領 1 +網址 1 +網易 1 +網線 1 +網購 1 +綺塍 1 +綺色佳 1 +綽號 1 +綿羊 1 +緊張 1 +緊緊 1 +緊貼 1 +緊逼 1 +緊閉 1 +線上 1 +線前 1 +線度 1 +線條 1 +線索 1 +線道 1 +締造 1 +編上 1 +編導 1 +編程 1 +編篡 1 +編繪 1 +編纂 1 +編者 1 +編腔 1 +編隊 1 +緩衝 1 +緩解 1 +緩鬢 1 +緩龍 1 +緬 1 +緯來 1 +練兵 1 +緹 1 +縣市 1 +縣裡 1 +縫 1 +縫製 1 +縮寫 1 +縮小 1 +縱 1 +縱使 1 +縱觀 1 +縱隊 1 +總區 1 +總和 1 +總局 1 +總站 1 +總行 1 +總裁 1 +總計 1 +總辦 1 +績效 1 +繁多 1 +繁瑣 1 +繁盛 1 +繁雜 1 +繁體 1 +繞境 1 +繞開 1 +繡 1 +繩架 1 +繭 1 +繳付 1 +繳納 1 +繼業 1 +繼科 1 +續航 1 +續部 1 +纏足 1 +纜車 1 
+缺口 1 +缺失 1 +缺少 1 +缺氧 1 +缺血 1 +罕有 1 +罪惡 1 +置有 1 +置物 1 +罰則 1 +署理 1 +罵聲 1 +罷免 1 +罷工 1 +罹癌 1 +罹難 1 +羅乞多毗闍 1 +羅什艾因 1 +羅伊 1 +羅克斯堡 1 +羅培茲 1 +羅夫 1 +羅希 1 +羅德西亞 1 +羅拔 1 +羅曼什 1 +羅柔 1 +羅森費爾德 1 +羅爾夫 1 +羅隆基 1 +羊圈 1 +美味 1 +美孚 1 +美寶 1 +美幸 1 +美林豬籠草 1 +美琴 1 +美知留 1 +美稱 1 +美索不達米亞 1 +美聯 1 +美聲 1 +美薇 1 +美術 1 +美觀 1 +美譽 1 +美里 1 +美食 1 +美麗華 1 +羚羊 1 +羞恥 1 +群峰 1 +群族 1 +群組 1 +群落 1 +群速 1 +群雄 1 +群體 1 +羨慕 1 +義久 1 +義勇 1 +義安 1 +義工 1 +義弘 1 +義春 1 +義民 1 +義父 1 +義項 1 +羱羊 1 +羲 1 +羽田 1 +羽絨 1 +翌日 1 +習經 1 +翔 1 +翔麟 1 +翟 1 +翠鳥 1 +翻覆 1 +翼手龍 1 +翼龍 1 +耀樞 1 +耀武 1 +耀邦 1 +老人 1 +老大 1 +老套 1 +老婦 1 +老將 1 +老少 1 +老弱 1 +老橋 1 +老漢 1 +考上 1 +考夫卡 1 +考尼律斯 1 +考柯 1 +考牙 1 +考生 1 +考究 1 +考績 1 +考進 1 +考選 1 +而已 1 +耐受 1 +耐庵 1 +耐玩 1 +耐航 1 +耳光 1 +耳勺 1 +耳孔 1 +耳朵眼 1 +耳珠 1 +耳環 1 +耳癤 1 +耳蝸 1 +耳門 1 +耳骨 1 +耶索洛 1 +耶路撒冷 1 +耽擱 1 +聆聽 1 +聖人 1 +聖保羅 1 +聖克萊爾 1 +聖名 1 +聖地亞哥 1 +聖彌格 1 +聖彼得堡 1 +聖徒 1 +聖拉扎爾 1 +聖歌 1 +聖水 1 +聖求 1 +聖潔 1 +聖祖 1 +聖神 1 +聖經 1 +聖訓 1 +聖赫勒拿 1 +聖赫勒拿島戴勝 1 +聖路易斯 1 +聖體 1 +聘問 1 +聘用 1 +聚氯乙烯 1 +聚禮 1 +聚苯乙烯 1 +聚變 1 +聚體 1 +聞名 1 +聞言 1 +聯姻 1 +聯播 1 +聯江 1 +聯浦 1 +聯產 1 +聯美 1 +聰敏 1 +聲恆 1 +聲援 1 +聲波 1 +聲谷 1 +聲門 1 +聲音 1 +聶丞益 1 +職員 1 +職棒 1 +聽到 1 +聽命 1 +聽從 1 +聽眾 1 +聽聞 1 +聾人 1 +肅宗 1 +肆 1 +肆意 1 +肇 1 +肉夾 1 +肉湯 1 +肉瘤 1 +肉緊 1 +肌肉 1 +肖嚴 1 +肚臍 1 +肚餓 1 +肝 1 +股市 1 +股本 1 +肥牛 1 +肥田 1 +肥胖 1 +肩 1 +肯 1 +肯亞 1 +肯特 1 +育有 1 +育樂 1 +育空 1 +肺病 1 +胃 1 +胃石 1 +背上 1 +背依 1 +背包 1 +背叛 1 +背後 1 +背靠 1 +背面 1 +背鰭 1 +胎 1 +胚 1 +胚胎 1 +胞 1 +胞弟 1 +胡特勒 1 +胡禮 1 +胡蜂 1 +胡馬雍 1 +胸痛 1 +胸管 1 +胸部 1 +胸鰭 1 +能人 1 +能否 1 +能幹 1 +脆 1 +脊椎 1 +脫疽 1 +脫落 1 +脫隊 1 +脫離 1 +脱口秀 1 +脾氣 1 +腐敗 1 +腐蝕 1 +腓力 1 +腔 1 +腫瘤 1 +腳掌 1 +腳本 1 +腳點 1 +腸胃 1 +腸道 1 +腸骨 1 +腹 1 +腿 1 +腿部 1 +膝傷 1 +膝頭 1 +膠 1 +膠州 1 +膠東 1 +膠澳 1 +膠體 1 +膨脹 1 +膽 1 +膽酸 1 +臉 1 +臉頰 1 +臉龐 1 +臘 1 +臥龍 1 +臧 1 +臨 1 +臨榆 1 +臨終 1 +臨高 1 +自作自受 1 +自保 1 +自信 1 +自卑 1 +自在 1 +自學 1 +自帶 1 +自強 1 +自從 1 +自成 1 +自用 1 +自發 1 +自製 1 +自訂 1 +自負 1 +自辦 1 +至上 1 +至善 1 +至柔 1 +至正 1 +至死不渝 1 +至關 1 +至關重要 1 +致使 1 +致函 1 +致恐 1 +致病 1 +致瘋 1 +致癌 1 +臺大 1 +舀出 1 +舅父 1 +興 1 +興國 1 +興學 1 +興業 1 +興海 1 +興祖 1 +舉世矚目 1 +舉例 1 +舉國 1 +舉止 1 +舉薦 1 +舉起 1 +舊友 1 +舊屋 1 +舊時 1 +舊稱 1 +舊部 1 +舊金山 1 +舌頭 1 +舍爾 1 +舍訥費爾德 1 +舒 1 +舒查特 1 +舒爾特 1 +舜初 1 
+舞 1 +舞劇 1 +舞陽 1 +舟 1 +航天 1 +航站 1 +般若 1 +船塢 1 +船山 1 +船業 1 +船體 1 +艦身 1 +良 1 +良師益友 1 +良心 1 +良性 1 +良田 1 +良知 1 +艱巨 1 +色帶 1 +色情 1 +色目 1 +色調 1 +艷姬 1 +艷麗 1 +艾伍士 1 +艾倫 1 +艾塞羅 1 +艾夏 1 +艾崔奇 1 +艾巴德 1 +艾度蘭 1 +艾琳 1 +艾瑞 1 +艾瑪 1 +艾登堡 1 +艾美 1 +艾蓮娜 1 +艾薩克 1 +艾迴 1 +艾雲 1 +艾麗卡 1 +芬妮 1 +芬華絲 1 +芬迪絲 1 +芭蕉 1 +芭黎絲 1 +花上 1 +花俏 1 +花園蔥蝸牛 1 +花坮 1 +花城 1 +花店 1 +花旗 1 +花月 1 +花果 1 +花枝 1 +花瓶 1 +花甲 1 +花蜜 1 +花鞋 1 +苗栗 1 +苗穗 1 +苟且 1 +若愚 1 +若羌 1 +若英 1 +苦 1 +苦力 1 +苦悶 1 +苦情 1 +苦苣苔 1 +苦讀 1 +苯並芘 1 +苯乙烯 1 +英一 1 +英乙 1 +英倫 1 +英傑 1 +英勇 1 +英吋 1 +英國短毛豬 1 +英寸 1 +英尺 1 +英年 1 +英廷 1 +英格瑪 1 +英男 1 +英里 1 +英龍華 1 +茂 1 +茂名 1 +范恩 1 +茄南 1 +茄芮 1 +茅家 1 +茲羅提 1 +茶樓 1 +茶湯 1 +茶館 1 +荃灣 1 +荃麟 1 +草原 1 +草地 1 +草坪 1 +草席 1 +草稿 1 +荊州 1 +荒地 1 +荒蕪 1 +荒誕不經 1 +荔灣 1 +荷爾蒙 1 +荷銀 1 +莆 1 +莊嚴 1 +莊王 1 +莎樂美 1 +莫 1 +莫吉爾諾 1 +莫埃索 1 +莫扎特 1 +莫札特 1 +莫桑 1 +莫瑙恩 1 +莫瓦桑 1 +莫納加斯 1 +莫臥兒 1 +莫過 1 +莫里亞 1 +莽山 1 +菅 1 +菊 1 +菊花 1 +菜 1 +華倫西亞 1 +華少 1 +華新 1 +華族 1 +華林 1 +華爾 1 +華界 1 +華石 1 +華秀 1 +華納 1 +華西 1 +華頓 1 +菲力 1 +菲國 1 +菲德爾 1 +菲爾 1 +菲萊 1 +菲詩 1 +菸害 1 +萊因 1 +萊夫斯 1 +萊希 1 +萊斯特 1 +萊爾 1 +萊特曼 1 +萊茵蘭 1 +萊蕪 1 +萊采巴 1 +萌 1 +萌芽 1 +萎縮 1 +萬一 1 +萬丹 1 +萬貴 1 +落 1 +落下 1 +落實 1 +落敗 1 +落葉 1 +葆玖 1 +葉利欽 1 +葉士域治 1 +葉序 1 +葉綠 1 +著手 1 +著有 1 +著譯 1 +葛 1 +葛力馬 1 +葛朱 1 +葛浩文 1 +葛羅斯 1 +葛蕾絲 1 +葛量洪 1 +葡 1 +葡超 1 +葫蘆 1 +葬禮 1 +葵青 1 +蒂利妮 1 +蒂娜 1 +蒂迦納 1 +蒙丹 1 +蒙卡達 1 +蒙哥 1 +蒙哥馬利 1 +蒙塔尼萊博恩 1 +蒙巴薩 1 +蒙得維 1 +蒙特利爾 1 +蒙羞 1 +蒙面 1 +蒙馬特 1 +蒲 1 +蒲飛 1 +蒸氣 1 +蒸發 1 +蒼白 1 +蓄水 1 +蓋兒 1 +蓋因 1 +蓋多 1 +蓋曼 1 +蓋朗杜克西亞 1 +蓋頂 1 +蓓 1 +蓓天翼龍 1 +蓬塔德馬塔 1 +蓬拉貝 1 +蓬皮杜 1 +蓮 1 +蓮安 1 +蓮花 1 +蔑稱 1 +蔡斯 1 +蔣公 1 +蕙嫻 1 +蕨類 1 +蕩漾 1 +蕾妮 1 +薄 1 +薄弱 1 +薄扶林 1 +薔 1 +薛慶 1 +薦 1 +薩克森 1 +薩凡娜 1 +薩卡拉瓦 1 +薩哈 1 +薩哈林 1 +薩平頓 1 +薩德 1 +薩拉只 1 +薩摩亞 1 +薩爾曼 1 +薩爾瓦多 1 +薩爾茨卡默古特 1 +薩爾馬提亞 1 +薩瑞阿尼迪 1 +薩維塔 1 +薩維奧洛夫 1 +薩馬 1 +薪俸 1 +藉助 1 +藉此 1 +藍儂 1 +藍寶石華麗雨林 1 +藍尼 1 +藍本 1 +藍欽 1 +藍潟 1 +藍灰 1 +藍田 1 +藍白 1 +藍背 1 +藍邊 1 +藍領 1 +藍黨 1 +藏之介 1 +藏寶 1 +藏有 1 +藝 1 +藝名 1 +藝能 1 +藝謀 1 +藝電 1 +藤原 1 +藤木 1 +藤本 1 +藤村 1 +藤枝 1 +藤藝 1 +藥品 1 +藥師 1 +藥材 1 +藥水 1 +藥石 1 +藩主 1 +藩士 1 +藩西 1 +蘇利文 1 +蘇北 1 +蘇尋三 1 +蘇木 1 +蘇格拉底 1 +蘇維匯 1 +蘇美爾 1 +蘇萊曼尼亞 1 +蘇醒 1 +蘇里南 1 +蘊藏 1 +蘭利 1 +蘭卡斯特 1 +蘭封 1 +蘭弗朗克 1 +蘭德 1 +虎式 1 +虎棒 1 +虎翼 1 
+虎視眈眈 1 +虔信 1 +處之泰然 1 +處女 1 +處決 1 +處置 1 +處長 1 +虛弱 1 +虛榮 1 +虛無 1 +號吾 1 +號子 1 +號稱 1 +號誌 1 +虢 1 +虢國 1 +虹 1 +虹橋 1 +蚊類 1 +蚩尤 1 +蛇油 1 +蛇種 1 +蛇魔 1 +蛋 1 +蛋白質 1 +蛙 1 +蜂擁而至 1 +蜂蜜 1 +蜆殼 1 +蜚聲 1 +蜥蜴 1 +蜿蜒 1 +蝴蝶 1 +融入 1 +融化 1 +融和 1 +融雪 1 +螞蟻 1 +螢幕 1 +蟬聯 1 +蟲 1 +蟲洞 1 +蠟浸 1 +蠶院 1 +蠻子 1 +血型 1 +血液 1 +血竭 1 +血管 1 +血腥 1 +行人 1 +行使 1 +行列 1 +行將 1 +行用 1 +行禮 1 +行長 1 +行騙 1 +術 1 +街上 1 +街名 1 +街市 1 +街路 1 +街頭 1 +衛理 1 +衝動 1 +衝鋒 1 +衡 1 +衡量 1 +衢山 1 +衣 1 +衣冠 1 +衣物 1 +衣索比亞 1 +表型 1 +表妹 1 +表姐 1 +表徵 1 +表情 1 +表態 1 +表揚 1 +表格 1 +表決 1 +表白 1 +表述 1 +衰敗 1 +衰落 1 +袖手旁觀 1 +袖箭 1 +被告 1 +被子 1 +裁決 1 +裁減 1 +裂縫 1 +裂變 1 +裋褐 1 +裕 1 +裕智 1 +裕軍 1 +裙子 1 +補償 1 +補天 1 +補教 1 +補時 1 +補褂 1 +裝修 1 +裝備 1 +裝嵌 1 +裝有 1 +裝瓶 1 +裝葯 1 +裝設 1 +裝載 1 +裴 1 +裴林 1 +裸子 1 +裸照 1 +製備 1 +製得 1 +複數 1 +褐色 1 +褪色 1 +褲 1 +褲子 1 +褲袋 1 +襄 1 +襄助 1 +襄王 1 +襄陽 1 +襲 1 +襲封 1 +西亞特 1 +西京 1 +西周 1 +西哈莫尼 1 +西坑 1 +西域 1 +西夏 1 +西奧多 1 +西宮 1 +西岸 1 +西島 1 +西廠 1 +西式 1 +西弗萊德 1 +西斯廷 1 +西晉 1 +西段 1 +西河 1 +西洋坪 1 +西漢 1 +西甌 1 +西線 1 +西美 1 +西蒙 1 +西薩 1 +西蘭卡普 1 +西西里 1 +西距 1 +西迪 1 +西鄉 1 +要是 1 +要脅 1 +要衝 1 +要道 1 +見人 1 +見稱 1 +見聞 1 +見解 1 +見識 1 +見長 1 +規例 1 +覓食 1 +視乎 1 +視作 1 +視圖 1 +視角 1 +親人 1 +親信 1 +親政 1 +親朋 1 +親筆 1 +親臨 1 +親身 1 +覺察 1 +覽 1 +觀光 1 +觀察 1 +觀念 1 +觀戰 1 +觀望 1 +觀看 1 +觀者 1 +角膜 1 +解僱 1 +解夢 1 +解析 1 +解答 1 +解職 1 +解脫 1 +解說 1 +觸怒 1 +觸手可及 1 +觸覺 1 +觸診 1 +言官 1 +言語 1 +言辭 1 +訂位 1 +訃告 1 +訄書 1 +訇開 1 +計委 1 +計謀 1 +討逆 1 +訓 1 +託 1 +記念 1 +記述 1 +記集 1 +設站 1 +許昌 1 +許諾 1 +許願 1 +訴 1 +訴求 1 +訴諸 1 +註 1 +註明 1 +註銷 1 +詐死 1 +詔書 1 +評出 1 +評判 1 +評鑑 1 +詛咒 1 +詞幹 1 +詞義 1 +詢問 1 +試劑 1 +試播 1 +試種 1 +試製 1 +試音 1 +試飛 1 +詩文 1 +該事 1 +該人 1 +該墓 1 +該島 1 +該年 1 +該批 1 +該族 1 +該會 1 +該條 1 +該段 1 +該科 1 +該系 1 +該處 1 +該路 1 +該黨 1 +詳情 1 +詳細 1 +詹姆士 1 +詼諧 1 +誇德拉多 1 +誇祖魯 1 +誌 1 +誌家 1 +認一民 1 +認同 1 +認定 1 +認罪 1 +認證 1 +認輔 1 +誓言 1 +誕 1 +誕下 1 +誘因 1 +語文 1 +語法 1 +語流 1 +語訓 1 +語調 1 +語速 1 +語音 1 +誠意 1 +誤 1 +誤信 1 +誤差 1 +誤會 1 +誤槍 1 +誤譯 1 +誥命 1 +誦 1 +說出 1 +說客 1 +說成 1 +說話 1 +說謊 1 +說道 1 +課本 1 +誹謗 1 +調值 1 +調停 1 +調入 1 +調和 1 +調控 1 +調水 1 +調沙 1 +調研 1 +調節 1 +調職 1 +調解 1 +諂媚 1 +談判 1 +談妥 1 +談論 1 +請來 1 +請辭 1 +請願 1 +論事 1 +諜海 1 +諧波 1 +諶 1 +諸 1 +諸如 1 +諸暨 1 +諸河 1 +諺言 1 +諾丁漢 1 +諾域治 1 
+諾斯 1 +諾曼 1 +諾爾曼 1 +謀取 1 +謀士 1 +謀求 1 +謀職 1 +謁者 1 +謇 1 +謊言 1 +謙卑 1 +謚 1 +講完 1 +講究 1 +講談 1 +講道 1 +謝世 1 +謝列梅捷沃 1 +謝爾比 1 +謝瓦爾德納澤 1 +謝蓋爾 1 +謹 1 +謹慎 1 +證 1 +譚 1 +譜代 1 +警務 1 +警句 1 +警告 1 +警員 1 +警戒 1 +警衛 1 +警覺 1 +警鐘 1 +譯作 1 +譯員 1 +譯場 1 +譯本 1 +議席 1 +譴責 1 +護佑 1 +護城 1 +護墊 1 +護送 1 +讀取 1 +讀法 1 +變動 1 +變差 1 +變調 1 +變身 1 +變遷 1 +變革 1 +讓步 1 +讓開 1 +讚喻 1 +讚揚 1 +讚美 1 +讚譽 1 +谷山 1 +谷氨酸 1 +豆瓣 1 +豈 1 +豎立 1 +豎起 1 +豐久 1 +豐厚 1 +豐城 1 +豐臣 1 +豐隆 1 +象數 1 +象晉 1 +象牙 1 +象牙喙啄木鳥 1 +豢養 1 +豪宅 1 +豪門 1 +豫南 1 +豬 1 +豬圈 1 +豬油 1 +豬肉 1 +貂 1 +貓咪 1 +貓囒 1 +貓科 1 +貝克 1 +貝克漢 1 +貝加爾 1 +貝南 1 +貝斯 1 +貝爾普 1 +貝爾蘇斯 1 +貝碧嘉 1 +貝納斯科 1 +貝都因 1 +貝類 1 +貞昌 1 +貞潔 1 +貞觀 1 +負擔 1 +負芻 1 +負荷 1 +負面 1 +負額 1 +財經 1 +財落 1 +貢 1 +貢品 1 +貢哥拉 1 +貢嘎 1 +貢巴 1 +貧 1 +貧乏 1 +貧窮 1 +貧鈾 1 +貨 1 +貨品 1 +貨機 1 +販賣 1 +貪圖 1 +貪婪 1 +貪心 1 +貪瀆 1 +貫徹 1 +貫穿 1 +貫通 1 +責怪 1 +責難 1 +貴築 1 +貴賓 1 +貴陽 1 +貴霜 1 +貶意 1 +買入 1 +買賣 1 +費曼 1 +費爾南多 1 +費用 1 +費盡 1 +費羅 1 +貼身 1 +賀特 1 +賀立 1 +賄選 1 +資 1 +資政 1 +資陽 1 +賈亞辛哈 1 +賈多特 1 +賈斯丁 1 +賈斯珀 1 +賈氏 1 +賓客 1 +賓尼迪斯 1 +賓州 1 +賞識 1 +賠禮 1 +賡臣 1 +賢思 1 +賣 1 +賣出 1 +賣到 1 +賣地 1 +賣家 1 +賣掉 1 +賣空 1 +賤女 1 +賤民 1 +質詢 1 +賭徒 1 +賭檔 1 +賴宣 1 +賺取 1 +賺錢 1 +購得 1 +購置 1 +賽場 1 +賽普勒斯 1 +賽爾金德 1 +賽車 1 +賽道 1 +贈 1 +贈送 1 +贊博尼 1 +贊成 1 +贊比西亞 1 +贏家 1 +贖回 1 +赤坂 1 +赤壁 1 +赤樹 1 +赤狐 1 +赤鱲 1 +赦 1 +赫伯特 1 +赫塔卜 1 +赫斯 1 +赫比格 1 +赫爾克 1 +赫爾辛基 1 +赫雷爾斯 1 +赫魯曉夫 1 +走上 1 +走到 1 +走勢 1 +走漏 1 +走私 1 +起事 1 +起伏 1 +起初 1 +起名 1 +起因 1 +起始 1 +起建 1 +起止 1 +起死回生 1 +起碼 1 +起端 1 +起舞 1 +起落 1 +起訖 1 +起降 1 +起點 1 +趁 1 +超出 1 +超導 1 +超強 1 +超我 1 +超時 1 +超武 1 +超然 1 +超重 1 +超齡 1 +越亮 1 +越共 1 +越前 1 +越好 1 +越弱 1 +越戰 1 +越早 1 +越暗 1 +越牆 1 +越發 1 +越近 1 +越過 1 +趕往 1 +趙氏 1 +趟 1 +趣事 1 +趨勢 1 +趨於 1 +足不出戶 1 +足夠 1 +足見 1 +足跡 1 +趾爪 1 +趾骨 1 +跋扈 1 +跌 1 +跑 1 +跑壘 1 +跑步 1 +跑車 1 +跑馬 1 +跟操 1 +跟班 1 +跟蹤 1 +跟進 1 +跟隨 1 +跨 1 +跨國 1 +跨度 1 +跨步 1 +跨足 1 +跨過 1 +路政 1 +路易斯安那 1 +路濟亞 1 +路綫 1 +路網 1 +路透 1 +路過 1 +路障 1 +路面 1 +跳動 1 +跳槽 1 +跳過 1 +跳遠 1 +跳高 1 +踏上 1 +踏入 1 +踢進 1 +躁 1 +躁動 1 +躍升 1 +身受 1 +身型 1 +身旁 1 +身為 1 +身無分文 1 +身著 1 +身軀 1 +身高 1 +躬耕 1 +躲到 1 +車上 1 +車仁 1 +車型 1 +車士打菲特 1 +車外 1 +車尾 1 +車市 1 +車廠 1 +車手 1 +車票 1 +車程 1 +車窗 1 +車系 1 +車號 1 +車費 1 +車路士 1 +車迷 1 +車頭 1 +軋箏 1 +軌跡 1 +軍中 1 
+軍備 1 +軍功 1 +軍務 1 +軍委 1 +軍師 1 +軍援 1 +軍方 1 +軍服 1 +軍營 1 +軍艦 1 +軍裝 1 +軍階 1 +軍需 1 +軒轅 1 +軟 1 +軟化 1 +軟硬體 1 +軟骨 1 +軸 1 +軸心 1 +較低 1 +較佳 1 +較厚 1 +較快 1 +較深 1 +載人 1 +載淳 1 +輔 1 +輔佐 1 +輕微 1 +輕易 1 +輕軌 1 +輕鐵 1 +輕髻 1 +輕鬆 1 +輝 1 +輝彥 1 +輪周 1 +輪廓 1 +輪流 1 +輪船 1 +輪迴 1 +輯 1 +輯錄 1 +輸 1 +輸掉 1 +輸精 1 +輸血 1 +輸送 1 +輻轍 1 +輻鰭 1 +輾轉 1 +轅 1 +轉交 1 +轉任 1 +轉動 1 +轉化 1 +轉向 1 +轉型 1 +轉差 1 +轉往 1 +轉念 1 +轉播 1 +轉會 1 +轉正 1 +轉角 1 +轉賣 1 +轉赴 1 +辛普朗 1 +辛普森 1 +辛辛那提 1 +辜 1 +辟邪 1 +辦學 1 +辦有 1 +辨別 1 +辨明 1 +辨識 1 +辭典 1 +辭官 1 +辭歲 1 +辯證 1 +辰國 1 +辰男 1 +農事 1 +農墾 1 +農書 1 +農林 1 +農舍 1 +迅 1 +迅即 1 +迅猛 1 +迎 1 +迎神 1 +迎賓 1 +迎送 1 +迎面 1 +近似 1 +近侍 1 +近平 1 +近日 1 +近東 1 +近海 1 +近現代 1 +近親 1 +近鄰 1 +返 1 +返樸歸真 1 +迦南 1 +迦納 1 +迪克 1 +迪克蘭 1 +迪士尼 1 +迪斯雷利 1 +迪比亞吉奧 1 +迪爾汗 1 +迪米特 1 +迫切 1 +述 1 +迴流 1 +迷你變色龍 1 +迷唐 1 +迷路 1 +追兇 1 +追回 1 +追封 1 +追尋 1 +追尾 1 +追思 1 +追憶 1 +追查 1 +追根究底 1 +追殺 1 +追求 1 +追究 1 +追討 1 +追述 1 +退位 1 +退回 1 +退夷 1 +退居 1 +退敵 1 +退隱 1 +送來 1 +送到 1 +送回 1 +送殯 1 +送給 1 +送院 1 +逃亡 1 +逃奔 1 +逃至 1 +逃跑 1 +逆 1 +逆戟鯨 1 +逍遙 1 +透徹 1 +透支 1 +透水 1 +透視 1 +透鏡 1 +逐客 1 +途中 1 +途人 1 +途經 1 +這兒 1 +這時 1 +通俗 1 +通商 1 +通天 1 +通宏 1 +通州 1 +通渭 1 +通貨 1 +通通 1 +通運 1 +通靈 1 +通風 1 +逛街 1 +速往 1 +速銷 1 +造價 1 +造反 1 +造就 1 +造幣 1 +造福 1 +造血 1 +造訪 1 +造謠 1 +逢吉 1 +連串 1 +連克 1 +連坐 1 +連年 1 +連座 1 +連成 1 +連拍 1 +連筆 1 +連篇累牘 1 +連結 1 +連絡 1 +連通 1 +連進 1 +連餓 1 +週末 1 +週邊 1 +進位 1 +進來 1 +進出 1 +進動 1 +進犯 1 +逼 1 +逼使 1 +逼停 1 +逼到 1 +逾期 1 +遂起 1 +遇上 1 +遇刺 1 +遇有 1 +遇陛 1 +遇難 1 +遊憩 1 +遊擊 1 +遊歷 1 +遊艇 1 +遊覽 1 +遊說 1 +遊離 1 +運 1 +運回 1 +運往 1 +運煤 1 +運算 1 +運糧 1 +運補 1 +運載 1 +遍 1 +遍布 1 +過冷 1 +過剩 1 +過多 1 +過往 1 +過敏 1 +過橋 1 +過濾 1 +過甚 1 +過繼 1 +過苛 1 +過路 1 +過頭 1 +道世民 1 +道具 1 +道墟 1 +道士 1 +道學 1 +道宇 1 +道安 1 +道格拉斯 1 +道歉 1 +道理 1 +道綽 1 +道羅 1 +道義 1 +道靜 1 +達上 1 +達人 1 +達克斯 1 +達古武 1 +達恩利 1 +達拉斯 1 +達拏 1 +達拖錯 1 +達母拿錯 1 +達濠 1 +達爾文 1 +達章 1 +達華 1 +達賴 1 +違背 1 +遙陽 1 +遜位 1 +遞交 1 +遞增 1 +遠呂智 1 +遠嫁 1 +遠揚 1 +遠日 1 +遠洋 1 +遠處 1 +遠遠 1 +遠離 1 +遣 1 +遣返 1 +適之 1 +適用 1 +遭殃 1 +遮天 1 +遮蔭 1 +遮陰 1 +遲 1 +遲遲 1 +遷出 1 +遷居 1 +遷校 1 +選上 1 +選修 1 +選定 1 +選用 1 +選美 1 +選訓 1 +選調 1 +選進 1 +選題 1 +遹 1 +遺物 1 +遺留 1 +遺腹 1 +遺迹 1 +遺骸 1 +遼西翼龍 1 +避 1 +避禍 1 +避開 1 +邁克 1 +邁向 1 +邁阿密 1 +還擊 1 +還有 1 +邊區 1 
+邗江 1 +那時 1 +那普拉夫尼克 1 +那曲 1 +邦國 1 +邦德 1 +邦蒂 1 +邦達倉 1 +邪惡 1 +邪神 1 +邪馬台 1 +邱家 1 +邳縣 1 +邵伯 1 +邵氏 1 +郊狼 1 +郎 1 +郝 1 +郡區 1 +郡縣 1 +郡艾塞克斯 1 +部位 1 +部字 1 +部將 1 +部首 1 +郪江 1 +郫縣 1 +郭家 1 +郵報 1 +郵輪 1 +都城嘉慕 1 +都察 1 +都尉 1 +都會 1 +都有 1 +都督 1 +都靈 1 +鄂 1 +鄂倫春 1 +鄂溫克 1 +鄂霍次克 1 +鄉內 1 +鄉團 1 +鄉村 1 +鄉長 1 +鄰 1 +鄰域 1 +鄰居 1 +鄰里 1 +酃縣 1 +酆 1 +配上 1 +配件 1 +配備 1 +配器 1 +配有 1 +配角 1 +酒家 1 +酒杯 1 +酒樓 1 +酒鬼 1 +酩酊大醉 1 +酵母 1 +酷似 1 +酷刑 1 +醉醺醺 1 +醋酸根 1 +醫書 1 +醫科 1 +醫術 1 +醬貨 1 +醴陵 1 +釀成 1 +釀造 1 +釉色 1 +釋出 1 +釋迦 1 +釋迦牟尼 1 +里士滿 1 +里奧多 1 +里港 1 +里馬 1 +重創 1 +重力 1 +重回 1 +重復 1 +重心 1 +重情 1 +重播 1 +重核 1 +重物 1 +重獲 1 +重現 1 +重生 1 +重用 1 +重疊 1 +重禮 1 +重組 1 +重義 1 +重考 1 +重製 1 +重複 1 +重見天日 1 +重讀 1 +重鎮 1 +重開 1 +重陽 1 +重音 1 +重鳳 1 +野外 1 +野心勃勃 1 +野戰 1 +野木 1 +野球 1 +野菜 1 +量度 1 +金剛 1 +金寶 1 +金帶英麗魚 1 +金幣 1 +金平 1 +金氏 1 +金泉 1 +金浦 1 +金湖 1 +金牛 1 +金獎 1 +金箔 1 +金羅斯 1 +金美 1 +金華 1 +金質 1 +金邊 1 +金銀 1 +金錢 1 +金門 1 +金靴 1 +金頂 1 +金魚 1 +金鵰 1 +釜山 1 +針劑 1 +釧路 1 +鈇 1 +鈦 1 +鈺源 1 +鉑金 1 +銀杏 1 +銀熊 1 +銀牌 1 +銀白 1 +銀紅 1 +銀色 1 +銅仁 1 +銅像 1 +銅削 1 +銅斧 1 +銅柄 1 +銅臿 1 +銅製 1 +銅銎 1 +銅錛 1 +銅錢 1 +銘 1 +銘皖 1 +銘銘 1 +銜稱 1 +銠 1 +銳利 1 +銷毀 1 +銷量 1 +鋒 1 +鋪成 1 +鋪有 1 +鋸齒龍 1 +鋼板 1 +錄影 1 +錄得 1 +錄放影機 1 +錢上 1 +錦 1 +錦俊 1 +錦承 1 +錦江 1 +錦田 1 +錫 1 +錫伯 1 +錫勇 1 +錫昌 1 +錯 1 +錯視 1 +錯覺 1 +錳 1 +錳礦 1 +鍊金 1 +鍋中 1 +鍋內 1 +鍋爐 1 +鍔 1 +鍛鍊 1 +鍝 1 +鍾 1 +鎖妖 1 +鎖閉 1 +鎮守 1 +鎮岳 1 +鎮朔 1 +鎮賚 1 +鎮里 1 +鎮靜 1 +鎰 1 +鎳銀 1 +鏈 1 +鏡波 1 +鏡湖 1 +鐳 1 +鐵削 1 +鐵匾 1 +鐵棍 1 +鐵民 1 +鐵爐 1 +鐵管 1 +鐵釘 1 +鐵銹 1 +鐵錛 1 +鑑別 1 +鑑定 1 +鑑泉 1 +鑑證 1 +鑒定 1 +鑫新 1 +鑽入 1 +鑽出 1 +鑽探 1 +鑿出 1 +長凳 1 +長史 1 +長婁 1 +長孫 1 +長岡 1 +長崎 1 +長廊 1 +長廷 1 +長方 1 +長榮 1 +長毛 1 +長治 1 +長溝 1 +長滿 1 +長瑪喀比 1 +長盛 1 +長笛 1 +長篇 1 +長編 1 +長跑 1 +長頸鹿 1 +長髮 1 +門修斯 1 +門廳 1 +門式 1 +閃米特 1 +閃長 1 +閃電 1 +閉日 1 +開價 1 +開光 1 +開啟 1 +開場 1 +開墾 1 +開學 1 +開工 1 +開往 1 +開戰 1 +開拓 1 +開挖 1 +開支 1 +開教 1 +開業 1 +開槍 1 +開球 1 +開瑞坦 1 +開票 1 +開車 1 +開辦 1 +開錄 1 +閑聊 1 +閑談 1 +閒言閒語 1 +間斷 1 +間碟 1 +間距 1 +閘口 1 +閘機 1 +閣 1 +閩侯 1 +閩南 1 +闖進 1 +關中 1 +關斷 1 +關連 1 +闡述 1 +闢 1 +阡陌 1 +阪神 1 +防凍 1 +防止 1 +防盜 1 +防護 1 +阻塞 1 +阻撓 1 +阻隔 1 +阿一 1 +阿仙奴 1 +阿信 1 +阿修羅 1 +阿內爾卡 1 +阿勒格尼郡 1 +阿勝 1 +阿勞 1 +阿基里斯 1 +阿堯 1 +阿奇里斯 1 +阿寧 1 +阿布 1 +阿拉法特 1 +阿斗 1 +阿普第 1 
+阿曼達 1 +阿東 1 +阿格拉 1 +阿格雷斯蒂 1 +阿森斯 1 +阿森納 1 +阿比西尼亞豬 1 +阿波羅 1 +阿爾及利亞 1 +阿爾及爾 1 +阿爾布巴 1 +阿爾扎阿爾拉齊蓋 1 +阿爾法 1 +阿爾發 1 +阿爾茨海默 1 +阿爾高 1 +阿特 1 +阿特拉斯 1 +阿猴 1 +阿瑜陀耶 1 +阿穆爾 1 +阿羅那順 1 +阿耳忒彌斯 1 +阿聯酋 1 +阿育 1 +阿茲海默 1 +阿諾 1 +阿賈克斯 1 +阿赫 1 +阿連德 1 +阿道夫 1 +阿達姆庫斯 1 +阿里 1 +阿隆索 1 +陀斯妥也夫斯基 1 +附上 1 +附加 1 +附蟲 1 +附表 1 +附身 1 +降將 1 +降格 1 +降水 1 +降班 1 +降臨 1 +降魔 1 +限 1 +限定 1 +限時 1 +陞 1 +陡壁 1 +院士 1 +院子 1 +院落 1 +陣 1 +除冰 1 +除夕 1 +除此 1 +除非 1 +陪葬 1 +陪都 1 +陰天 1 +陰暗 1 +陰陽 1 +陳國 1 +陳屍 1 +陳相 1 +陳述 1 +陵園 1 +陶恩 1 +陷落 1 +陸仔 1 +陸域 1 +陸行 1 +陽 1 +陽安 1 +陽明 1 +隆亨 1 +隊列 1 +隊名 1 +隔日 1 +隔開 1 +隕星 1 +隕鐵 1 +際春 1 +隠居 1 +隨丁 1 +隨便 1 +隨同 1 +隨往 1 +隨時 1 +隨軍 1 +隨隊 1 +險些 1 +險要 1 +隱含 1 +隱姓埋名 1 +隱居 1 +隱性 1 +隱私 1 +隻身 1 +雄 1 +雄師 1 +雄獅 1 +雅克 1 +雅加達 1 +雅各布 1 +雅君 1 +集寧 1 +集結 1 +集聚 1 +雌性 1 +雌獸 1 +雌鯨 1 +雎 1 +雙十 1 +雙子 1 +雙江 1 +雜姓 1 +雜糧 1 +雜處 1 +雜食 1 +雞腿 1 +雞頭 1 +離別 1 +離域 1 +離場 1 +離子 1 +離島 1 +離群索居 1 +離職 1 +難吃 1 +難得 1 +難攻 1 +難過 1 +雨季 1 +雨後春筍 1 +雨林 1 +雪上加霜 1 +雪佛龍 1 +雪兒 1 +雪崩 1 +雪弟 1 +雪梅 1 +雲中 1 +雲亭 1 +雲岩 1 +雲松 1 +雲里 1 +零件 1 +零部件 1 +零食 1 +雷 1 +雷克南 1 +雷克斯 1 +雷切爾 1 +雷姆 1 +雷定 1 +雷昂納多 1 +雷曼 1 +雷王 1 +雷蒂亞 1 +雷雨 1 +電信 1 +電器 1 +電極 1 +電氣 1 +電瓶 1 +電線 1 +電通 1 +電邀 1 +需時 1 +霆鋒 1 +震寰 1 +震波 1 +震災 1 +霍亂 1 +霍伊爾 1 +霍夫堡 1 +霍姆 1 +霍巴特 1 +霍斯 1 +霍普金斯 1 +霍爾滕 1 +霍爾特 1 +霞 1 +霧 1 +露出 1 +露比 1 +露臉 1 +露西 1 +霸佔 1 +霸權 1 +靈前 1 +靈力 1 +靈性 1 +靈感 1 +靈柩 1 +靈活 1 +靈異 1 +靈籤 1 +靈長 1 +靈魂 1 +青 1 +青梅 1 +青森 1 +青睞 1 +青訓 1 +青金 1 +靖 1 +靖雯 1 +靜安 1 +靜岡 1 +靜華 1 +靠右 1 +靠左 1 +面具 1 +面向 1 +面貌 1 +革除 1 +鞏 1 +鞦韆 1 +韃靼 1 +韋 1 +韋契特 1 +韋德 1 +韋拉克魯斯 1 +韋拿 1 +韋斯特 1 +韋科 1 +韌 1 +韓氏 1 +韓浜 1 +音律 1 +音色 1 +音量 1 +音高 1 +韶之 1 +響號 1 +頂上 1 +頂尖 1 +頂峰 1 +頂端 1 +頂級 1 +項鏈 1 +順宗 1 +順岸 1 +順德 1 +順應 1 +順懷 1 +順治 1 +順滑 1 +順陽 1 +頌平 1 +頌揚 1 +預 1 +預估 1 +預告 1 +預知 1 +預示 1 +預約 1 +頑石 1 +頒給 1 +頗 1 +頗多 1 +頗大 1 +頗有 1 +頗盛 1 +頗豐 1 +領事 1 +領取 1 +領奏 1 +領航 1 +領軍 1 +領隊 1 +頡 1 +頭上 1 +頭前 1 +頭型 1 +頭尾 1 +頭槌 1 +頭版 1 +頭盔 1 +頭紗 1 +頭髮 1 +頸 1 +頸部 1 +頹垣 1 +頻 1 +頻寬 1 +頻散 1 +頻繁 1 +頻頻 1 +題獻 1 +題記 1 +額外 1 +額度 1 +類別 1 +類固醇 1 +顥 1 +顯 1 +顯光 1 +顯徑 1 +顯現 1 +顯靈 1 +風化 1 +風尚 1 +風波 1 +風行 1 +風間 1 +風雨 1 +飈 1 +飛往 1 +飛抵 1 +飛毛 1 +飛沫 1 +飛碟 1 +飛鏢 1 +飛靶 1 +飛鳥 1 +飛龍 1 +食人 1 
+食肆 1 +食肉 1 +食蟲 1 +食鹽 1 +飲茶 1 +飼料 1 +飼草 1 +飽和 1 +飽經 1 +飾物 1 +餃子 1 +餅 1 +養份 1 +養大 1 +養女 1 +養母 1 +養父 1 +養精蓄銳 1 +養育 1 +養菊 1 +養蠶 1 +餐車 1 +餘 1 +餘熱 1 +餘眾 1 +館前 1 +館名 1 +館址 1 +饃 1 +饑餓 1 +饒平 1 +饕餮 1 +首仗 1 +首個 1 +首名 1 +首場 1 +首屈一指 1 +首席 1 +首戰 1 +首批 1 +首日 1 +首映 1 +首條 1 +首艦 1 +首讀 1 +香 1 +香亭 1 +香儂 1 +香吉士 1 +香味 1 +香坊 1 +香塍 1 +香水 1 +香洲 1 +香火 1 +香織 1 +馬丁尼茲 1 +馬丁斯維勒 1 +馬上 1 +馬修 1 +馬克安諾 1 +馬克西米利 1 +馬內阿 1 +馬六甲 1 +馬匹 1 +馬喇 1 +馬圈 1 +馬奇頓 1 +馬尼拉 1 +馬托格羅索 1 +馬爾他 1 +馬爾吉阿納 1 +馬爾地夫 1 +馬爾默 1 +馬球 1 +馬約拉那 1 +馬莎 1 +馬薩 1 +馬薩諸塞 1 +馬賽 1 +馬赫盧普 1 +馬路 1 +馬達加斯加 1 +馬里內蒂 1 +馬里蘭 1 +馬雅可夫斯基 1 +馬鞍 1 +馬黑麻 1 +馳名 1 +馴化 1 +駐任 1 +駐地 1 +駐防 1 +駕崩 1 +駙馬 1 +駛 1 +駛入 1 +駛過 1 +駿業 1 +騁遠 1 +騎 1 +騎馬 1 +騏一郎 1 +騙徒 1 +騰出 1 +騰訊 1 +騷擾 1 +驅 1 +驗屍 1 +驗票 1 +驗證 1 +驗電 1 +驚人 1 +驚動 1 +驚喜 1 +驚嘆 1 +驚訝 1 +驚醒 1 +驟減 1 +驟逝 1 +驢肉 1 +驥 1 +骨幹 1 +骯髒 1 +骷髏 1 +體側 1 +體外 1 +體委 1 +體工 1 +體會 1 +體溫 1 +髖骨 1 +高下 1 +高傲 1 +高傲不群 1 +高出 1 +高升 1 +高地 1 +高大 1 +高峰 1 +高座 1 +高手 1 +高效 1 +高新 1 +高杉 1 +高檔 1 +高清 1 +高漲 1 +高熱 1 +高燥 1 +高爾夫 1 +高爾德 1 +高琦 1 +高盧 1 +高聳 1 +高處 1 +高買 1 +高質 1 +高超 1 +高雄 1 +高高在上 1 +髮 1 +髮生 1 +髮辮 1 +鬆髻 1 +鬚 1 +鬚鯨 1 +鬥雞 1 +鬧 1 +鬧出 1 +鬼影 1 +鬼怪 1 +鬼道 1 +魁智 1 +魅惑 1 +魏國 1 +魏斯曼 1 +魏氏 1 +魏澤爾 1 +魔力 1 +魔界 1 +魔石 1 +魔鬼 1 +魚尾 1 +魚腹 1 +魚苗 1 +魚類 1 +魯 1 +魯伯 1 +魯國 1 +魯特 1 +魯登尼亞 1 +魯良新元 1 +魯茨科伊 1 +魯西迪 1 +魯道夫 1 +鮑亞士 1 +鮑克瑟 1 +鮑爾溫 1 +鮑維 1 +鮑里斯 1 +鮑魚 1 +鮮 1 +鮮有 1 +鮮用 1 +鮮虞 1 +鯉齒 1 +鰓蓋 1 +鰭條 1 +鰺沢駅 1 +鱗 1 +鱗甲 1 +鱗骨 1 +鳥 1 +鳥獸 1 +鳥種 1 +鳳 1 +鳳彬 1 +鳴叫 1 +鳴放 1 +鳴道 1 +鴛鴦 1 +鴻南 1 +鴻章 1 +鴻績 1 +鴻華 1 +鴻超 1 +鴻逵 1 +鴻銘 1 +鹽 1 +鹽城 1 +鹽州 1 +鹽酸 1 +鹿兒島 1 +鹿鼎 1 +麒 1 +麗晶 1 +麗泰 1 +麗珍 1 +麗華 1 +麗閣 1 +麥克 1 +麥克佛森 1 +麥克羅伯特森 1 +麥克默多 1 +麥加利 1 +麥卡特尼 1 +麥拉倫 1 +麥格林 1 +麥當勞 1 +麥芽 1 +麥迪文 1 +麩氨酸 1 +麵 1 +麵團 1 +麵皮 1 +麻城 1 +麻塞諸塞 1 +麻將 1 +麻布 1 +麻木 1 +麻痹 1 +黃岡 1 +黃巾 1 +黃昏 1 +黃沙 1 +黃河 1 +黃蜂 1 +黎家 1 +黎明 1 +黎筍 1 +黑奴 1 +黑帶 1 +黑手 1 +黑暗 1 +黑木 1 +黑板 1 +黑死 1 +黑海 1 +黑衫 1 +黑錢 1 +黑鐵木 1 +黑雲 1 +黑髮 1 +默多克 1 +默比施 1 +默默 1 +黛安娜 1 +黛絲 1 +點陣 1 +點點頭 1 +黨團 1 +黨委 1 +黨校 1 +黨歌 1 +黨衛 1 +黨部 1 +黨魁 1 +鼎灶 1 +鼎芬 1 +鼎金 1 +鼓手 1 +鼬鼠 1 +齊國 1 +齋 1 +齒狀 1 +齒輪 1 +齲齒 1 +龍台 1 +龍女 1 +龍文 1 +龍耳 1 +龍頭 1 +龐 1 +龐特佛雷特 1 +龐貝 1 +龜茲 1 diff --git 
a/syntaxnet/dragnn/conll2017/sample/zh-segmenter.checkpoint.data-00000-of-00001 b/syntaxnet/dragnn/conll2017/sample/zh-segmenter.checkpoint.data-00000-of-00001 new file mode 100644 index 0000000000000000000000000000000000000000..1f4b2bbf058ec535366b114289e904a625225537 Binary files /dev/null and b/syntaxnet/dragnn/conll2017/sample/zh-segmenter.checkpoint.data-00000-of-00001 differ diff --git a/syntaxnet/dragnn/conll2017/sample/zh-segmenter.checkpoint.index b/syntaxnet/dragnn/conll2017/sample/zh-segmenter.checkpoint.index new file mode 100644 index 0000000000000000000000000000000000000000..0223e61fd79068db6dfc2e3e70ec4c21272f1d0c Binary files /dev/null and b/syntaxnet/dragnn/conll2017/sample/zh-segmenter.checkpoint.index differ diff --git a/syntaxnet/dragnn/conll2017/sample/zh-segmenter.checkpoint.meta b/syntaxnet/dragnn/conll2017/sample/zh-segmenter.checkpoint.meta new file mode 100644 index 0000000000000000000000000000000000000000..e765f61837709bc7dd6b975e772da996f421ee1b Binary files /dev/null and b/syntaxnet/dragnn/conll2017/sample/zh-segmenter.checkpoint.meta differ diff --git a/syntaxnet/dragnn/conll2017/sample/zh-segmenter.master_spec b/syntaxnet/dragnn/conll2017/sample/zh-segmenter.master_spec new file mode 100644 index 0000000000000000000000000000000000000000..ceca40c309bfd058581dd7a8abb15a976ca6e108 --- /dev/null +++ b/syntaxnet/dragnn/conll2017/sample/zh-segmenter.master_spec @@ -0,0 +1,187 @@ +component { + name: "lookahead" + transition_system { + registered_name: "shift-only" + parameters { + key: "left_to_right" + value: "false" + } + } + resource { + name: "word-map" + part { + file_pattern: "word-map" + } + } + resource { + name: "tag-map" + part { + file_pattern: "tag-map" + } + } + resource { + name: "tag-to-category" + part { + file_pattern: "tag-to-category" + } + } + resource { + name: "lcword-map" + part { + file_pattern: "lcword-map" + } + } + resource { + name: "category-map" + part { + file_pattern: "category-map" + } + } + resource { + 
name: "char-map" + part { + file_pattern: "char-map" + } + } + resource { + name: "char-ngram-map" + part { + file_pattern: "char-ngram-map" + } + } + resource { + name: "label-map" + part { + file_pattern: "label-map" + } + } + resource { + name: "prefix-table" + part { + file_pattern: "prefix-table" + } + } + resource { + name: "suffix-table" + part { + file_pattern: "suffix-table" + } + } + fixed_feature { + name: "char" + fml: "input(-1).char input.char input(1).char" + embedding_dim: 32 + vocabulary_size: 3521 + size: 3 + } + fixed_feature { + name: "char-bigram" + fml: "input.char-bigram" + embedding_dim: 32 + vocabulary_size: 6579 + size: 1 + } + network_unit { + registered_name: "wrapped_units.LayerNormBasicLSTMNetwork" + parameters { + key: "hidden_layer_sizes" + value: "256" + } + } + backend { + registered_name: "SyntaxNetComponent" + } + num_actions: 1 + component_builder { + registered_name: "DynamicComponentBuilder" + } +} +component { + name: "segmenter" + transition_system { + registered_name: "binary-segment-transitions" + } + resource { + name: "word-map" + part { + file_pattern: "word-map" + } + } + resource { + name: "tag-map" + part { + file_pattern: "tag-map" + } + } + resource { + name: "tag-to-category" + part { + file_pattern: "tag-to-category" + } + } + resource { + name: "lcword-map" + part { + file_pattern: "lcword-map" + } + } + resource { + name: "category-map" + part { + file_pattern: "category-map" + } + } + resource { + name: "char-map" + part { + file_pattern: "char-map" + } + } + resource { + name: "char-ngram-map" + part { + file_pattern: "char-ngram-map" + } + } + resource { + name: "label-map" + part { + file_pattern: "label-map" + } + } + resource { + name: "prefix-table" + part { + file_pattern: "prefix-table" + } + } + resource { + name: "suffix-table" + part { + file_pattern: "suffix-table" + } + } + linked_feature { + name: "lookahead" + fml: "input.focus stack.focus" + embedding_dim: 32 + size: 2 + source_component: 
"lookahead" + source_translator: "reverse-token" + source_layer: "state_h_0" + } + network_unit { + registered_name: "wrapped_units.LayerNormBasicLSTMNetwork" + parameters { + key: "hidden_layer_sizes" + value: "128" + } + } + backend { + registered_name: "SyntaxNetComponent" + } + num_actions: 2 + component_builder { + registered_name: "DynamicComponentBuilder" + } +} diff --git a/syntaxnet/dragnn/core/BUILD b/syntaxnet/dragnn/core/BUILD new file mode 100644 index 0000000000000000000000000000000000000000..259f4e312dd44a6d47c6d92f6a911141d55cae7e --- /dev/null +++ b/syntaxnet/dragnn/core/BUILD @@ -0,0 +1,340 @@ +package(default_visibility = ["//visibility:public"]) + +# Test data. +filegroup( + name = "testdata", + data = glob(["testdata/**"]), +) + +cc_library( + name = "beam", + hdrs = ["beam.h"], + deps = [ + "//dragnn/core/interfaces:cloneable_transition_state", + "//dragnn/core/interfaces:transition_state", + "@org_tensorflow//tensorflow/core:lib", # For tf/core/platform/logging.h + ], +) + +cc_library( + name = "component_registry", + srcs = ["component_registry.cc"], + hdrs = ["component_registry.h"], + deps = [ + "//dragnn/core/interfaces:component", + "//syntaxnet:registry", + ], +) + +cc_library( + name = "compute_session", + hdrs = ["compute_session.h"], + deps = [ + "//dragnn/components/util:bulk_feature_extractor", + "//dragnn/core:index_translator", + "//dragnn/core/interfaces:component", + "//dragnn/protos:spec_proto", + "//dragnn/protos:trace_proto", + ], +) + +cc_library( + name = "compute_session_impl", + srcs = ["compute_session_impl.cc"], + hdrs = ["compute_session_impl.h"], + deps = [ + ":compute_session", + ":index_translator", + ":input_batch_cache", + "//dragnn/components/util:bulk_feature_extractor", + "//dragnn/protos:data_proto", + "//dragnn/protos:spec_proto", + "//dragnn/protos:trace_proto", + "//syntaxnet:registry", + "@org_tensorflow//tensorflow/core:lib", # For tf/core/platform/logging.h + ], +) + +cc_library( + name = 
"compute_session_pool", + srcs = ["compute_session_pool.cc"], + hdrs = ["compute_session_pool.h"], + deps = [ + ":component_registry", + ":compute_session", + ":compute_session_impl", + "//dragnn/protos:spec_proto", + "@org_tensorflow//tensorflow/core:lib", + ], +) + +cc_library( + name = "index_translator", + srcs = ["index_translator.cc"], + hdrs = ["index_translator.h"], + deps = [ + "//dragnn/core/interfaces:component", + "//dragnn/core/interfaces:transition_state", + "@org_tensorflow//tensorflow/core:lib", # For tf/core/platform/logging.h + ], +) + +cc_library( + name = "input_batch_cache", + hdrs = ["input_batch_cache.h"], + deps = [ + "//dragnn/core/interfaces:input_batch", + "@org_tensorflow//tensorflow/core:lib", # For tf/core/platform/logging.h + ], +) + +cc_library( + name = "resource_container", + hdrs = ["resource_container.h"], + deps = [ + "//syntaxnet:base", + "@org_tensorflow//tensorflow/core:framework", + ], +) + +# Tests + +cc_test( + name = "beam_test", + srcs = ["beam_test.cc"], + deps = [ + ":beam", + "//dragnn/core/interfaces:cloneable_transition_state", + "//dragnn/core/interfaces:transition_state", + "//dragnn/core/test:mock_transition_state", + "//syntaxnet:test_main", + "@org_tensorflow//tensorflow/core:test", + ], +) + +cc_test( + name = "compute_session_impl_test", + srcs = ["compute_session_impl_test.cc"], + deps = [ + ":component_registry", + ":compute_session", + ":compute_session_impl", + ":compute_session_pool", + "//dragnn/components/util:bulk_feature_extractor", + "//dragnn/core/interfaces:component", + "//dragnn/core/test:generic", + "//dragnn/core/test:mock_component", + "//dragnn/core/test:mock_transition_state", + "@org_tensorflow//tensorflow/core:test", + ], +) + +cc_test( + name = "compute_session_pool_test", + srcs = ["compute_session_pool_test.cc"], + deps = [ + ":compute_session", + ":compute_session_pool", + "//dragnn/core/test:generic", + "//dragnn/core/test:mock_component", + "//dragnn/core/test:mock_compute_session", 
+ "//syntaxnet:test_main", + "@org_tensorflow//tensorflow/core:lib", + "@org_tensorflow//tensorflow/core:test", + ], +) + +cc_test( + name = "index_translator_test", + srcs = ["index_translator_test.cc"], + deps = [ + ":index_translator", + "//dragnn/core/test:mock_component", + "//dragnn/core/test:mock_transition_state", + "//syntaxnet:test_main", + "@org_tensorflow//tensorflow/core:test", + ], +) + +cc_test( + name = "input_batch_cache_test", + srcs = ["input_batch_cache_test.cc"], + deps = [ + ":input_batch_cache", + "//dragnn/core/interfaces:input_batch", + "//syntaxnet:test_main", + "@org_tensorflow//tensorflow/core:test", + ], +) + +cc_test( + name = "resource_container_test", + srcs = ["resource_container_test.cc"], + deps = [ + ":resource_container", + "//syntaxnet:test_main", + "@org_tensorflow//tensorflow/core:test", + ], +) + +# Tensorflow op kernel BUILD rules. + +load( + "//dragnn:tensorflow_ops.bzl", + "tf_gen_op_libs", + "tf_gen_op_wrapper_py", + "tf_kernel_library", +) + +tf_gen_op_libs( + op_lib_names = ["dragnn_ops"], +) + +tf_gen_op_wrapper_py( + name = "dragnn_ops", + deps = [":dragnn_ops_op_lib"], +) + +tf_gen_op_libs( + op_lib_names = ["dragnn_bulk_ops"], +) + +tf_gen_op_wrapper_py( + name = "dragnn_bulk_ops", + deps = [":dragnn_bulk_ops_op_lib"], +) + +cc_library( + name = "compute_session_op", + srcs = [ + "ops/compute_session_op.cc", + ], + hdrs = ["ops/compute_session_op.h"], + deps = [ + ":compute_session", + ":resource_container", + "@org_tensorflow//tensorflow/core:framework", + "@org_tensorflow//third_party/eigen3", + ], +) + +cc_library( + name = "dragnn_ops_cc", + srcs = [ + "ops/dragnn_op_kernels.cc", + "ops/dragnn_ops.cc", + ], + deps = [ + ":compute_session", + ":compute_session_op", + ":compute_session_pool", + ":resource_container", + "//dragnn/protos:data_proto", + "//dragnn/protos:spec_proto", + "@org_tensorflow//tensorflow/core:framework", + "@org_tensorflow//tensorflow/core:lib", + "@org_tensorflow//third_party/eigen3", + ], 
+ alwayslink = 1, +) + +cc_library( + name = "dragnn_bulk_ops_cc", + srcs = [ + "ops/dragnn_bulk_op_kernels.cc", + "ops/dragnn_bulk_ops.cc", + ], + deps = [ + ":compute_session_op", + ":resource_container", + "@org_tensorflow//tensorflow/core:framework", + "@org_tensorflow//tensorflow/core:lib", + "@org_tensorflow//tensorflow/core:protos_all_cc", + "@org_tensorflow//third_party/eigen3", + ], +) + +# Tensorflow kernel libraries, for use with unit tests. + +tf_kernel_library( + name = "dragnn_op_kernels", + srcs = [ + "ops/dragnn_op_kernels.cc", + "ops/dragnn_ops.cc", + ], + hdrs = [ + ], + deps = [ + ":compute_session", + ":compute_session_op", + ":compute_session_pool", + ":resource_container", + "//dragnn/protos:data_proto", + "//dragnn/protos:spec_proto", + "@org_tensorflow//tensorflow/core:framework", + "@org_tensorflow//tensorflow/core:lib", + "@org_tensorflow//third_party/eigen3", + ], +) + +tf_kernel_library( + name = "dragnn_bulk_op_kernels", + srcs = [ + "ops/dragnn_bulk_op_kernels.cc", + "ops/dragnn_bulk_ops.cc", + ], + hdrs = [ + ], + deps = [ + ":compute_session", + ":compute_session_op", + ":compute_session_pool", + ":resource_container", + "//dragnn/components/util:bulk_feature_extractor", + "//dragnn/protos:spec_proto", + "@org_tensorflow//tensorflow/core:framework", + "@org_tensorflow//tensorflow/core:lib", + "@org_tensorflow//tensorflow/core:protos_all_cc", + "@org_tensorflow//third_party/eigen3", + ], +) + +# Tensorflow kernel tests. 
+ +cc_test( + name = "dragnn_op_kernels_test", + srcs = ["ops/dragnn_op_kernels_test.cc"], + deps = [ + ":compute_session", + ":compute_session_pool", + ":dragnn_op_kernels", + ":resource_container", + "//dragnn/core/test:generic", + "//dragnn/core/test:mock_compute_session", + "//syntaxnet:test_main", + "@org_tensorflow//tensorflow/core:framework", + "@org_tensorflow//tensorflow/core:protos_all_cc", + "@org_tensorflow//tensorflow/core:test", + "@org_tensorflow//tensorflow/core:testlib", + "@org_tensorflow//tensorflow/core/kernels:ops_testutil", + "@org_tensorflow//tensorflow/core/kernels:ops_util", + "@org_tensorflow//tensorflow/core/kernels:quantized_ops", + ], +) + +cc_test( + name = "dragnn_bulk_op_kernels_test", + srcs = ["ops/dragnn_bulk_op_kernels_test.cc"], + deps = [ + ":compute_session_pool", + ":dragnn_bulk_op_kernels", + ":resource_container", + "//dragnn/components/util:bulk_feature_extractor", + "//dragnn/core/test:mock_compute_session", + "//syntaxnet:test_main", + "@org_tensorflow//tensorflow/core:framework", + "@org_tensorflow//tensorflow/core:testlib", + "@org_tensorflow//tensorflow/core/kernels:ops_testutil", + "@org_tensorflow//tensorflow/core/kernels:quantized_ops", + ], +) diff --git a/syntaxnet/dragnn/core/beam.h b/syntaxnet/dragnn/core/beam.h new file mode 100644 index 0000000000000000000000000000000000000000..32d9ca1411a9cb511209e52ed39b2a40ff5d2704 --- /dev/null +++ b/syntaxnet/dragnn/core/beam.h @@ -0,0 +1,347 @@ +#ifndef NLP_SAFT_OPENSOURCE_DRAGNN_CORE_BEAM_H_ +#define NLP_SAFT_OPENSOURCE_DRAGNN_CORE_BEAM_H_ + +#include +#include +#include + +#include "dragnn/core/interfaces/cloneable_transition_state.h" +#include "dragnn/core/interfaces/transition_state.h" +#include "tensorflow/core/platform/logging.h" + +namespace syntaxnet { +namespace dragnn { + +// The Beam class wraps the logic necessary to advance a set of transition +// states for an arbitrary Component. 
Because the Beam class is generic, it +// doesn't know how to act on the states it is provided - the instantiating +// Component is expected to provide it the three functions it needs to interact +// with that Component's TransitionState subclasses. + +template +class Beam { + public: + // Creates a new Beam which can grow up to max_size elements. + explicit Beam(int max_size) : max_size_(max_size), num_steps_(0) { + VLOG(2) << "Creating beam with max size " << max_size_; + static_assert( + std::is_base_of, T>::value, + "This class must be instantiated to use a CloneableTransitionState"); + } + + // Sets the Beam functions, as follows: + // bool is_allowed(TransitionState *, int): Return true if transition 'int' is + // allowed for transition state 'TransitionState *'. + // void perform_transition(TransitionState *, int): Performs transition 'int' + // on transition state 'TransitionState *'. + // int oracle_function(TransitionState *): Returns the oracle-specified action + // for transition state 'TransitionState *'. + void SetFunctions(std::function is_allowed, + std::function is_final, + std::function perform_transition, + std::function oracle_function) { + is_allowed_ = is_allowed; + is_final_ = is_final; + perform_transition_ = perform_transition; + oracle_function_ = oracle_function; + } + + // Resets the Beam and initializes it with the given set of states. The Beam + // takes ownership of these TransitionStates. + void Init(std::vector> initial_states) { + VLOG(2) << "Initializing beam. 
Beam max size is " << max_size_; + CHECK_LE(initial_states.size(), max_size_) + << "Attempted to initialize a beam with more states (" + << initial_states.size() << ") than the max size " << max_size_; + beam_ = std::move(initial_states); + std::vector previous_beam_indices(max_size_, -1); + for (int i = 0; i < beam_.size(); ++i) { + previous_beam_indices.at(i) = beam_[i]->ParentBeamIndex(); + beam_[i]->SetBeamIndex(i); + } + beam_index_history_.emplace_back(previous_beam_indices); + } + + // Advances the Beam from the given transition matrix. + void AdvanceFromPrediction(const float transition_matrix[], int matrix_length, + int num_actions) { + // Ensure that the transition matrix is the correct size. All underlying + // states should have the same transition profile, so using the one at 0 + // should be safe. + CHECK_EQ(matrix_length, max_size_ * num_actions) + << "Transition matrix size does not match max beam size * number of " + "state transitions!"; + + if (max_size_ == 1) { + // In the case where beam size is 1, we can advance by simply finding the + // highest score and advancing the beam state in place. + VLOG(2) << "Beam size is 1. Using fast beam path."; + int best_action = -1; + float best_score = -INFINITY; + auto &state = beam_[0]; + for (int action_idx = 0; action_idx < num_actions; ++action_idx) { + if (is_allowed_(state.get(), action_idx) && + transition_matrix[action_idx] > best_score) { + best_score = transition_matrix[action_idx]; + best_action = action_idx; + } + } + CHECK_GE(best_action, 0) << "Num actions: " << num_actions + << " score[0]: " << transition_matrix[0]; + perform_transition_(state.get(), best_action); + const float new_score = state->GetScore() + best_score; + state->SetScore(new_score); + state->SetBeamIndex(0); + } else { + // Create the vector of all possible transitions, along with their scores. + std::vector candidates; + + // Iterate through all beams, examining all actions for each beam. 
+ for (int beam_idx = 0; beam_idx < beam_.size(); ++beam_idx) { + const auto &state = beam_[beam_idx]; + for (int action_idx = 0; action_idx < num_actions; ++action_idx) { + // If the action is allowed, calculate the proposed new score and add + // the candidate action to the vector of all actions at this state. + if (is_allowed_(state.get(), action_idx)) { + Transition candidate; + + // The matrix is laid out by beam index, with a linear set of + // actions for that index - so beam N's actions start at [nr. of + // actions]*[N]. + const int matrix_idx = action_idx + beam_idx * num_actions; + CHECK_LT(matrix_idx, matrix_length) + << "Matrix index out of bounds!"; + const double score_delta = transition_matrix[matrix_idx]; + CHECK(!isnan(score_delta)); + candidate.source_idx = beam_idx; + candidate.action = action_idx; + candidate.resulting_score = state->GetScore() + score_delta; + candidates.emplace_back(candidate); + } + } + } + + // Sort the vector of all possible transitions and scores. + const auto comparator = [](const Transition &a, const Transition &b) { + return a.resulting_score > b.resulting_score; + }; + std::sort(candidates.begin(), candidates.end(), comparator); + + // Apply the top transitions, up to a maximum of 'max_size_'. + std::vector> new_beam; + std::vector previous_beam_indices(max_size_, -1); + const int beam_size = + std::min(max_size_, static_cast(candidates.size())); + VLOG(2) << "Previous beam size = " << beam_.size(); + VLOG(2) << "New beam size = " << beam_size; + VLOG(2) << "Maximum beam size = " << max_size_; + for (int i = 0; i < beam_size; ++i) { + // Get the source of the i'th transition. + const auto &transition = candidates[i]; + VLOG(2) << "Taking transition with score: " + << transition.resulting_score + << " and action: " << transition.action; + VLOG(2) << "transition.source_idx = " << transition.source_idx; + const auto &source = beam_[transition.source_idx]; + + // Put the new transition on the new state beam. 
+ auto new_state = source->Clone(); + perform_transition_(new_state.get(), transition.action); + new_state->SetScore(transition.resulting_score); + new_state->SetBeamIndex(i); + previous_beam_indices.at(i) = transition.source_idx; + new_beam.emplace_back(std::move(new_state)); + } + + beam_ = std::move(new_beam); + beam_index_history_.emplace_back(previous_beam_indices); + } + + ++num_steps_; + } + + // Advances the Beam from the state oracles. + void AdvanceFromOracle() { + std::vector previous_beam_indices(max_size_, -1); + for (int i = 0; i < beam_.size(); ++i) { + previous_beam_indices.at(i) = i; + if (is_final_(beam_[i].get())) continue; + const auto oracle_label = oracle_function_(beam_[i].get()); + VLOG(2) << "AdvanceFromOracle beam_index:" << i + << " oracle_label:" << oracle_label; + perform_transition_(beam_[i].get(), oracle_label); + beam_[i]->SetScore(0.0); + beam_[i]->SetBeamIndex(i); + } + if (max_size_ > 1) { + beam_index_history_.emplace_back(previous_beam_indices); + } + num_steps_++; + } + + // Returns true if all states in the beam are final. + bool IsTerminal() { + for (auto &state : beam_) { + if (!is_final_(state.get())) { + return false; + } + } + return true; + } + + // Destroys the states held by this beam and resets its history. + void Reset() { + beam_.clear(); + beam_index_history_.clear(); + num_steps_ = 0; + } + + // Given an index into the current beam, determine the index of the item's + // parent at beam step "step", which should be less than the total number + // of steps taken by this beam. 
+ int FindPreviousIndex(int current_index, int step) const { + VLOG(2) << "FindPreviousIndex requested for current_index:" << current_index + << " at step:" << step; + if (VLOG_IS_ON(2)) { + int step_index = 0; + for (const auto &step : beam_index_history_) { + string row = + "Step " + std::to_string(step_index) + " element source slot: "; + for (const auto &index : step) { + if (index == -1) { + row += " X"; + } else { + row += " " + std::to_string(index); + } + } + VLOG(2) << row; + ++step_index; + } + } + + // If the max size of the beam is 1, make sure the steps are in sync with + // the size. + if (max_size_ > 1) { + CHECK(num_steps_ == beam_index_history_.size() - 1); + } + + // Check if the step is too far into the past or future. + if (step < 0 || step > num_steps_) { + return -1; + } + + // Check that the index is within the beam. + if (current_index < 0 || current_index >= max_size_) { + return -1; + } + + // If the max size of the beam is 1, always return 0. + if (max_size_ == 1) { + return 0; + } + + // Check that the start index isn't -1; -1 means that we don't have an + // actual transition state in that beam slot. + if (beam_index_history_.back().at(current_index) == -1) { + return -1; + } + + int beam_index = current_index; + for (int i = beam_index_history_.size() - 1; i >= step; --i) { + beam_index = beam_index_history_.at(i).at(beam_index); + } + CHECK_GE(beam_index, 0); + VLOG(2) << "Index is " << beam_index; + return beam_index; + } + + // Returns the current state of the beam. + std::vector beam() const { + std::vector state_ptrs; + for (const auto &beam_state : beam_) { + state_ptrs.emplace_back(beam_state.get()); + } + return state_ptrs; + } + + // Returns the beam at the current state index. + T *beam_state(int beam_index) { return beam_.at(beam_index).get(); } + + // Returns the raw history vectors for this beam. + const std::vector> &history() { + if (max_size_ == 1) { + // If max size is 1, we haven't been keeping track of the beam. 
Quick + // create it. + beam_index_history_.clear(); + beam_index_history_.push_back({beam_[0]->ParentBeamIndex()}); + for (int i = 0; i < num_steps_; ++i) { + beam_index_history_.push_back({0}); + } + } + return beam_index_history_; + } + + // Sets the max size of the beam. + void SetMaxSize(int max_size) { + max_size_ = max_size; + Reset(); + } + + // Returns the number of steps taken so far. + const int num_steps() const { return num_steps_; } + + // Returns the max size of this beam. + const int max_size() const { return max_size_; } + + // Returns the current size of the beam. + const int size() const { return beam_.size(); } + + private: + // Associates an action taken on an index into current_state_ with a score. + struct Transition { + // The index of the source item. + int source_idx; + + // The index of the action being taken. + int action; + + // The score of the full derivation. + double resulting_score; + }; + + // The maximum beam size. + int max_size_; + + // The current beam. + std::vector> beam_; + + // Function to check if a transition is allowed for a given state. + std::function is_allowed_; + + // Function to check if a state is final. + std::function is_final_; + + // Function to perform a transition on a given state. + std::function perform_transition_; + + // Function to provide the oracle action for a given state. + std::function oracle_function_; + + // The history of the states in this beam. The vector indexes across steps. + // For every step, there is a vector in the vector. This inner vector denotes + // the state of the beam at that step, and contains the beam index that + // was transitioned to create the transition state at that index (so, + // if at step 2 the transition state at beam index 4 was created by applying + // a transition to the state in beam index 3 during step 1, the query would + // be "beam_index_history_.at(2).at(4)" and the value would be 3. Empty beam + // states will return -1. 
+ std::vector> beam_index_history_; + + // The number of steps taken so far. + int num_steps_; +}; + +} // namespace dragnn +} // namespace syntaxnet + +#endif // NLP_SAFT_OPENSOURCE_DRAGNN_CORE_BEAM_H_ diff --git a/syntaxnet/dragnn/core/beam_test.cc b/syntaxnet/dragnn/core/beam_test.cc new file mode 100644 index 0000000000000000000000000000000000000000..db60ff2bf36fba95a470302b2b5f9728cf7b3f7e --- /dev/null +++ b/syntaxnet/dragnn/core/beam_test.cc @@ -0,0 +1,773 @@ +#include "dragnn/core/beam.h" + +#include "dragnn/core/interfaces/cloneable_transition_state.h" +#include "dragnn/core/interfaces/transition_state.h" +#include "dragnn/core/test/mock_transition_state.h" +#include +#include "tensorflow/core/platform/test.h" + +namespace syntaxnet { +namespace dragnn { + +using testing::MockFunction; +using testing::Return; +using testing::Ne; +using testing::_; + +namespace { + +// ***************************************************************************** +// Test-internal class definitions. +// ***************************************************************************** + +// Create a very basic transition state to test the beam. All it does is keep +// track of its current beam index and score, as well as providing a field +// for the transition function to write in what transition occurred. +// Note that this class does not fulfill the entire TransitionState contract, +// since it is only used in this particular test. +class TestTransitionState + : public CloneableTransitionState { + public: + TestTransitionState() {} + + void Init(const TransitionState &parent) override {} + + std::unique_ptr Clone() const override { + std::unique_ptr ptr(new TestTransitionState()); + return ptr; + } + + const int ParentBeamIndex() const override { return parent_beam_index_; } + + // Get the current beam index for this state. + const int GetBeamIndex() const override { return beam_index_; } + + // Set the current beam index for this state. 
+ void SetBeamIndex(const int index) override { beam_index_ = index; } + + // Get the score associated with this transition state. + const float GetScore() const override { return score_; } + + // Set the score associated with this transition state. + void SetScore(const float score) override { score_ = score; } + + // Depicts this state as an HTML-language string. + string HTMLRepresentation() const override { return ""; } + + int parent_beam_index_; + + int beam_index_; + + float score_; + + int transition_action_; +}; + +// This transition function annotates a TestTransitionState with the action that +// was chosen for the transition. +auto transition_function = [](TestTransitionState *state, int action) { + TestTransitionState *cast_state = dynamic_cast(state); + cast_state->transition_action_ = action; +}; + +// Create oracle and permission functions that do nothing. +auto null_oracle = [](TestTransitionState *) { return 0; }; +auto null_permissions = [](TestTransitionState *, int) { return true; }; +auto null_finality = [](TestTransitionState *) { return false; }; + +// Create a unique_ptr with a test transition state in it and set its initial +// score. +std::unique_ptr CreateState(float score) { + std::unique_ptr state; + state.reset(new TestTransitionState()); + state->SetScore(score); + return state; +} + +// Convenience accessor for the action field in TestTransitionState. +int GetTransition(const TransitionState *state) { + return (dynamic_cast(state))->transition_action_; +} + +// Convenience accessor for the parent_beam_index_ field in TestTransitionState. +void SetParentBeamIndex(TransitionState *state, int index) { + (dynamic_cast(state))->parent_beam_index_ = index; +} + +} // namespace + +// ***************************************************************************** +// Tests begin here. 
+// ***************************************************************************** +TEST(BeamTest, AdvancesFromPredictionWithSingleBeam) { + // Create a matrix of transitions. + constexpr int kNumTransitions = 4; + constexpr int kMatrixSize = kNumTransitions; + constexpr float matrix[kMatrixSize] = {30.0, 20.0, 40.0, 10.0}; + constexpr int kBestTransition = 2; + constexpr float kOldScore = 3.0; + + // Create the beam and transition it. + std::vector> states; + states.push_back(CreateState(kOldScore)); + constexpr int kBeamSize = 1; + Beam beam(kBeamSize); + beam.SetFunctions(null_permissions, null_finality, transition_function, + null_oracle); + beam.Init(std::move(states)); + beam.AdvanceFromPrediction(matrix, kMatrixSize, kNumTransitions); + + // Validate the new beam. + EXPECT_EQ(beam.beam().size(), kBeamSize); + + // Make sure the state has performed the expected transition. + EXPECT_EQ(GetTransition(beam.beam().at(0)), kBestTransition); + + // Make sure the state has had its score updated properly. + EXPECT_EQ(beam.beam().at(0)->GetScore(), kOldScore + matrix[kBestTransition]); + + // Make sure that the beam index field is consistent with the actual beam idx. + EXPECT_EQ(beam.beam().at(0)->GetBeamIndex(), 0); + + // Make sure that the beam_state accessor actually accesses the beam. + EXPECT_EQ(beam.beam().at(0), beam.beam_state(0)); + + // Validate the beam history field. + auto history = beam.history(); + EXPECT_EQ(history.at(1).at(0), 0); +} + +TEST(BeamTest, AdvancingCreatesNewTransitions) { + // Create a matrix of transitions. 
+ constexpr int kMaxBeamSize = 8; + constexpr int kNumTransitions = 4; + constexpr int kMatrixSize = kNumTransitions * kMaxBeamSize; + constexpr float matrix[kMatrixSize] = { + 30.0, 20.0, 40.0, 10.0, 00.0, 00.0, 00.0, 00.0, 00.0, 00.0, 00.0, + 00.0, 00.0, 00.0, 00.0, 00.0, 00.0, 00.0, 00.0, 00.0, 00.0, 00.0, + 00.0, 00.0, 00.0, 00.0, 00.0, 00.0, 00.0, 00.0, 00.0, 00.0}; + constexpr float kOldScore = 4.0; + + // Create the beam and transition it. + std::vector> states; + states.push_back(CreateState(kOldScore)); + + Beam beam(kMaxBeamSize); + beam.SetFunctions(null_permissions, null_finality, transition_function, + null_oracle); + beam.Init(std::move(states)); + beam.AdvanceFromPrediction(matrix, kMatrixSize, kNumTransitions); + + // Validate the new beam. + EXPECT_EQ(beam.beam().size(), 4); + + // Make sure the state has performed the expected transition. + EXPECT_EQ(GetTransition(beam.beam().at(0)), 2); + EXPECT_EQ(GetTransition(beam.beam().at(1)), 0); + EXPECT_EQ(GetTransition(beam.beam().at(2)), 1); + EXPECT_EQ(GetTransition(beam.beam().at(3)), 3); + + // Make sure the state has had its score updated properly. + EXPECT_EQ(beam.beam().at(0)->GetScore(), kOldScore + matrix[2]); + EXPECT_EQ(beam.beam().at(1)->GetScore(), kOldScore + matrix[0]); + EXPECT_EQ(beam.beam().at(2)->GetScore(), kOldScore + matrix[1]); + EXPECT_EQ(beam.beam().at(3)->GetScore(), kOldScore + matrix[3]); + + // Make sure that the beam index field is consistent with the actual beam idx. + for (int i = 0; i < beam.beam().size(); ++i) { + EXPECT_EQ(beam.beam().at(i)->GetBeamIndex(), i); + } + + // In this case, we expect the top 4 results to have come from state 0 and + // the remaining 4 slots to be empty (-1). 
+ auto history = beam.history(); + EXPECT_EQ(history.at(1).at(0), 0); + EXPECT_EQ(history.at(1).at(1), 0); + EXPECT_EQ(history.at(1).at(2), 0); + EXPECT_EQ(history.at(1).at(3), 0); + EXPECT_EQ(history.at(1).at(4), -1); + EXPECT_EQ(history.at(1).at(5), -1); + EXPECT_EQ(history.at(1).at(6), -1); + EXPECT_EQ(history.at(1).at(7), -1); +} + +TEST(BeamTest, MultipleElementBeamsAdvanceAllElements) { + // Create a matrix of transitions. + constexpr int kMaxBeamSize = 8; + constexpr int kNumTransitions = 4; + constexpr int kMatrixSize = kNumTransitions * kMaxBeamSize; + + constexpr float matrix[kMatrixSize] = { + 30.0, 20.0, 40.0, 10.0, // State 0 + 31.0, 21.0, 41.0, 11.0, 00.0, 00.0, 00.0, 00.0, 00.0, 00.0, + 00.0, 00.0, 00.0, 00.0, 00.0, 00.0, 00.0, 00.0, 00.0, 00.0, + 00.0, 00.0, 00.0, 00.0, 00.0, 00.0, 00.0, 00.0}; + + constexpr float kOldScores[] = {5.0, 7.0}; + + // Create the beam and transition it. + std::vector> states; + states.push_back(CreateState(kOldScores[0])); + states.push_back(CreateState(kOldScores[1])); + + Beam beam(kMaxBeamSize); + beam.SetFunctions(null_permissions, null_finality, transition_function, + null_oracle); + beam.Init(std::move(states)); + beam.AdvanceFromPrediction(matrix, kMatrixSize, kNumTransitions); + + // Validate the new beam. + EXPECT_EQ(beam.beam().size(), 8); + + // Make sure the state has performed the expected transition. + // Note that the transition index is not the index into the matrix, but rather + // the index into the matrix 'row' for that state. + EXPECT_EQ(GetTransition(beam.beam().at(0)), 2); + EXPECT_EQ(GetTransition(beam.beam().at(1)), 2); + EXPECT_EQ(GetTransition(beam.beam().at(2)), 0); + EXPECT_EQ(GetTransition(beam.beam().at(3)), 0); + EXPECT_EQ(GetTransition(beam.beam().at(4)), 1); + EXPECT_EQ(GetTransition(beam.beam().at(5)), 1); + EXPECT_EQ(GetTransition(beam.beam().at(6)), 3); + EXPECT_EQ(GetTransition(beam.beam().at(7)), 3); + + // Make sure the state has had its score updated properly. 
+ EXPECT_EQ(beam.beam().at(0)->GetScore(), kOldScores[1] + matrix[6]); + EXPECT_EQ(beam.beam().at(1)->GetScore(), kOldScores[0] + matrix[2]); + EXPECT_EQ(beam.beam().at(2)->GetScore(), kOldScores[1] + matrix[4]); + EXPECT_EQ(beam.beam().at(3)->GetScore(), kOldScores[0] + matrix[0]); + EXPECT_EQ(beam.beam().at(4)->GetScore(), kOldScores[1] + matrix[5]); + EXPECT_EQ(beam.beam().at(5)->GetScore(), kOldScores[0] + matrix[1]); + EXPECT_EQ(beam.beam().at(6)->GetScore(), kOldScores[1] + matrix[7]); + EXPECT_EQ(beam.beam().at(7)->GetScore(), kOldScores[0] + matrix[3]); + + // Make sure that the beam index field is consistent with the actual beam idx. + for (int i = 0; i < beam.beam().size(); ++i) { + EXPECT_EQ(beam.beam().at(i)->GetBeamIndex(), i); + } + + // Validate the history at this step. + auto history = beam.history(); + EXPECT_EQ(history.at(1).at(0), 1); + EXPECT_EQ(history.at(1).at(1), 0); + EXPECT_EQ(history.at(1).at(2), 1); + EXPECT_EQ(history.at(1).at(3), 0); + EXPECT_EQ(history.at(1).at(4), 1); + EXPECT_EQ(history.at(1).at(5), 0); + EXPECT_EQ(history.at(1).at(6), 1); + EXPECT_EQ(history.at(1).at(7), 0); +} + +TEST(BeamTest, AdvancingDropsLowValuePredictions) { + // Create a matrix of transitions. + constexpr int kNumTransitions = 4; + constexpr int kMaxBeamSize = 8; + constexpr int kMatrixSize = kNumTransitions * kMaxBeamSize; + constexpr float matrix[kMatrixSize] = {30.0, 20.0, 40.0, 10.0, // State 0 + 31.0, 21.0, 41.0, 11.0, // State 1 + 32.0, 22.0, 42.0, 12.0, // State 2 + 33.0, 23.0, 43.0, 13.0, // State 3 + 34.0, 24.0, 44.0, 14.0, // State 4 + 35.0, 25.0, 45.0, 15.0, // State 5 + 36.0, 26.0, 46.0, 16.0, // State 6 + 37.0, 27.0, 47.0, 17.0}; // State 7 + constexpr float kOldScores[] = {0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8}; + + // Create the beam and transition it. 
+ std::vector> states; + states.push_back(CreateState(kOldScores[0])); + states.push_back(CreateState(kOldScores[1])); + states.push_back(CreateState(kOldScores[2])); + states.push_back(CreateState(kOldScores[3])); + states.push_back(CreateState(kOldScores[4])); + states.push_back(CreateState(kOldScores[5])); + states.push_back(CreateState(kOldScores[6])); + states.push_back(CreateState(kOldScores[7])); + Beam beam(kMaxBeamSize); + beam.SetFunctions(null_permissions, null_finality, transition_function, + null_oracle); + beam.Init(std::move(states)); + beam.AdvanceFromPrediction(matrix, kMatrixSize, kNumTransitions); + + // Validate the new beam. + EXPECT_EQ(beam.beam().size(), 8); + + // Make sure the state has performed the expected transition. + // In this case, every state will perform transition 2. + EXPECT_EQ(GetTransition(beam.beam().at(0)), 2); + EXPECT_EQ(GetTransition(beam.beam().at(1)), 2); + EXPECT_EQ(GetTransition(beam.beam().at(2)), 2); + EXPECT_EQ(GetTransition(beam.beam().at(3)), 2); + EXPECT_EQ(GetTransition(beam.beam().at(4)), 2); + EXPECT_EQ(GetTransition(beam.beam().at(5)), 2); + EXPECT_EQ(GetTransition(beam.beam().at(6)), 2); + EXPECT_EQ(GetTransition(beam.beam().at(7)), 2); + + // Make sure the state has had its score updated properly. (Note that row + // 0 had the smallest transition score, so it ends up on the bottom of the + // beam, and so forth.) For the matrix index, N*kNumTransitions gets into the + // correct state row and we add 2 since that was the transition index. 
+ EXPECT_EQ(beam.beam().at(0)->GetScore(), + kOldScores[7] + matrix[7 * kNumTransitions + 2]); + EXPECT_EQ(beam.beam().at(1)->GetScore(), + kOldScores[6] + matrix[6 * kNumTransitions + 2]); + EXPECT_EQ(beam.beam().at(2)->GetScore(), + kOldScores[5] + matrix[5 * kNumTransitions + 2]); + EXPECT_EQ(beam.beam().at(3)->GetScore(), + kOldScores[4] + matrix[4 * kNumTransitions + 2]); + EXPECT_EQ(beam.beam().at(4)->GetScore(), + kOldScores[3] + matrix[3 * kNumTransitions + 2]); + EXPECT_EQ(beam.beam().at(5)->GetScore(), + kOldScores[2] + matrix[2 * kNumTransitions + 2]); + EXPECT_EQ(beam.beam().at(6)->GetScore(), + kOldScores[1] + matrix[1 * kNumTransitions + 2]); + EXPECT_EQ(beam.beam().at(7)->GetScore(), + kOldScores[0] + matrix[0 * kNumTransitions + 2]); + + // Make sure that the beam index field is consistent with the actual beam idx. + for (int i = 0; i < beam.beam().size(); ++i) { + EXPECT_EQ(beam.beam().at(i)->GetBeamIndex(), i); + } + + auto history = beam.history(); + EXPECT_EQ(history.at(1).at(0), 7); + EXPECT_EQ(history.at(1).at(1), 6); + EXPECT_EQ(history.at(1).at(2), 5); + EXPECT_EQ(history.at(1).at(3), 4); + EXPECT_EQ(history.at(1).at(4), 3); + EXPECT_EQ(history.at(1).at(5), 2); + EXPECT_EQ(history.at(1).at(6), 1); + EXPECT_EQ(history.at(1).at(7), 0); +} + +TEST(BeamTest, AdvancesFromOracleWithSingleBeam) { + // Create an oracle function for this state. + constexpr int kOracleLabel = 3; + auto oracle_function = [](TransitionState *) { return kOracleLabel; }; + + // Create the beam and transition it. + std::vector> states; + states.push_back(CreateState(0.0)); + constexpr int kBeamSize = 1; + Beam beam(kBeamSize); + beam.SetFunctions(null_permissions, null_finality, transition_function, + oracle_function); + beam.Init(std::move(states)); + beam.AdvanceFromOracle(); + + // Validate the new beam. + EXPECT_EQ(beam.beam().size(), kBeamSize); + + // Make sure the state has performed the expected transition. 
+ EXPECT_EQ(GetTransition(beam.beam().at(0)), kOracleLabel); + + // Make sure the state has had its score held to 0. + EXPECT_EQ(beam.beam().at(0)->GetScore(), 0.0); + + // Make sure that the beam index field is consistent with the actual beam idx. + EXPECT_EQ(beam.beam().at(0)->GetBeamIndex(), 0); + + // Validate the beam history field. + auto history = beam.history(); + EXPECT_EQ(history.at(1).at(0), 0); +} + +TEST(BeamTest, AdvancesFromOracleWithMultipleStates) { + constexpr int kMaxBeamSize = 8; + + // Create a beam with 8 transition states. + std::vector> states; + for (int i = 0; i < kMaxBeamSize; ++i) { + // This is nonzero to test the oracle holding scores to 0. + states.push_back(CreateState(10.0)); + } + + std::vector expected_actions; + + // Create an oracle function for this state. Use mocks for finer control. + testing::MockFunction mock_oracle_function; + for (int i = 0; i < kMaxBeamSize; ++i) { + // We expect each state to be queried for its oracle label, + // and then to be transitioned in place with its oracle label. + int oracle_label = i % 3; // 3 is arbitrary. + EXPECT_CALL(mock_oracle_function, Call(states.at(i).get())) + .WillOnce(Return(oracle_label)); + expected_actions.push_back(oracle_label); + } + + Beam beam(kMaxBeamSize); + beam.SetFunctions(null_permissions, null_finality, transition_function, + mock_oracle_function.AsStdFunction()); + beam.Init(std::move(states)); + beam.AdvanceFromOracle(); + + // Make sure the state has performed the expected transition, has had its + // score held to 0, and is self consistent. 
+ for (int i = 0; i < beam.beam().size(); ++i) { + EXPECT_EQ(GetTransition(beam.beam().at(i)), expected_actions.at(i)); + EXPECT_EQ(beam.beam().at(i)->GetScore(), 0.0); + EXPECT_EQ(beam.beam().at(i)->GetBeamIndex(), i); + } + + auto history = beam.history(); + for (int i = 0; i < beam.beam().size(); ++i) { + EXPECT_EQ(history.at(1).at(i), i); + } +} + +TEST(BeamTest, ReportsNonFinality) { + constexpr int kMaxBeamSize = 8; + + // Create a beam with 8 transition states. + std::vector> states; + for (int i = 0; i < kMaxBeamSize; ++i) { + // This is nonzero to test the oracle holding scores to 0. + states.push_back(CreateState(10.0)); + } + + std::vector expected_actions; + + // Create a finality function for this state. Use mocks for finer control. + testing::MockFunction mock_finality_function; + + // Make precisely one call return false, which should cause IsFinal + // to report false. + constexpr int incomplete_state = 3; + EXPECT_CALL(mock_finality_function, Call(states.at(incomplete_state).get())) + .WillOnce(Return(false)); + EXPECT_CALL(mock_finality_function, + Call(Ne(states.at(incomplete_state).get()))) + .WillRepeatedly(Return(true)); + + Beam beam(kMaxBeamSize); + beam.SetFunctions(null_permissions, mock_finality_function.AsStdFunction(), + transition_function, null_oracle); + beam.Init(std::move(states)); + + EXPECT_FALSE(beam.IsTerminal()); +} + +TEST(BeamTest, ReportsFinality) { + constexpr int kMaxBeamSize = 8; + + // Create a beam with 8 transition states. + std::vector> states; + for (int i = 0; i < kMaxBeamSize; ++i) { + // This is nonzero to test the oracle holding scores to 0. + states.push_back(CreateState(10.0)); + } + + std::vector expected_actions; + + // Create a finality function for this state. Use mocks for finer control. + testing::MockFunction mock_finality_function; + + // All calls will return true, so IsFinal should return true. 
+ EXPECT_CALL(mock_finality_function, Call(_)).WillRepeatedly(Return(true)); + + Beam beam(kMaxBeamSize); + beam.SetFunctions(null_permissions, mock_finality_function.AsStdFunction(), + transition_function, null_oracle); + beam.Init(std::move(states)); + + EXPECT_TRUE(beam.IsTerminal()); +} + +TEST(BeamTest, IgnoresForbiddenTransitionActions) { + // Create a matrix of transitions. + constexpr int kMaxBeamSize = 4; + constexpr int kNumTransitions = 4; + constexpr int kMatrixSize = kNumTransitions * kMaxBeamSize; + constexpr float matrix[kMatrixSize] = { + 10.0, 1000.0, 40.0, 30.0, 00.0, 0000.0, 00.0, 00.0, + 00.0, 0000.0, 00.0, 00.0, 00.0, 0000.0, 00.0, 00.0}; + constexpr float kOldScore = 4.0; + + // Create the beam. + std::vector> states; + states.push_back(CreateState(kOldScore)); + + // Forbid the second transition (index 1). + testing::MockFunction + mock_permission_function; + EXPECT_CALL(mock_permission_function, Call(states.at(0).get(), 0)) + .WillOnce(Return(true)); + EXPECT_CALL(mock_permission_function, Call(states.at(0).get(), 1)) + .WillOnce(Return(false)); + EXPECT_CALL(mock_permission_function, Call(states.at(0).get(), 2)) + .WillOnce(Return(true)); + EXPECT_CALL(mock_permission_function, Call(states.at(0).get(), 3)) + .WillOnce(Return(true)); + + Beam beam(kMaxBeamSize); + beam.SetFunctions(mock_permission_function.AsStdFunction(), null_finality, + transition_function, null_oracle); + beam.Init(std::move(states)); + beam.AdvanceFromPrediction(matrix, kMatrixSize, kNumTransitions); + + // Validate the new beam. + EXPECT_EQ(beam.beam().size(), 3); + + // Make sure the state has performed the expected transition. + EXPECT_EQ(GetTransition(beam.beam().at(0)), 2); + EXPECT_EQ(GetTransition(beam.beam().at(1)), 3); + EXPECT_EQ(GetTransition(beam.beam().at(2)), 0); + + // Make sure the state has had its score updated properly. 
+ EXPECT_EQ(beam.beam().at(0)->GetScore(), kOldScore + matrix[2]); + EXPECT_EQ(beam.beam().at(1)->GetScore(), kOldScore + matrix[3]); + EXPECT_EQ(beam.beam().at(2)->GetScore(), kOldScore + matrix[0]); + + // Make sure that the beam index field is consistent with the actual beam idx. + for (int i = 0; i < beam.beam().size(); ++i) { + EXPECT_EQ(beam.beam().at(i)->GetBeamIndex(), i); + } + + // In this case, we expect the top 3 results to have come from state 0 and + // the remaining 3 slots to be empty (-1). + auto history = beam.history(); + EXPECT_EQ(history.at(1).at(0), 0); + EXPECT_EQ(history.at(1).at(1), 0); + EXPECT_EQ(history.at(1).at(2), 0); + EXPECT_EQ(history.at(1).at(3), -1); +} + +TEST(BeamTest, BadlySizedMatrixDies) { + // Create a matrix of transitions. + constexpr int kNumTransitions = 4; + constexpr int kMatrixSize = 4; // We have a max beam size of 4; should be 16. + constexpr float matrix[kMatrixSize] = {30.0, 20.0, 40.0, 10.0}; + + // Create the beam and transition it. + std::vector> states; + states.push_back(CreateState(0.0)); + states.push_back(CreateState(0.0)); + constexpr int kMaxBeamSize = 8; + Beam beam(kMaxBeamSize); + beam.SetFunctions(null_permissions, null_finality, transition_function, + null_oracle); + beam.Init(std::move(states)); + + // This matrix should have 8 elements, not 4, so this should die. + EXPECT_DEATH(beam.AdvanceFromPrediction(matrix, kMatrixSize, kNumTransitions), + "Transition matrix size does not match max beam size \\* number " + "of state transitions"); +} + +TEST(BeamTest, BadlySizedBeamInitializationDies) { + // Create an initialization beam too large for the max beam size. + constexpr int kMaxBeamSize = 4; + std::vector> states; + for (int i = 0; i < kMaxBeamSize + 1; ++i) { + states.push_back(CreateState(0.0)); + } + + Beam beam(kMaxBeamSize); + beam.SetFunctions(null_permissions, null_finality, transition_function, + null_oracle); + + // Try to initialize the beam; this should die. 
+ EXPECT_DEATH(beam.Init(std::move(states)), + "Attempted to initialize a beam with more states"); +} + +TEST(BeamTest, ValidBeamIndicesAfterBeamInitialization) { + // Create a standard beam. + constexpr int kMaxBeamSize = 4; + std::vector> states; + for (int i = 0; i < kMaxBeamSize; ++i) { + states.push_back(CreateState(0.0)); + } + + Beam beam(kMaxBeamSize); + beam.SetFunctions(null_permissions, null_finality, transition_function, + null_oracle); + + beam.Init(std::move(states)); + + // Verify that all beam indices have been initialized. + for (int i = 0; i < kMaxBeamSize; ++i) { + EXPECT_EQ(i, beam.beam_state(i)->GetBeamIndex()); + } +} + +TEST(BeamTest, FindPreviousIndexTracesHistory) { + // Create a matrix of transitions. + constexpr int kNumTransitions = 4; + constexpr int kMaxBeamSize = 8; + constexpr int kMatrixSize = kNumTransitions * kMaxBeamSize; + constexpr float matrix[kMatrixSize] = { + 30.0, 20.0, 40.0, 10.0, // State 0 + 31.0, 21.0, 41.0, 11.0, // State 1 + 00.0, 00.0, 00.0, 00.0, 00.0, 00.0, 00.0, 00.0, 00.0, 00.0, 00.0, 00.0, + 00.0, 00.0, 00.0, 00.0, 00.0, 00.0, 00.0, 00.0, 00.0, 00.0, 00.0, 00.0}; + constexpr float kOldScores[] = {5.0, 7.0}; + constexpr int kParentBeamIndices[] = {1138, 42}; + + // Create the beam and transition it. + std::vector> states; + states.push_back(CreateState(kOldScores[0])); + states.push_back(CreateState(kOldScores[1])); + + // Set parent beam indices. + SetParentBeamIndex(states.at(0).get(), kParentBeamIndices[0]); + SetParentBeamIndex(states.at(1).get(), kParentBeamIndices[1]); + + Beam beam(kMaxBeamSize); + beam.SetFunctions(null_permissions, null_finality, transition_function, + null_oracle); + beam.Init(std::move(states)); + beam.AdvanceFromPrediction(matrix, kMatrixSize, kNumTransitions); + + // Validate the new beam. + EXPECT_EQ(beam.beam().size(), 8); + + // Make sure the state has performed the expected transition. 
+ // Note that the transition index is not the index into the matrix, but rather + // the index into the matrix 'row' for that state. + EXPECT_EQ(GetTransition(beam.beam().at(0)), 2); + EXPECT_EQ(GetTransition(beam.beam().at(1)), 2); + EXPECT_EQ(GetTransition(beam.beam().at(2)), 0); + EXPECT_EQ(GetTransition(beam.beam().at(3)), 0); + EXPECT_EQ(GetTransition(beam.beam().at(4)), 1); + EXPECT_EQ(GetTransition(beam.beam().at(5)), 1); + EXPECT_EQ(GetTransition(beam.beam().at(6)), 3); + EXPECT_EQ(GetTransition(beam.beam().at(7)), 3); + + // Make sure the state has had its score updated properly. + EXPECT_EQ(beam.beam().at(0)->GetScore(), kOldScores[1] + matrix[6]); + EXPECT_EQ(beam.beam().at(1)->GetScore(), kOldScores[0] + matrix[2]); + EXPECT_EQ(beam.beam().at(2)->GetScore(), kOldScores[1] + matrix[4]); + EXPECT_EQ(beam.beam().at(3)->GetScore(), kOldScores[0] + matrix[0]); + EXPECT_EQ(beam.beam().at(4)->GetScore(), kOldScores[1] + matrix[5]); + EXPECT_EQ(beam.beam().at(5)->GetScore(), kOldScores[0] + matrix[1]); + EXPECT_EQ(beam.beam().at(6)->GetScore(), kOldScores[1] + matrix[7]); + EXPECT_EQ(beam.beam().at(7)->GetScore(), kOldScores[0] + matrix[3]); + + // Make sure that the beam index field is consistent with the actual beam idx. + for (int i = 0; i < beam.beam().size(); ++i) { + EXPECT_EQ(beam.beam().at(i)->GetBeamIndex(), i); + } + + // Validate the history at this step. 
+ auto history = beam.history(); + EXPECT_EQ(history.at(1).at(0), 1); + EXPECT_EQ(history.at(1).at(1), 0); + EXPECT_EQ(history.at(1).at(2), 1); + EXPECT_EQ(history.at(1).at(3), 0); + EXPECT_EQ(history.at(1).at(4), 1); + EXPECT_EQ(history.at(1).at(5), 0); + EXPECT_EQ(history.at(1).at(6), 1); + EXPECT_EQ(history.at(1).at(7), 0); + + EXPECT_EQ(history.at(0).at(0), kParentBeamIndices[0]); + EXPECT_EQ(history.at(0).at(1), kParentBeamIndices[1]); + EXPECT_EQ(history.at(0).at(2), -1); + EXPECT_EQ(history.at(0).at(3), -1); + EXPECT_EQ(history.at(0).at(4), -1); + EXPECT_EQ(history.at(0).at(5), -1); + EXPECT_EQ(history.at(0).at(6), -1); + EXPECT_EQ(history.at(0).at(7), -1); + + // Make sure that FindPreviousIndex can read through the history from step 1 + // to step 0. + constexpr int kDesiredIndex = 0; + constexpr int kCurrentIndexOne = 4; + EXPECT_EQ(beam.FindPreviousIndex(kCurrentIndexOne, kDesiredIndex), + kParentBeamIndices[1]); + + constexpr int kCurrentIndexTwo = 7; + EXPECT_EQ(beam.FindPreviousIndex(kCurrentIndexTwo, kDesiredIndex), + kParentBeamIndices[0]); +} + +TEST(BeamTest, FindPreviousIndexReturnsInError) { + // Create the beam. This now has only one history state, 0. + std::vector> states; + states.push_back(CreateState(0.0)); + constexpr int kMaxBeamSize = 8; + Beam beam(kMaxBeamSize); + beam.SetFunctions(null_permissions, null_finality, transition_function, + null_oracle); + beam.Init(std::move(states)); + + // If the requested step is greater than the number of steps taken, expect -1. + EXPECT_EQ(beam.FindPreviousIndex(0, 1), -1); + + // If the requested step is less than 0, expect -1. + EXPECT_EQ(beam.FindPreviousIndex(0, -1), -1); + + // If the requested index does not have a state, expect -1. + EXPECT_EQ(beam.FindPreviousIndex(0, 1), -1); + + // If the requested index is less than 0, expect -1. + EXPECT_EQ(beam.FindPreviousIndex(0, -1), -1); + + // If the requested index is larger than the maximum beam size -1, expect -1. 
+ EXPECT_EQ(beam.FindPreviousIndex(0, kMaxBeamSize), -1); +} + +TEST(BeamTest, ResetClearsBeamState) { + // Create the beam + std::vector> states; + states.push_back(CreateState(1.0)); + constexpr int kMaxBeamSize = 8; + Beam beam(kMaxBeamSize); + beam.SetFunctions(null_permissions, null_finality, transition_function, + null_oracle); + beam.Init(std::move(states)); + + // Validate the new beam. + EXPECT_EQ(beam.beam().size(), 1); + + // Reset the beam. + beam.Reset(); + + // Validate the now-reset beam, which should be empty. + EXPECT_EQ(beam.beam().size(), 0); +} + +TEST(BeamTest, ResetClearsBeamHistory) { + // Create the beam + std::vector> states; + states.push_back(CreateState(1.0)); + constexpr int kMaxBeamSize = 8; + Beam beam(kMaxBeamSize); + beam.SetFunctions(null_permissions, null_finality, transition_function, + null_oracle); + beam.Init(std::move(states)); + + // Validate the new beam history. + EXPECT_EQ(beam.history().size(), 1); + + // Reset the beam. + beam.Reset(); + + // Validate the now-reset beam history, which should be empty. + EXPECT_EQ(beam.history().size(), 0); +} + +TEST(BeamTest, SettingMaxSizeResetsBeam) { + // Create the beam + std::vector> states; + states.push_back(CreateState(1.0)); + constexpr int kMaxBeamSize = 8; + Beam beam(kMaxBeamSize); + beam.SetFunctions(null_permissions, null_finality, transition_function, + null_oracle); + beam.Init(std::move(states)); + + // Validate the new beam history. + EXPECT_EQ(beam.history().size(), 1); + + // Reset the beam. + constexpr int kNewMaxBeamSize = 4; + beam.SetMaxSize(kNewMaxBeamSize); + EXPECT_EQ(beam.max_size(), kNewMaxBeamSize); + + // Validate the now-reset beam history, which should be empty. 
+ EXPECT_EQ(beam.history().size(), 0); +} + +} // namespace dragnn +} // namespace syntaxnet diff --git a/syntaxnet/dragnn/core/component_registry.cc b/syntaxnet/dragnn/core/component_registry.cc new file mode 100644 index 0000000000000000000000000000000000000000..b91354348b7f159e6f49f5f525cb5f3c67a05c32 --- /dev/null +++ b/syntaxnet/dragnn/core/component_registry.cc @@ -0,0 +1,8 @@ +#include "dragnn/core/component_registry.h" + +namespace syntaxnet { + +// Class registry for DRAGNN components. +REGISTER_SYNTAXNET_CLASS_REGISTRY("DRAGNN Component", dragnn::Component); + +} // namespace syntaxnet diff --git a/syntaxnet/dragnn/core/component_registry.h b/syntaxnet/dragnn/core/component_registry.h new file mode 100644 index 0000000000000000000000000000000000000000..9ea25bbbb3ad7bb455d9d3cdbf52083e53ba3081 --- /dev/null +++ b/syntaxnet/dragnn/core/component_registry.h @@ -0,0 +1,14 @@ +#ifndef NLP_SAFT_OPENSOURCE_DRAGNN_CORE_COMPONENT_REGISTRY_H_ +#define NLP_SAFT_OPENSOURCE_DRAGNN_CORE_COMPONENT_REGISTRY_H_ + +#include "dragnn/core/interfaces/component.h" +#include "syntaxnet/registry.h" + +// Macro to add a component to the registry. This macro associates a class with +// its class name as a string, so FooComponent would be associated with the +// string "FooComponent". 
+#define REGISTER_DRAGNN_COMPONENT(component) \ + REGISTER_SYNTAXNET_CLASS_COMPONENT(syntaxnet::dragnn::Component, #component, \ + component) + +#endif // NLP_SAFT_OPENSOURCE_DRAGNN_CORE_COMPONENT_REGISTRY_H_ diff --git a/syntaxnet/dragnn/core/compute_session.h b/syntaxnet/dragnn/core/compute_session.h new file mode 100644 index 0000000000000000000000000000000000000000..b861be8af0a8ce4955575ea12e167b80ac71ec7d --- /dev/null +++ b/syntaxnet/dragnn/core/compute_session.h @@ -0,0 +1,120 @@ +#ifndef NLP_SAFT_OPENSOURCE_DRAGNN_CORE_COMPUTE_SESSION_H_ +#define NLP_SAFT_OPENSOURCE_DRAGNN_CORE_COMPUTE_SESSION_H_ + +#include + +#include "dragnn/components/util/bulk_feature_extractor.h" +#include "dragnn/core/index_translator.h" +#include "dragnn/core/interfaces/component.h" +#include "dragnn/protos/spec.pb.h" +#include "dragnn/protos/trace.pb.h" + +namespace syntaxnet { +namespace dragnn { + +// This defines the interface for a ComputeSession object. We only ever expect +// ComputeSessionImpl to implement the ComputeSession - this is only used +// to provide a mocking seam. + +class ComputeSession { + public: + virtual ~ComputeSession() {} + + // Initialize this ComputeSession to compute the graph defined in the given + // MasterSpec with the hyperparameters passed in the GridPoint. This should + // only be called once, when the ComputeSession is created. + virtual void Init(const MasterSpec &master_spec, + const GridPoint &hyperparams) = 0; + + // Initialize a component with data and a given maximum beam + // size. Note that attempting to initialize a component that depends on + // another component that has not yet finished will cause a CHECK failure. + virtual void InitializeComponentData(const string &component_name, + int max_beam_size) = 0; + + // Return the batch size for the given component. + virtual int BatchSize(const string &component_name) const = 0; + + // Return the beam size for the given component. 
+ virtual int BeamSize(const string &component_name) const = 0; + + // Returns the spec used to create this ComputeSession. + virtual const ComponentSpec &Spec(const string &component_name) const = 0; + + // For a given component and linked feature channel, get the beam size of the + // component that is the source of the linked features. + virtual int SourceComponentBeamSize(const string &component_name, + int channel_id) = 0; + + // Advance the given component using the component's oracle. + virtual void AdvanceFromOracle(const string &component_name) = 0; + + // Advance the given component using the given score matrix. + virtual void AdvanceFromPrediction(const string &component_name, + const float score_matrix[], + int score_matrix_length) = 0; + + // Get the input features for the given component and channel. This passes + // through to the relevant Component's GetFixedFeatures() call. + virtual int GetInputFeatures( + const string &component_name, + std::function allocate_indices, + std::function allocate_ids, + std::function allocate_weights, + int channel_id) const = 0; + + // Get the input features for the given component and channel, advancing via + // the oracle until the state is final. This passes through to the relevant + // Component's BulkGetFixedFeatures() call. + virtual int BulkGetInputFeatures(const string &component_name, + const BulkFeatureExtractor &extractor) = 0; + + // Get the input features for the given component and channel. This function + // can return empty LinkFeatures protos, which represent unused padding slots + // in the output weight tensor. + virtual std::vector GetTranslatedLinkFeatures( + const string &component_name, int channel_id) = 0; + + // Get the oracle labels for the given component. + virtual std::vector> EmitOracleLabels( + const string &component_name) = 0; + + // Returns true if the given component is terminal. 
+ virtual bool IsTerminal(const string &component_name) = 0; + + // Force the given component to write out its predictions to the backing data. + virtual void FinalizeData(const string &component_name) = 0; + + // Return the finalized predictions from this compute session. + virtual std::vector GetSerializedPredictions() = 0; + + // Returns the trace protos. This will CHECK fail or be empty if the + // SetTracing() has not been called to initialize the underlying Component + // traces. + virtual std::vector GetTraceProtos() = 0; + + // Provides the ComputeSession with a batch of data to compute. + virtual void SetInputData(const std::vector &data) = 0; + + // Resets all components owned by this ComputeSession. + virtual void ResetSession() = 0; + + // Set the tracing for this ComputeSession. + virtual void SetTracing(bool tracing_on) = 0; + + // Returns a unique identifier for this ComputeSession. + virtual int Id() const = 0; + + // Returns a string describing the given component. + virtual string GetDescription(const string &component_name) const = 0; + + // Get all the translators for the given component. Should only be used to + // validate correct construction of translators in tests. 
+ virtual const std::vector Translators( + const string &component_name) const = 0; +}; + +} // namespace dragnn +} // namespace syntaxnet + +#endif // NLP_SAFT_OPENSOURCE_DRAGNN_CORE_COMPUTE_SESSION_H_ diff --git a/syntaxnet/dragnn/core/compute_session_impl.cc b/syntaxnet/dragnn/core/compute_session_impl.cc new file mode 100644 index 0000000000000000000000000000000000000000..f7d4fa823529385ad9628f9bd7a63bbb3ac537ed --- /dev/null +++ b/syntaxnet/dragnn/core/compute_session_impl.cc @@ -0,0 +1,384 @@ +#include "dragnn/core/compute_session_impl.h" + +#include +#include + +#include "dragnn/protos/data.pb.h" +#include "dragnn/protos/spec.pb.h" +#include "dragnn/protos/trace.pb.h" +#include "syntaxnet/registry.h" +#include "tensorflow/core/platform/logging.h" + +namespace syntaxnet { +namespace dragnn { + +ComputeSessionImpl::ComputeSessionImpl( + int id, + std::function(const string &component_name, + const string &backend_type)> + component_builder) + : component_builder_(std::move(component_builder)), id_(id) {} + +void ComputeSessionImpl::Init(const MasterSpec &master_spec, + const GridPoint &hyperparams) { + spec_ = master_spec; + grid_point_ = hyperparams; + + VLOG(2) << "Creating components."; + bool is_input = true; + Component *predecessor; + for (const ComponentSpec &spec : master_spec.component()) { + // Construct the component using the specified backend. + VLOG(2) << "Creating component '" << spec.name() + << "' with backend: " << spec.backend().registered_name(); + auto component = + component_builder_(spec.name(), spec.backend().registered_name()); + + // Initializes the component. + component->InitializeComponent(spec); + + // Adds a predecessor to non-input components. + if (!is_input) { + predecessors_.insert( + std::pair(component.get(), predecessor)); + } + + // The current component will be the predecessor component next time around. + predecessor = component.get(); + + // All components after the first are non-input components. 
+ is_input = false; + + // Move into components list. + components_.insert(std::pair>( + spec.name(), std::move(component))); + } + VLOG(2) << "Done creating components."; + + VLOG(2) << "Adding translators."; + for (const ComponentSpec &spec : master_spec.component()) { + // First, get the component object for this spec. + VLOG(2) << "Examining component: " << spec.name(); + auto map_result = components_.find(spec.name()); + CHECK(map_result != components_.end()) << "Unable to find component."; + Component *start_component = map_result->second.get(); + + if (spec.linked_feature_size() > 0) { + VLOG(2) << "Adding " << spec.linked_feature_size() << " translators for " + << spec.name(); + + // Attach all the translators described in the spec. + std::vector translator_set; + for (const LinkedFeatureChannel &channel : spec.linked_feature()) { + // For every translator, save off a non-unique ptr in the component name + // to translator map, then push the unique ptr onto the management + // vector. + auto translator = CreateTranslator(channel, start_component); + translator_set.push_back(translator.get()); + owned_translators_.push_back(std::move(translator)); + } + + // Once all translators have been created, associate this group of + // translators with a component. + translators_.insert(std::pair>( + spec.name(), std::move(translator_set))); + } else { + VLOG(2) << "No translators found for " << spec.name(); + } + } + VLOG(2) << "Done adding translators."; + + VLOG(2) << "Initialization complete."; +} + +void ComputeSessionImpl::InitializeComponentData(const string &component_name, + int max_beam_size) { + CHECK(input_data_ != nullptr) << "Attempted to access a component without " + "providing input data for this session."; + Component *component = GetComponent(component_name); + + // Try and find the source component. If one exists, check that it is terminal + // and get its data; if not, pass in an empty vector for source data. 
+ auto source_result = predecessors_.find(component); + if (source_result == predecessors_.end()) { + VLOG(1) << "Source result not found. Using empty initialization vector for " + << component_name; + component->InitializeData({}, max_beam_size, input_data_.get()); + } else { + VLOG(1) << "Source result found. Using prior initialization vector for " + << component_name; + auto source = source_result->second; + CHECK(source->IsTerminal()) << "Source is not terminal for component '" + << component_name << "'. Exiting."; + component->InitializeData(source->GetBeam(), max_beam_size, + input_data_.get()); + } + if (do_tracing_) { + component->InitializeTracing(); + } +} + +int ComputeSessionImpl::BatchSize(const string &component_name) const { + return GetReadiedComponent(component_name)->BatchSize(); +} + +int ComputeSessionImpl::BeamSize(const string &component_name) const { + return GetReadiedComponent(component_name)->BeamSize(); +} + +const ComponentSpec &ComputeSessionImpl::Spec( + const string &component_name) const { + for (const auto &component : spec_.component()) { + if (component.name() == component_name) { + return component; + } + } + LOG(FATAL) << "Missing component '" << component_name << "'. 
Exiting."; +} + +int ComputeSessionImpl::SourceComponentBeamSize(const string &component_name, + int channel_id) { + const auto &translators = GetTranslators(component_name); + return translators.at(channel_id)->path().back()->BeamSize(); +} + +void ComputeSessionImpl::AdvanceFromOracle(const string &component_name) { + GetReadiedComponent(component_name)->AdvanceFromOracle(); +} + +void ComputeSessionImpl::AdvanceFromPrediction(const string &component_name, + const float score_matrix[], + int score_matrix_length) { + GetReadiedComponent(component_name) + ->AdvanceFromPrediction(score_matrix, score_matrix_length); +} + +int ComputeSessionImpl::GetInputFeatures( + const string &component_name, std::function allocate_indices, + std::function allocate_ids, + std::function allocate_weights, int channel_id) const { + return GetReadiedComponent(component_name) + ->GetFixedFeatures(allocate_indices, allocate_ids, allocate_weights, + channel_id); +} + +int ComputeSessionImpl::BulkGetInputFeatures( + const string &component_name, const BulkFeatureExtractor &extractor) { + return GetReadiedComponent(component_name)->BulkGetFixedFeatures(extractor); +} + +std::vector ComputeSessionImpl::GetTranslatedLinkFeatures( + const string &component_name, int channel_id) { + auto *component = GetReadiedComponent(component_name); + auto features = component->GetRawLinkFeatures(channel_id); + + IndexTranslator *translator = GetTranslators(component_name).at(channel_id); + for (int i = 0; i < features.size(); ++i) { + LinkFeatures &feature = features[i]; + if (feature.has_feature_value()) { + VLOG(2) << "Raw feature[" << i << "]: " << feature.ShortDebugString(); + IndexTranslator::Index index = translator->Translate( + feature.batch_idx(), feature.beam_idx(), feature.feature_value()); + feature.set_step_idx(index.step_index); + feature.set_batch_idx(index.batch_index); + feature.set_beam_idx(index.beam_index); + } else { + VLOG(2) << "Raw feature[" << i << "]: PADDING (empty proto)"; + } + 
} + + // Add the translated link features to the component's trace. + if (do_tracing_) { + component->AddTranslatedLinkFeaturesToTrace(features, channel_id); + } + + return features; +} +std::vector> ComputeSessionImpl::EmitOracleLabels( + const string &component_name) { + return GetReadiedComponent(component_name)->GetOracleLabels(); +} + +bool ComputeSessionImpl::IsTerminal(const string &component_name) { + return GetReadiedComponent(component_name)->IsTerminal(); +} + +void ComputeSessionImpl::SetTracing(bool tracing_on) { + do_tracing_ = tracing_on; + for (auto &component_pair : components_) { + if (!tracing_on) { + component_pair.second->DisableTracing(); + } + } +} + +void ComputeSessionImpl::FinalizeData(const string &component_name) { + VLOG(2) << "Finalizing data for " << component_name; + GetReadiedComponent(component_name)->FinalizeData(); +} + +std::vector ComputeSessionImpl::GetSerializedPredictions() { + VLOG(2) << "Geting serialized predictions."; + return input_data_->SerializedData(); +} + +std::vector ComputeSessionImpl::GetTraceProtos() { + std::vector traces; + + // First compute all possible traces for each component. + std::map>> component_traces; + std::vector pipeline; + for (auto &component_spec : spec_.component()) { + pipeline.push_back(component_spec.name()); + component_traces.insert( + {component_spec.name(), + GetComponent(component_spec.name())->GetTraceProtos()}); + } + + // Only output for the actual number of states in each beam. + auto final_beam = GetComponent(pipeline.back())->GetBeam(); + for (int batch_idx = 0; batch_idx < final_beam.size(); ++batch_idx) { + for (int beam_idx = 0; beam_idx < final_beam[batch_idx].size(); + ++beam_idx) { + std::vector beam_path; + beam_path.push_back(beam_idx); + + // Trace components backwards, finding the source of each state in the + // prior component. 
+ VLOG(2) << "Start trace: " << beam_idx; + for (int i = pipeline.size() - 1; i > 0; --i) { + const auto *component = GetComponent(pipeline[i]); + int source_beam_idx = + component->GetSourceBeamIndex(beam_path.back(), batch_idx); + beam_path.push_back(source_beam_idx); + + VLOG(2) << "Tracing path: " << pipeline[i] << " = " << source_beam_idx; + } + + // Trace the path from the *start* to the end. + std::reverse(beam_path.begin(), beam_path.end()); + MasterTrace master_trace; + for (int i = 0; i < pipeline.size(); ++i) { + *master_trace.add_component_trace() = + component_traces[pipeline[i]][batch_idx][beam_path[i]]; + } + traces.push_back(master_trace); + } + } + + return traces; +} + +void ComputeSessionImpl::SetInputData(const std::vector &data) { + input_data_.reset(new InputBatchCache(data)); +} + +void ComputeSessionImpl::ResetSession() { + // Reset all component states. + for (auto &component_pair : components_) { + component_pair.second->ResetComponent(); + } + + // Reset the input data pointer. 
+ input_data_.reset(); +} + +int ComputeSessionImpl::Id() const { return id_; } + +string ComputeSessionImpl::GetDescription(const string &component_name) const { + return GetComponent(component_name)->Name(); +} + +const std::vector ComputeSessionImpl::Translators( + const string &component_name) const { + auto translators = GetTranslators(component_name); + std::vector const_translators; + for (const auto &translator : translators) { + const_translators.push_back(translator); + } + return const_translators; +} + +Component *ComputeSessionImpl::GetReadiedComponent( + const string &component_name) const { + auto component = GetComponent(component_name); + CHECK(component->IsReady()) + << "Attempted to access component " << component_name + << " without first initializing it."; + return component; +} + +Component *ComputeSessionImpl::GetComponent( + const string &component_name) const { + auto result = components_.find(component_name); + if (result == components_.end()) { + LOG(ERROR) << "Could not find component \"" << component_name + << "\" in the component set. Current components are: "; + for (const auto &component_pair : components_) { + LOG(ERROR) << component_pair.first; + } + LOG(FATAL) << "Missing component. Exiting."; + } + + auto component = result->second.get(); + return component; +} + +const std::vector &ComputeSessionImpl::GetTranslators( + const string &component_name) const { + auto result = translators_.find(component_name); + if (result == translators_.end()) { + LOG(ERROR) << "Could not find component " << component_name + << " in the translator set. Current components are: "; + for (const auto &component_pair : translators_) { + LOG(ERROR) << component_pair.first; + } + LOG(FATAL) << "Missing component. 
Exiting."; + } + return result->second; +} + +std::unique_ptr ComputeSessionImpl::CreateTranslator( + const LinkedFeatureChannel &channel, Component *start_component) { + const int num_components = spec_.component_size(); + VLOG(2) << "Channel spec: " << channel.ShortDebugString(); + + // Find the linked feature's source component, if it exists. + auto source_map_result = components_.find(channel.source_component()); + CHECK(source_map_result != components_.end()) + << "Unable to find source component " << channel.source_component(); + const Component *end_component = source_map_result->second.get(); + + // Our goal here is to iterate up the source map from the + // start_component to the end_component. + Component *current_component = start_component; + std::vector path; + path.push_back(current_component); + while (current_component != end_component) { + // Try to find the next link upwards in the source chain. + auto source_result = predecessors_.find(current_component); + + // If this component doesn't have a source to find, that's an error. + CHECK(source_result != predecessors_.end()) + << "No link to source " << channel.source_component(); + + // If we jump more times than there are components in the graph, that + // is an error state. + CHECK_LT(path.size(), num_components) << "Too many jumps. Is there a " + "loop in the MasterSpec " + "component definition?"; + + // Add the source to the vector and repeat. + path.push_back(source_result->second); + current_component = source_result->second; + } + + // At this point, we have the source chain for the traslator and can + // build it. 
+ std::unique_ptr translator( + new IndexTranslator(path, channel.source_translator())); + return translator; +} + +} // namespace dragnn +} // namespace syntaxnet diff --git a/syntaxnet/dragnn/core/compute_session_impl.h b/syntaxnet/dragnn/core/compute_session_impl.h new file mode 100644 index 0000000000000000000000000000000000000000..24a7311a38069dd31c8f862a9ea2f9940164dc49 --- /dev/null +++ b/syntaxnet/dragnn/core/compute_session_impl.h @@ -0,0 +1,142 @@ +#ifndef NLP_SAFT_OPENSOURCE_DRAGNN_CORE_COMPUTE_SESSION_IMPL_H_ +#define NLP_SAFT_OPENSOURCE_DRAGNN_CORE_COMPUTE_SESSION_IMPL_H_ + +#include + +#include "dragnn/components/util/bulk_feature_extractor.h" +#include "dragnn/core/compute_session.h" +#include "dragnn/core/index_translator.h" +#include "dragnn/core/input_batch_cache.h" +#include "dragnn/protos/data.pb.h" +#include "dragnn/protos/spec.pb.h" +#include "dragnn/protos/trace.pb.h" + +namespace syntaxnet { +namespace dragnn { + +class ComputeSessionImpl : public ComputeSession { + public: + // Creates a ComputeSessionImpl with the provided component builder function. 
+ ComputeSessionImpl( + int id, + std::function(const string &component_name, + const string &backend_type)> + component_builder); + + void Init(const MasterSpec &master_spec, + const GridPoint &hyperparams) override; + + void InitializeComponentData(const string &component_name, + int max_beam_size) override; + + int BatchSize(const string &component_name) const override; + + int BeamSize(const string &component_name) const override; + + const ComponentSpec &Spec(const string &component_name) const override; + + int SourceComponentBeamSize(const string &component_name, + int channel_id) override; + + void AdvanceFromOracle(const string &component_name) override; + + void AdvanceFromPrediction(const string &component_name, + const float score_matrix[], + int score_matrix_length) override; + + int GetInputFeatures(const string &component_name, + std::function allocate_indices, + std::function allocate_ids, + std::function allocate_weights, + int channel_id) const override; + + int BulkGetInputFeatures(const string &component_name, + const BulkFeatureExtractor &extractor) override; + + std::vector GetTranslatedLinkFeatures( + const string &component_name, int channel_id) override; + + std::vector> EmitOracleLabels( + const string &component_name) override; + + bool IsTerminal(const string &component_name) override; + + void FinalizeData(const string &component_name) override; + + std::vector GetSerializedPredictions() override; + + std::vector GetTraceProtos() override; + + void SetInputData(const std::vector &data) override; + + void ResetSession() override; + + void SetTracing(bool tracing_on) override; + + int Id() const override; + + string GetDescription(const string &component_name) const override; + + const std::vector Translators( + const string &component_name) const override; + + private: + // Get a given component. Fails if the component is not found. + Component *GetComponent(const string &component_name) const; + + // Get a given component. 
CHECK-fail if the component's IsReady method + // returns false. + Component *GetReadiedComponent(const string &component_name) const; + + // Get the index translators for the given component. + const std::vector &GetTranslators( + const string &component_name) const; + + // Create an index translator. + std::unique_ptr CreateTranslator( + const LinkedFeatureChannel &channel, Component *start_component); + + // Perform initialization on the given Component. + void InitComponent(Component *component); + + // Holds all of the components owned by this ComputeSession, associated with + // their names in the MasterSpec. + std::map> components_; + + // Holds a vector of translators for each component, indexed by the name + // of the component they belong to. + std::map> translators_; + + // Holds ownership of all the IndexTranslators for this compute session. + std::vector> owned_translators_; + + // The predecessor component for every component. + // If a component is not in this map, it has no predecessor component and + // will have its beam initialized without any data from other components. + std::map predecessors_; + + // Holds the current input data for this ComputeSession. + std::unique_ptr input_data_; + + // Function that, given a string, will return a Component. + std::function(const string &component_name, + const string &backend_type)> + component_builder_; + + // The master spec for this compute session. + MasterSpec spec_; + + // The hyperparameters for this compute session. + GridPoint grid_point_; + + // Unique identifier, assigned at construction. + int id_; + + // Whether or not to perform tracing. 
+ bool do_tracing_ = false; +}; + +} // namespace dragnn +} // namespace syntaxnet + +#endif // NLP_SAFT_OPENSOURCE_DRAGNN_CORE_COMPUTE_SESSION_IMPL_H_ diff --git a/syntaxnet/dragnn/core/compute_session_impl_test.cc b/syntaxnet/dragnn/core/compute_session_impl_test.cc new file mode 100644 index 0000000000000000000000000000000000000000..596abe2cfebe2c59e98244571237245eef1dbdec --- /dev/null +++ b/syntaxnet/dragnn/core/compute_session_impl_test.cc @@ -0,0 +1,1157 @@ +#include "dragnn/core/compute_session_impl.h" + +#include +#include + +#include "dragnn/components/util/bulk_feature_extractor.h" +#include "dragnn/core/component_registry.h" +#include "dragnn/core/compute_session.h" +#include "dragnn/core/compute_session_pool.h" +#include "dragnn/core/interfaces/component.h" +#include "dragnn/core/test/generic.h" +#include "dragnn/core/test/mock_component.h" +#include "dragnn/core/test/mock_transition_state.h" +#include "tensorflow/core/platform/test.h" + +namespace syntaxnet { +namespace dragnn { + +using syntaxnet::test::EqualsProto; +using testing::_; +using testing::ElementsAre; +using testing::Return; +using testing::NotNull; + +// ***************************************************************************** +// Test-internal class definitions. +// ***************************************************************************** + +// Define a test component to validate registered construction. 
+class TestComponentType1 : public Component { + public: + TestComponentType1() {} + void InitializeComponent(const ComponentSpec &spec) override { + name_ = spec.name(); + } + void InitializeData( + const std::vector> &states, + int max_beam_size, InputBatchCache *input_data) override {} + void InitializeTracing() override {} + void DisableTracing() override {} + bool IsReady() const override { return true; } + string Name() const override { return name_; } + int BeamSize() const override { return 3; } + int BatchSize() const override { return 1; } + int StepsTaken(int batch_index) const override { return 0; } + int GetBeamIndexAtStep(int step, int current_index, + int batch) const override { + return 0; + } + int GetSourceBeamIndex(int current_index, int batch) const override { + return 0; + } + void AdvanceFromPrediction(const float transition_matrix[], + int matrix_length) override {} + void AdvanceFromOracle() override {} + bool IsTerminal() const override { return true; } + std::function GetStepLookupFunction( + const string &method) override { + return nullptr; + } + std::vector> GetBeam() override { + std::vector> states; + return states; + } + int GetFixedFeatures(std::function allocate_indices, + std::function allocate_ids, + std::function allocate_weights, + int channel_id) const override { + return 0; + } + int BulkGetFixedFeatures(const BulkFeatureExtractor &extractor) override { + return 0; + } + std::vector GetRawLinkFeatures(int channel_id) const override { + std::vector ret; + return ret; + } + std::vector> GetOracleLabels() const override { + std::vector> ret; + return ret; + } + void FinalizeData() override {} + void ResetComponent() override {} + + std::vector> GetTraceProtos() const override { + std::vector> ret; + return ret; + } + void AddTranslatedLinkFeaturesToTrace( + const std::vector &features, int channel_id) override {} + + string name_; +}; + +REGISTER_DRAGNN_COMPONENT(TestComponentType1); + +// Define a second test component to 
validate registered construction. +class TestComponentType2 : public Component { + public: + TestComponentType2() {} + void InitializeComponent(const ComponentSpec &spec) override { + name_ = spec.name(); + } + void InitializeData( + const std::vector> &states, + int max_beam_size, InputBatchCache *input_data) override {} + void InitializeTracing() override {} + void DisableTracing() override {} + bool IsReady() const override { return true; } + string Name() const override { return name_; } + int BeamSize() const override { return 4; } + int BatchSize() const override { return 2; } + int StepsTaken(int batch_index) const override { return 0; } + int GetBeamIndexAtStep(int step, int current_index, + int batch) const override { + return 0; + } + int GetSourceBeamIndex(int current_index, int batch) const override { + return 0; + } + void AdvanceFromPrediction(const float transition_matrix[], + int matrix_length) override {} + void AdvanceFromOracle() override {} + bool IsTerminal() const override { return true; } + std::function GetStepLookupFunction( + const string &method) override { + return nullptr; + } + std::vector> GetBeam() override { + std::vector> states; + return states; + } + int GetFixedFeatures(std::function allocate_indices, + std::function allocate_ids, + std::function allocate_weights, + int channel_id) const override { + return 0; + } + int BulkGetFixedFeatures(const BulkFeatureExtractor &extractor) override { + return 0; + } + std::vector GetRawLinkFeatures(int channel_id) const override { + std::vector ret; + return ret; + } + std::vector> GetOracleLabels() const override { + std::vector> ret; + return ret; + } + void FinalizeData() override {} + void ResetComponent() override {} + + std::vector> GetTraceProtos() const override { + std::vector> ret; + return ret; + } + void AddTranslatedLinkFeaturesToTrace( + const std::vector &features, int channel_id) override {} + + string name_; +}; + +REGISTER_DRAGNN_COMPONENT(TestComponentType2); + +// 
Define a component that returns false for IsReady and IsTerminal. +class UnreadyComponent : public Component { + public: + UnreadyComponent() {} + void InitializeComponent(const ComponentSpec &spec) override { + name_ = spec.name(); + } + void InitializeData( + const std::vector> &states, + int max_beam_size, InputBatchCache *input_data) override {} + void InitializeTracing() override {} + void DisableTracing() override {} + bool IsReady() const override { return false; } + string Name() const override { return name_; } + int BeamSize() const override { return 1; } + int BatchSize() const override { return 2; } + int StepsTaken(int batch_index) const override { return 0; } + int GetBeamIndexAtStep(int step, int current_index, + int batch) const override { + return 0; + } + int GetSourceBeamIndex(int current_index, int batch) const override { + return 0; + } + void AdvanceFromPrediction(const float transition_matrix[], + int matrix_length) override {} + void AdvanceFromOracle() override {} + bool IsTerminal() const override { return false; } + std::function GetStepLookupFunction( + const string &method) override { + return nullptr; + } + std::vector> GetBeam() override { + std::vector> states; + return states; + } + int GetFixedFeatures(std::function allocate_indices, + std::function allocate_ids, + std::function allocate_weights, + int channel_id) const override { + return 0; + } + int BulkGetFixedFeatures(const BulkFeatureExtractor &extractor) override { + return 0; + } + std::vector GetRawLinkFeatures(int channel_id) const override { + std::vector ret; + return ret; + } + std::vector> GetOracleLabels() const override { + std::vector> ret; + return ret; + } + void FinalizeData() override {} + void ResetComponent() override {} + std::vector> GetTraceProtos() const override { + std::vector> ret; + return ret; + } + void AddTranslatedLinkFeaturesToTrace( + const std::vector &features, int channel_id) override {} + string name_; +}; + 
+REGISTER_DRAGNN_COMPONENT(UnreadyComponent); + +class ComputeSessionImplTestPoolAccessor { + public: + static void SetComponentBuilder( + ComputeSessionPool *pool, + std::function(const string &component_name, + const string &backend_type)> + component_builder_function) { + pool->SetComponentBuilder(std::move(component_builder_function)); + } +}; + +// ***************************************************************************** +// Tests begin here. +// ***************************************************************************** + +// Helper function to validate a translation path against a vector of expected +// component name strings. +void ValidatePath(const std::vector &expected_path, + const std::vector &path) { + EXPECT_EQ(expected_path.size(), path.size()); + for (int i = 0; i < expected_path.size(); ++i) { + EXPECT_EQ(expected_path.at(i), path.at(i)->Name()); + } +} + +void AddComponentToSpec(const string &component_name, + const string &backend_name, MasterSpec *spec) { + auto component_spec = spec->add_component(); + component_spec->set_name(component_name); + auto backend = component_spec->mutable_backend(); + backend->set_registered_name(backend_name); +} + +void AddTranslatorToSpec(const string &source_name, const string &dest_name, + const string &type, MasterSpec *spec) { + // Find the destination component. + ComponentSpec *dest_spec = nullptr; + for (int i = 0; i < spec->component_size(); ++i) { + if (spec->component(i).name() == dest_name) { + dest_spec = spec->mutable_component(i); + break; + } + } + + // Make sure it's not null... + EXPECT_NE(dest_spec, nullptr); + + // Set up the translator. + auto linked_feature = dest_spec->add_linked_feature(); + linked_feature->set_source_component(source_name); + linked_feature->set_source_translator(type); +} + +TEST(ComputeSessionImplTest, CreatesComponent) { + // Define a spec that creates an instance of TestComponentType1. 
+ MasterSpec spec; + GridPoint hyperparams; + + AddComponentToSpec("component_one", "TestComponentType1", &spec); + + // Create a pool so we can get a session. + ComputeSessionPool pool(spec, hyperparams); + auto session = pool.GetSession(); + session->SetInputData({"arbitrary_data"}); + + // Make sure that the component exists and is of type TestComponentType1. + string Type1ComponentDesc = "component_one"; + constexpr int kType1BatchSize = 1; + EXPECT_EQ(Type1ComponentDesc, session->GetDescription("component_one")); + EXPECT_EQ(kType1BatchSize, session->BatchSize("component_one")); +} + +TEST(ComputeSessionImplTest, ReturnsComponentSpec) { + // Define a spec that creates an instance of TestComponentType1 and + // TestComponentType2. + MasterSpec spec; + GridPoint hyperparams; + + AddComponentToSpec("component_one", "TestComponentType1", &spec); + AddComponentToSpec("component_two", "TestComponentType2", &spec); + + // Create a pool so we can get a session. + ComputeSessionPool pool(spec, hyperparams); + + auto session = pool.GetSession(); + EXPECT_EQ(spec.component(1).DebugString(), + session->Spec("component_two").DebugString()); +} + +TEST(ComputeSessionImplTest, CreatesMultipleComponents) { + // Define a spec that creates an instance of TestComponentType1 and + // TestComponentType2. + MasterSpec spec; + GridPoint hyperparams; + + AddComponentToSpec("component_one", "TestComponentType1", &spec); + AddComponentToSpec("component_two", "TestComponentType2", &spec); + + // Create a pool so we can get a session. + ComputeSessionPool pool(spec, hyperparams); + + auto session = pool.GetSession(); + session->SetInputData({"arbitrary_data"}); + + // Make sure that the components exist and are the correct type. 
+ string Type1ComponentDesc = "component_one"; + constexpr int kType1BatchSize = 1; + EXPECT_EQ(Type1ComponentDesc, session->GetDescription("component_one")); + EXPECT_EQ(kType1BatchSize, session->BatchSize("component_one")); + + string Type2ComponentDesc = "component_two"; + constexpr int kType2BatchSize = 2; + EXPECT_EQ(Type2ComponentDesc, session->GetDescription("component_two")); + EXPECT_EQ(kType2BatchSize, session->BatchSize("component_two")); +} + +TEST(ComputeSessionImplTest, InitializesComponents) { + // Define a spec that creates an instance of TestComponentType1 and + // TestComponentType2. + MasterSpec spec; + GridPoint hyperparams; + + AddComponentToSpec("component_one", "TestComponentType1", &spec); + AddComponentToSpec("component_two", "TestComponentType2", &spec); + + // Create a map to hold references to mock components. Expect the correct + // initialization call (with the appropriate proto passed in). + std::map mock_components; + auto builder_function = [&mock_components, spec](const string &name, + const string &backend_type) { + VLOG(2) << "Mocking for name: " << name; + std::unique_ptr component(new MockComponent()); + if (name == "component_one") { + EXPECT_CALL(*component, + InitializeComponent(EqualsProto(spec.component(0)))); + } else { + EXPECT_CALL(*component, + InitializeComponent(EqualsProto(spec.component(1)))); + } + mock_components[name] = component.get(); + return component; + }; + + // Create a pool so we can get a session. + ComputeSessionPool pool(spec, hyperparams); + ComputeSessionImplTestPoolAccessor::SetComponentBuilder(&pool, + builder_function); + auto session = pool.GetSession(); + session->SetInputData({"arbitrary_data"}); +} + +TEST(ComputeSessionImplTest, CreatesTranslator) { + MasterSpec spec; + GridPoint hyperparams; + + AddComponentToSpec("component_one", "TestComponentType1", &spec); + AddComponentToSpec("component_two", "TestComponentType2", &spec); + + // Add a translator from component 1 to component 2. 
+ AddTranslatorToSpec("component_one", "component_two", "identity", &spec); + + // Create a pool so we can get a session. + ComputeSessionPool pool(spec, hyperparams); + auto session = pool.GetSession(); + session->SetInputData({"arbitrary_data"}); + + auto linked_features = session->Translators("component_two"); + EXPECT_EQ(1, linked_features.size()); + ValidatePath({"component_two", "component_one"}, + linked_features.at(0)->path()); + EXPECT_EQ(linked_features.at(0)->method(), "identity"); +} + +TEST(ComputeSessionImplTest, CreatesTranslatorWithLongWalk) { + MasterSpec spec; + GridPoint hyperparams; + + AddComponentToSpec("component_one", "TestComponentType2", &spec); + AddComponentToSpec("component_two", "TestComponentType1", &spec); + AddComponentToSpec("component_three", "TestComponentType2", &spec); + + // Add a translator from component 3 to component 1. + AddTranslatorToSpec("component_one", "component_three", "identity", &spec); + + // Create a pool so we can get a session. + ComputeSessionPool pool(spec, hyperparams); + auto session = pool.GetSession(); + session->SetInputData({"arbitrary_data"}); + + // Get and validate the linked feature vector for component 3. + auto linked_features = session->Translators("component_three"); + EXPECT_EQ(1, linked_features.size()); + ValidatePath({"component_three", "component_two", "component_one"}, + linked_features.at(0)->path()); + EXPECT_EQ(linked_features.at(0)->method(), "identity"); +} + +TEST(ComputeSessionImplTest, CreatesTranslatorForMultipleComponents) { + MasterSpec spec; + GridPoint hyperparams; + + AddComponentToSpec("component_one", "TestComponentType2", &spec); + AddComponentToSpec("component_two", "TestComponentType1", &spec); + AddComponentToSpec("component_three", "TestComponentType2", &spec); + + // Add a translator from component 3 to component 1. + AddTranslatorToSpec("component_one", "component_three", "identity", &spec); + + // Add a translator from component 3 to component 2. 
+ AddTranslatorToSpec("component_two", "component_three", "history", &spec); + + // Add a translator from component 2 to component 1. + AddTranslatorToSpec("component_one", "component_two", "history", &spec); + + // Create a pool so we can get a session. + ComputeSessionPool pool(spec, hyperparams); + auto session = pool.GetSession(); + session->SetInputData({"arbitrary_data"}); + + // Get and validate the linked feature vector for component 3. + auto linked_features = session->Translators("component_three"); + EXPECT_EQ(2, linked_features.size()); + ValidatePath({"component_three", "component_two", "component_one"}, + linked_features.at(0)->path()); + EXPECT_EQ(linked_features.at(0)->method(), "identity"); + ValidatePath({"component_three", "component_two"}, + linked_features.at(1)->path()); + EXPECT_EQ(linked_features.at(1)->method(), "history"); + + // Get and validate the linked feature vector for component 2. + auto linked_features_2 = session->Translators("component_two"); + EXPECT_EQ(1, linked_features_2.size()); + ValidatePath({"component_two", "component_one"}, + linked_features_2.at(0)->path()); + EXPECT_EQ(linked_features_2.at(0)->method(), "history"); +} + +TEST(ComputeSessionImplTest, CreatesMultipleTranslatorsBetweenSameComponents) { + MasterSpec spec; + GridPoint hyperparams; + + AddComponentToSpec("component_one", "TestComponentType2", &spec); + AddComponentToSpec("component_two", "TestComponentType1", &spec); + + // Add a translator from component 2 to component 1. + AddTranslatorToSpec("component_one", "component_two", "identity", &spec); + + // Add a translator from component 2 to component 1. + AddTranslatorToSpec("component_one", "component_two", "history", &spec); + + // Create a pool so we can get a session. + ComputeSessionPool pool(spec, hyperparams); + auto session = pool.GetSession(); + session->SetInputData({"arbitrary_data"}); + + // Get and validate the linked feature vector for component 2. 
+ auto linked_features = session->Translators("component_two"); + EXPECT_EQ(2, linked_features.size()); + ValidatePath({"component_two", "component_one"}, + linked_features.at(0)->path()); + EXPECT_EQ(linked_features.at(0)->method(), "identity"); + ValidatePath({"component_two", "component_one"}, + linked_features.at(1)->path()); + EXPECT_EQ(linked_features.at(1)->method(), "history"); +} + +TEST(ComputeSessionImplTest, CreatesSelfReferentialTranslator) { + MasterSpec spec; + GridPoint hyperparams; + + AddComponentToSpec("component_one", "TestComponentType2", &spec); + AddComponentToSpec("component_two", "TestComponentType1", &spec); + + // Add a translator from component 1 to component 1. + AddTranslatorToSpec("component_one", "component_one", "identity", &spec); + + // Create a pool so we can get a session. + ComputeSessionPool pool(spec, hyperparams); + auto session = pool.GetSession(); + session->SetInputData({"arbitrary_data"}); + + // Get and validate the linked feature vector for component 1. + auto linked_features = session->Translators("component_one"); + EXPECT_EQ(1, linked_features.size()); + ValidatePath({"component_one"}, linked_features.at(0)->path()); +} + +TEST(ComputeSessionImplTest, CreateTranslatorFailsWithWrongNameDeathTest) { + MasterSpec spec; + GridPoint hyperparams; + + AddComponentToSpec("component_one", "TestComponentType2", &spec); + AddComponentToSpec("component_two", "TestComponentType1", &spec); + + // Add a translator from a nonexistent component to component 1. + AddTranslatorToSpec("NONEXISTENT_COMPONENT_THIS_WILL_DIE", "component_one", + "identity", &spec); + + // Create a pool so we can get a session. 
+ ComputeSessionPool pool(spec, hyperparams); + + EXPECT_DEATH(pool.GetSession(), "Unable to find source component"); +} + +TEST(ComputeSessionImplTest, GetsSourceComponentBeamSize) { + MasterSpec spec; + GridPoint hyperparams; + + AddComponentToSpec("component_one", "TestComponentType1", &spec); + AddComponentToSpec("component_two", "TestComponentType2", &spec); + + // Add a translator from component 1 to component 2. + AddTranslatorToSpec("component_one", "component_two", "identity", &spec); + + // Create a pool so we can get a session. + ComputeSessionPool pool(spec, hyperparams); + auto session = pool.GetSession(); + session->SetInputData({"arbitrary_data"}); + + constexpr int kChannelId = 0; + constexpr int kType1BeamSize = 3; + EXPECT_EQ(kType1BeamSize, + session->SourceComponentBeamSize("component_two", kChannelId)); +} + +TEST(ComputeSessionImplTest, GetsTranslatedLinkFeatures) { + MasterSpec spec; + GridPoint hyperparams; + + AddComponentToSpec("component_one", "TestComponentType1", &spec); + AddComponentToSpec("component_two", "TestComponentType2", &spec); + + // Add a translator from component 1 to component 2. + AddTranslatorToSpec("component_one", "component_two", "identity", &spec); + + // Create a map to hold references to mock components. + std::map mock_components; + auto builder_function = [&mock_components, spec](const string &name, + const string &backend_type) { + VLOG(2) << "Mocking for name: " << name; + std::unique_ptr component(new MockComponent()); + EXPECT_CALL(*component, InitializeComponent(_)); + EXPECT_CALL(*component, IsReady()).WillRepeatedly(Return(true)); + mock_components[name] = component.get(); + return component; + }; + + // Create a pool, substituting a mock component builder. 
+ ComputeSessionPool pool(spec, hyperparams); + ComputeSessionImplTestPoolAccessor::SetComponentBuilder(&pool, + builder_function); + auto session = pool.GetSession(); + session->SetInputData({"arbitrary_data"}); + + // Create a link features vector to return from the destination component. + std::vector features; + LinkFeatures feature_one; + feature_one.set_batch_idx(12); + feature_one.set_beam_idx(23); + feature_one.set_feature_value(34); + features.push_back(feature_one); + LinkFeatures feature_two; + feature_two.set_batch_idx(45); + feature_two.set_beam_idx(56); + feature_two.set_feature_value(67); + features.push_back(feature_two); + + // This link feature should remain empty. + LinkFeatures padding_feature; + features.push_back(padding_feature); + + // The session should request the raw link features for the specified channel. + constexpr int kChannelId = 0; + EXPECT_CALL(*mock_components["component_two"], GetRawLinkFeatures(kChannelId)) + .WillOnce(Return(features)); + + // The session will request the source beam index for both features. + constexpr int kSourceBeamOneIndex = 7; + EXPECT_CALL( + *mock_components["component_two"], + GetSourceBeamIndex(feature_one.beam_idx(), feature_one.batch_idx())) + .WillOnce(Return(kSourceBeamOneIndex)); + constexpr int kSourceBeamTwoIndex = 77; + EXPECT_CALL( + *mock_components["component_two"], + GetSourceBeamIndex(feature_two.beam_idx(), feature_two.batch_idx())) + .WillOnce(Return(kSourceBeamTwoIndex)); + + // The translate call should use the 'identity' translator on the step index. + // This means that the GetBeamIndexAtStep call will have the values from + // the linked feature proto (since we also don't have an intermediate + // component.) 
+ constexpr int kFeatureOneBeamIndex = 9; + EXPECT_CALL(*mock_components["component_one"], + GetBeamIndexAtStep(feature_one.feature_value(), + kSourceBeamOneIndex, feature_one.batch_idx())) + .WillOnce(Return(kFeatureOneBeamIndex)); + + constexpr int kFeatureTwoBeamIndex = 99; + EXPECT_CALL(*mock_components["component_one"], + GetBeamIndexAtStep(feature_two.feature_value(), + kSourceBeamTwoIndex, feature_two.batch_idx())) + .WillOnce(Return(kFeatureTwoBeamIndex)); + + auto translated_features = + session->GetTranslatedLinkFeatures("component_two", kChannelId); + + auto translated_one = translated_features.at(0); + EXPECT_EQ(translated_one.batch_idx(), feature_one.batch_idx()); + EXPECT_EQ(translated_one.beam_idx(), kFeatureOneBeamIndex); + EXPECT_EQ(translated_one.step_idx(), feature_one.feature_value()); + + auto translated_two = translated_features.at(1); + EXPECT_EQ(translated_two.batch_idx(), feature_two.batch_idx()); + EXPECT_EQ(translated_two.beam_idx(), kFeatureTwoBeamIndex); + EXPECT_EQ(translated_two.step_idx(), feature_two.feature_value()); + + // The third feature is a padding feature, and so should be empty. + auto translated_three = translated_features.at(2); + EXPECT_FALSE(translated_three.has_batch_idx()); + EXPECT_FALSE(translated_three.has_beam_idx()); + EXPECT_FALSE(translated_three.has_step_idx()); + EXPECT_FALSE(translated_three.has_feature_value()); +} + +TEST(ComputeSessionImplTest, InitializesComponentDataWithNoSource) { + MasterSpec spec; + GridPoint hyperparams; + + AddComponentToSpec("component_one", "TestComponentType1", &spec); + + // Create a map to hold references to mock components. 
+ std::map mock_components; + auto builder_function = [&mock_components, spec](const string &name, + const string &backend_type) { + VLOG(2) << "Mocking for name: " << name; + std::unique_ptr component(new MockComponent()); + EXPECT_CALL(*component, InitializeComponent(_)); + mock_components[name] = component.get(); + return component; + }; + + // Create a pool, substituting a mock component builder. + ComputeSessionPool pool(spec, hyperparams); + ComputeSessionImplTestPoolAccessor::SetComponentBuilder(&pool, + builder_function); + auto session = pool.GetSession(); + + // Set expectations and get a session, then get the component. + // The initialization should be called with an empty state vector, but with + // a non-null input batch cache pointer. + constexpr int kMaxBeamSize = 11; + EXPECT_CALL(*(mock_components["component_one"]), + InitializeData(testing::IsEmpty(), kMaxBeamSize, NotNull())); + session->SetInputData({"arbitrary_data"}); + session->InitializeComponentData("component_one", kMaxBeamSize); +} + +TEST(ComputeSessionImplTest, InitializesComponentWithSource) { + MasterSpec spec; + GridPoint hyperparams; + + AddComponentToSpec("component_one", "TestComponentType2", &spec); + AddComponentToSpec("component_two", "TestComponentType1", &spec); + + // Create a map to hold references to mock components. + std::map mock_components; + auto builder_function = [&mock_components, spec](const string &name, + const string &backend_type) { + VLOG(2) << "Mocking for name: " << name; + std::unique_ptr component(new MockComponent()); + EXPECT_CALL(*component, InitializeComponent(_)); + mock_components[name] = component.get(); + return component; + }; + + // Create a pool, substituting a mock component builder.. + ComputeSessionPool pool(spec, hyperparams); + ComputeSessionImplTestPoolAccessor::SetComponentBuilder(&pool, + builder_function); + auto session = pool.GetSession(); + session->SetInputData({"arbitrary_data"}); + + // Set expectations. 
+ constexpr int kMaxBeamSize = 11; + MockTransitionState mock_transition_state; + std::vector> beam( + {{&mock_transition_state}}); + + // Expect that the first component will report that it is terminal and return + // a beam. + EXPECT_CALL(*mock_components["component_one"], IsTerminal()) + .WillOnce(Return(true)); + EXPECT_CALL(*mock_components["component_one"], GetBeam()) + .WillOnce(Return(beam)); + + // Expect that the second component will recieve that beam. + EXPECT_CALL(*mock_components["component_two"], + InitializeData(beam, kMaxBeamSize, NotNull())); + + // Attempt to initialize the component. + session->InitializeComponentData("component_two", kMaxBeamSize); +} + +TEST(ComputeSessionImplTest, + InitializeDataFailsWhenInputDataNotProvidedDeathTest) { + MasterSpec spec; + GridPoint hyperparams; + + AddComponentToSpec("component_one", "TestComponentType2", &spec); + + ComputeSessionPool pool(spec, hyperparams); + auto session = pool.GetSession(); + + constexpr int kMaxBeamSize = 3; + EXPECT_DEATH(session->InitializeComponentData("component_one", kMaxBeamSize), + "without providing input data"); +} + +TEST(ComputeSessionImplTest, + InitializeDataFailsWhenComponentDoesNotExistdeathTest) { + MasterSpec spec; + GridPoint hyperparams; + + AddComponentToSpec("component_one", "TestComponentType2", &spec); + + ComputeSessionPool pool(spec, hyperparams); + auto session = pool.GetSession(); + session->SetInputData({"arbitrary_data"}); + + constexpr int kMaxBeamSize = 3; + EXPECT_DEATH( + session->InitializeComponentData("DOES_NOT_EXIST_DIE", kMaxBeamSize), + "Could not find component"); +} + +TEST(ComputeSessionImplTest, + InitializeDataFailsWhenSourceIsNotTerminalDeathTest) { + auto function_that_will_die = []() { + MasterSpec spec; + GridPoint hyperparams; + + AddComponentToSpec("component_one", "TestComponentType2", &spec); + AddComponentToSpec("component_two", "TestComponentType1", &spec); + + // Create a map to hold references to mock components. 
+ std::map mock_components; + auto builder_function = [&mock_components, spec]( + const string &name, + const string &backend_type) { + VLOG(2) << "Mocking for name: " << name; + std::unique_ptr component(new MockComponent()); + EXPECT_CALL(*component, InitializeComponent(_)); + mock_components[name] = component.get(); + return component; + }; + + // Create a pool, substituting a mock component builder. + ComputeSessionPool pool(spec, hyperparams); + ComputeSessionImplTestPoolAccessor::SetComponentBuilder(&pool, + builder_function); + auto session = pool.GetSession(); + session->SetInputData({"arbitrary_data"}); + + // Expect that the first component will report that it is not terminal + EXPECT_CALL(*mock_components["component_one"], IsTerminal()) + .WillOnce(Return(false)); + + // Attempt to initialize the component. + constexpr int kMaxBeamSize = 11; + session->InitializeComponentData("component_two", kMaxBeamSize); + }; + + // The death expectation is interacting strangely with this test, so I need + // to wrap the function in a lambda. + EXPECT_DEATH(function_that_will_die(), "Source is not terminal"); +} + +TEST(ComputeSessionImplTest, ResetSessionResetsAllComponents) { + MasterSpec spec; + GridPoint hyperparams; + + AddComponentToSpec("component_one", "TestComponentType2", &spec); + AddComponentToSpec("component_two", "TestComponentType1", &spec); + + // Create a map to hold references to mock components. + std::map mock_components; + auto builder_function = [&mock_components, spec](const string &name, + const string &backend_type) { + VLOG(2) << "Mocking for name: " << name; + std::unique_ptr component(new MockComponent()); + EXPECT_CALL(*component, InitializeComponent(_)); + mock_components[name] = component.get(); + return component; + }; + + // Create a pool, substituting a mock component builder. 
+ ComputeSessionPool pool(spec, hyperparams); + ComputeSessionImplTestPoolAccessor::SetComponentBuilder(&pool, + builder_function); + auto session = pool.GetSession(); + session->SetInputData({"arbitrary_data"}); + + // Expect that the first component will report that it is not terminal + EXPECT_CALL(*mock_components["component_one"], ResetComponent()); + EXPECT_CALL(*mock_components["component_two"], ResetComponent()); + + session->ResetSession(); +} + +TEST(ComputeSessionImplTest, SetTracingPropagatesToAllComponents) { + MasterSpec spec; + GridPoint hyperparams; + + AddComponentToSpec("component_one", "TestComponentType2", &spec); + AddComponentToSpec("component_two", "TestComponentType1", &spec); + + // Add a translator from component 1 to component 2. + AddTranslatorToSpec("component_one", "component_two", "identity", &spec); + + // Create a map to hold references to mock components. + std::map mock_components; + auto builder_function = [&mock_components, spec](const string &name, + const string &backend_type) { + VLOG(2) << "Mocking for name: " << name; + std::unique_ptr component(new MockComponent()); + EXPECT_CALL(*component, InitializeComponent(_)); + mock_components[name] = component.get(); + return component; + }; + + // Create a pool, substituting a mock component builder. + ComputeSessionPool pool(spec, hyperparams); + ComputeSessionImplTestPoolAccessor::SetComponentBuilder(&pool, + builder_function); + auto session = pool.GetSession(); + session->SetInputData({"arbitrary_data"}); + + // Enable tracing on the session. + session->SetTracing(true); + + // Initialize the first component, along with its tracing. 
+ constexpr int kMaxBeamSize = 1; + EXPECT_CALL(*mock_components["component_one"], + InitializeData(testing::IsEmpty(), kMaxBeamSize, NotNull())); + EXPECT_CALL(*mock_components["component_one"], InitializeTracing()); + session->InitializeComponentData("component_one", kMaxBeamSize); + + MockTransitionState mock_transition_state; + std::vector> beam( + {{&mock_transition_state}}); + EXPECT_CALL(*mock_components["component_one"], IsTerminal()) + .WillOnce(Return(true)); + EXPECT_CALL(*mock_components["component_one"], GetBeam()) + .WillOnce(Return(beam)); + + // Expect that the second component will recieve that beam, and then its + // tracing will be initialized. + EXPECT_CALL(*mock_components["component_two"], + InitializeData(beam, kMaxBeamSize, NotNull())); + EXPECT_CALL(*mock_components["component_two"], InitializeTracing()); + session->InitializeComponentData("component_two", kMaxBeamSize); + + // Expect that all components will see the tracing value. + EXPECT_CALL(*mock_components["component_one"], IsReady()) + .WillRepeatedly(Return(true)); + EXPECT_CALL(*mock_components["component_two"], IsReady()) + .WillRepeatedly(Return(true)); + + std::vector features; + LinkFeatures feature_one; + feature_one.set_beam_idx(0); + feature_one.set_batch_idx(0); + feature_one.set_feature_value(34); + features.push_back(feature_one); + + // Translated version: feature_value is copied to step_idx. + std::vector translated; + feature_one.set_step_idx(feature_one.feature_value()); + translated.push_back(feature_one); + + // The session should request the raw link features for the specified channel. + constexpr int kChannelId = 0; + EXPECT_CALL(*mock_components["component_two"], GetRawLinkFeatures(kChannelId)) + .WillRepeatedly(Return(features)); + + // Identity will not change the features. 
+ EXPECT_CALL(*mock_components["component_two"], + AddTranslatedLinkFeaturesToTrace( + ElementsAre(EqualsProto(translated[0])), kChannelId)); + session->GetTranslatedLinkFeatures("component_two", kChannelId); + + // Now disable tracing. This time we don't expect any tracing to be called. + EXPECT_CALL(*mock_components["component_one"], DisableTracing()); + EXPECT_CALL(*mock_components["component_two"], DisableTracing()); + session->SetTracing(false); + EXPECT_CALL(*mock_components["component_two"], + AddTranslatedLinkFeaturesToTrace( + ElementsAre(EqualsProto(translated[0])), kChannelId)) + .Times(0); + session->GetTranslatedLinkFeatures("component_two", kChannelId); +} + +TEST(ComputeSessionImplTest, TraceSourceBeamPath) { + MasterSpec spec; + GridPoint hyperparams; + + AddComponentToSpec("component_one", "TestComponentType1", &spec); + AddComponentToSpec("component_two", "TestComponentType1", &spec); + AddComponentToSpec("component_three", "TestComponentType1", &spec); + + // Create a map to hold references to mock components. + std::map mock_components; + auto builder_function = [&mock_components, spec](const string &name, + const string &backend_type) { + VLOG(2) << "Mocking for name: " << name; + std::unique_ptr component(new MockComponent()); + EXPECT_CALL(*component, InitializeComponent(_)); + mock_components[name] = component.get(); + return component; + }; + + // Create a pool, substituting a mock component builder. + ComputeSessionPool pool(spec, hyperparams); + ComputeSessionImplTestPoolAccessor::SetComponentBuilder(&pool, + builder_function); + auto session = pool.GetSession(); + session->SetInputData({"arbitrary_data"}); + + ComponentTrace trace; + + // Test logic: verify that the traces correspond only to the paths taken to + // reach the final states in component 3. This requires backtracking to + // retrace the path of the beam. 
In this case, we expect three paths: + // + // Component 0 -> Component 1 -> Component 2 + // batch 0, beam 6 -> batch 0, beam 4 -> batch 0, beam 0 + // batch 0, beam 6 -> batch 0, beam 3 -> batch 0, beam 1 + // batch 1, beam 0 -> batch 1, beam 2 -> batch 1, beam 0 + + // Fill the component traces with some dummy values of the approach beam sizes + // for each batch. + + // Component 1: batch 0 has beam size 7, batch 1 has beam size 2. + std::vector> component_one_trace = { + {trace, trace, trace, trace, trace, trace, trace}, {trace, trace}}; + + // Component 2: batch 0 has beam size 5, batch 1 has beam size 3. + std::vector> component_two_trace = { + {trace, trace, trace, trace, trace}, {trace, trace, trace}}; + + // Component 3: batch 0 has beam size 2, batch 1 has beam size 1. + std::vector> component_three_trace = { + {trace, trace}, {trace}}; + + // The Session will get all traces from every component. + EXPECT_CALL(*mock_components["component_one"], GetTraceProtos()) + .WillOnce(Return(component_one_trace)); + EXPECT_CALL(*mock_components["component_two"], GetTraceProtos()) + .WillOnce(Return(component_two_trace)); + EXPECT_CALL(*mock_components["component_three"], GetTraceProtos()) + .WillOnce(Return(component_three_trace)); + + // Final beam has 2 states in batch 0, 1 state in batch 1. So we expect three + // chains. + MockTransitionState mock_transition_state; + std::vector> beam( + {{&mock_transition_state, &mock_transition_state}, + {&mock_transition_state}}); + + EXPECT_CALL(*mock_components["component_three"], GetBeam()) + .WillOnce(Return(beam)); + + // First test chain. + EXPECT_CALL(*mock_components["component_three"], GetSourceBeamIndex(0, 0)) + .WillOnce(Return(4)); + EXPECT_CALL(*mock_components["component_two"], GetSourceBeamIndex(4, 0)) + .WillOnce(Return(6)); + + // Second test chain. 
+ EXPECT_CALL(*mock_components["component_three"], GetSourceBeamIndex(1, 0)) + .WillOnce(Return(3)); + EXPECT_CALL(*mock_components["component_two"], GetSourceBeamIndex(3, 0)) + .WillOnce(Return(6)); + + // Third test chain. + EXPECT_CALL(*mock_components["component_three"], GetSourceBeamIndex(0, 1)) + .WillOnce(Return(2)); + EXPECT_CALL(*mock_components["component_two"], GetSourceBeamIndex(2, 1)) + .WillOnce(Return(1)); + + // Execute the call's. + session->GetTraceProtos(); +} + +TEST(ComputeSessionImplTest, InterfacePassesThrough) { + MasterSpec spec; + GridPoint hyperparams; + + AddComponentToSpec("component_one", "TestComponentType2", &spec); + AddComponentToSpec("component_two", "TestComponentType1", &spec); + + // Create a map to hold references to mock components. + std::map mock_components; + auto builder_function = [&mock_components, spec](const string &name, + const string &backend_type) { + VLOG(2) << "Mocking for name: " << name; + std::unique_ptr component(new MockComponent()); + EXPECT_CALL(*component, InitializeComponent(_)); + mock_components[name] = component.get(); + return component; + }; + + // Create a pool, substituting a mock component builder. + ComputeSessionPool pool(spec, hyperparams); + ComputeSessionImplTestPoolAccessor::SetComponentBuilder(&pool, + builder_function); + auto session = pool.GetSession(); + session->SetInputData({"arbitrary_data"}); + + // Expect that the first component will report that it is ready. 
+ EXPECT_CALL(*mock_components["component_one"], IsReady()) + .WillRepeatedly(Return(true)); + + // BatchSize() + int batch_size = 3; + EXPECT_CALL(*mock_components["component_one"], BatchSize()) + .WillOnce(Return(batch_size)); + EXPECT_EQ(batch_size, session->BatchSize("component_one")); + + // BeamSize() + int beam_size = 32; + EXPECT_CALL(*mock_components["component_one"], BeamSize()) + .WillOnce(Return(beam_size)); + EXPECT_EQ(beam_size, session->BeamSize("component_one")); + + // AdvanceFromOracle() + EXPECT_CALL(*mock_components["component_one"], AdvanceFromOracle()); + session->AdvanceFromOracle("component_one"); + + // AdvanceFromPrediction() + constexpr int kScoreMatrixLength = 3; + const float score_matrix[kScoreMatrixLength] = {1.0, 2.3, 4.5}; + EXPECT_CALL(*mock_components["component_one"], + AdvanceFromPrediction(score_matrix, kScoreMatrixLength)); + session->AdvanceFromPrediction("component_one", score_matrix, + kScoreMatrixLength); + + // GetFixedFeatures + auto allocate_indices = [](int size) -> int32 * { return nullptr; }; + auto allocate_ids = [](int size) -> int64 * { return nullptr; }; + auto allocate_weights = [](int size) -> float * { return nullptr; }; + constexpr int kChannelId = 3; + EXPECT_CALL(*mock_components["component_one"], + GetFixedFeatures(_, _, _, kChannelId)) + .WillOnce(Return(0)); + EXPECT_EQ( + 0, session->GetInputFeatures("component_one", allocate_indices, + allocate_ids, allocate_weights, kChannelId)); + + // BulkGetFixedFeatures + BulkFeatureExtractor extractor(nullptr, nullptr, nullptr, false, 0, 0); + EXPECT_CALL(*mock_components["component_one"], BulkGetFixedFeatures(_)) + .WillOnce(Return(0)); + EXPECT_EQ(0, session->BulkGetInputFeatures("component_one", extractor)); + + // EmitOracleLabels() + std::vector> oracle_labels = {{0, 1}, {2, 3}}; + EXPECT_CALL(*mock_components["component_one"], GetOracleLabels()) + .WillOnce(Return(oracle_labels)); + EXPECT_EQ(oracle_labels, session->EmitOracleLabels("component_one")); + + 
// IsTerminal() + bool is_terminal = true; + EXPECT_CALL(*mock_components["component_one"], IsTerminal()) + .WillOnce(Return(is_terminal)); + EXPECT_EQ(is_terminal, session->IsTerminal("component_one")); + + // FinalizeData() + EXPECT_CALL(*mock_components["component_one"], FinalizeData()); + session->FinalizeData("component_one"); +} + +TEST(ComputeSessionImplTest, InterfaceRequiresReady) { + MasterSpec spec; + GridPoint hyperparams; + + AddComponentToSpec("component_one", "UnreadyComponent", &spec); + + // Create a pool, substituting a mock component builder. + ComputeSessionPool pool(spec, hyperparams); + auto session = pool.GetSession(); + session->SetInputData({"arbitrary_data"}); + + // Call the functions which should die if the component isn't ready. + EXPECT_DEATH(session->BatchSize("component_one"), + "without first initializing it"); + EXPECT_DEATH(session->BeamSize("component_one"), + "without first initializing it"); + EXPECT_DEATH(session->AdvanceFromOracle("component_one"), + "without first initializing it"); + EXPECT_DEATH(session->EmitOracleLabels("component_one"), + "without first initializing it"); + EXPECT_DEATH(session->IsTerminal("component_one"), + "without first initializing it"); + EXPECT_DEATH(session->FinalizeData("component_one"), + "without first initializing it"); + + constexpr int kScoreMatrixLength = 3; + const float score_matrix[kScoreMatrixLength] = {1.0, 2.3, 4.5}; + EXPECT_DEATH(session->AdvanceFromPrediction("component_one", score_matrix, + kScoreMatrixLength), + "without first initializing it"); + constexpr int kArbitraryChannelId = 3; + EXPECT_DEATH(session->GetInputFeatures("component_one", nullptr, nullptr, + nullptr, kArbitraryChannelId), + "without first initializing it"); + BulkFeatureExtractor extractor(nullptr, nullptr, nullptr, false, 0, 0); + EXPECT_DEATH(session->BulkGetInputFeatures("component_one", extractor), + "without first initializing it"); + EXPECT_DEATH( + session->GetTranslatedLinkFeatures("component_one", 
kArbitraryChannelId), + "without first initializing it"); +} + +} // namespace dragnn +} // namespace syntaxnet diff --git a/syntaxnet/dragnn/core/compute_session_pool.cc b/syntaxnet/dragnn/core/compute_session_pool.cc new file mode 100644 index 0000000000000000000000000000000000000000..690c5579470d95a2d810e078312311ae2a72c107 --- /dev/null +++ b/syntaxnet/dragnn/core/compute_session_pool.cc @@ -0,0 +1,89 @@ +#include "dragnn/core/compute_session_pool.h" + +#include + +#include "dragnn/core/component_registry.h" +#include "dragnn/core/compute_session_impl.h" +#include "tensorflow/core/platform/logging.h" + +namespace syntaxnet { +namespace dragnn { + +using tensorflow::mutex_lock; + +ComputeSessionPool::ComputeSessionPool(const MasterSpec &master_spec, + const GridPoint &hyperparams) + : master_spec_(master_spec), + hyperparams_(hyperparams), + num_unique_sessions_(0) { + // Create a default component builder function. This function looks up + // components in the component registry and returns them. + component_builder_ = []( + const string &component_name, + const string &backend_type) -> std::unique_ptr { + VLOG(2) << "Creating component " << component_name << " with backend " + << backend_type; + std::unique_ptr component(Component::Create(backend_type)); + return component; + }; + + // Create a default session builder function. This function returns a + // ComputeSessionImpl that uses the currently set component_builder_ + // function to create its components. 
+ session_builder_ = [this]() { + return std::unique_ptr( + new ComputeSessionImpl(num_unique_sessions_, this->component_builder_)); + }; +} + +ComputeSessionPool::~ComputeSessionPool() { + LOG(INFO) << "Destroying pool: total number of sessions created = " + << num_unique_sessions_; + if (sessions_.size() < num_unique_sessions_) { + LOG(WARNING) << "Destroying pool: number of unreturned sessions = " + << (num_unique_sessions_ - sessions_.size()); + } +} + +void ComputeSessionPool::SetComputeSessionBuilder( + std::function()> session_builder) { + mutex_lock lock(lock_); + session_builder_ = std::move(session_builder); +} + +void ComputeSessionPool::SetComponentBuilder( + std::function(const string &component_name, + const string &backend_type)> + component_builder) { + mutex_lock lock(lock_); + component_builder_ = std::move(component_builder); +} + +std::unique_ptr ComputeSessionPool::GetSession() { + mutex_lock lock(lock_); + std::unique_ptr session_ptr; + if (sessions_.empty()) { + // There are no available sessions, so create and initialize one. + VLOG(2) << "Creating new session."; + session_ptr = session_builder_(); + num_unique_sessions_++; + session_ptr->Init(master_spec_, hyperparams_); + } else { + // Get the last free session, and remove it from the free sessions vector. 
+ VLOG(2) << "Reusing session from pool of size " << sessions_.size(); + session_ptr = std::move(sessions_.back()); + sessions_.pop_back(); + + session_ptr->ResetSession(); + } + return session_ptr; +} + +void ComputeSessionPool::ReturnSession( + std::unique_ptr session) { + mutex_lock lock(lock_); + sessions_.push_back(std::move(session)); +} + +} // namespace dragnn +} // namespace syntaxnet diff --git a/syntaxnet/dragnn/core/compute_session_pool.h b/syntaxnet/dragnn/core/compute_session_pool.h new file mode 100644 index 0000000000000000000000000000000000000000..f116c9411e4d3cf70b8f63609f7507c31fea1a4a --- /dev/null +++ b/syntaxnet/dragnn/core/compute_session_pool.h @@ -0,0 +1,87 @@ +#ifndef NLP_SAFT_OPENSOURCE_DRAGNN_CORE_COMPUTE_SESSION_POOL_H_ +#define NLP_SAFT_OPENSOURCE_DRAGNN_CORE_COMPUTE_SESSION_POOL_H_ + +#include + +#include "dragnn/core/compute_session.h" +#include "dragnn/protos/spec.pb.h" +#include "tensorflow/core/platform/mutex.h" + +namespace syntaxnet { +namespace dragnn { + +// This pool creates and manages the reuse of ComputeSession objects. + +class ComputeSessionPool { + public: + // Create a ComputeSessionPool that creates ComputeSessions for the given + // MasterSpec and hyperparameters. + ComputeSessionPool(const MasterSpec &master_spec, + const GridPoint &hyperparams); + + virtual ~ComputeSessionPool(); + + // Get a ComputeSession. This function will attempt to use an already-created + // ComputeSession, but if none are available a new one will be created. + std::unique_ptr GetSession(); + + // Returns a ComputeSession to the backing pool. + void ReturnSession(std::unique_ptr session); + + // Returns the count of outstanding unique sessions. + int num_outstanding_sessions() { + tensorflow::mutex_lock lock(lock_); + return num_unique_sessions_ - sessions_.size(); + } + + private: + friend class ComputeSessionImplTestPoolAccessor; + friend class ComputeSessionPoolTestPoolAccessor; + + // This is a creational injection setter. 
It should be used for tests + // where we want our ComputeSessionPool to prepare and return + // MockComputeSessions instead of actual ComputeSessionImpls. + void SetComputeSessionBuilder( + std::function()> session_builder); + + // This injector will cause ComputeSessions built in this pool to use the + // passed function to create Components. This is useful when you want a + // ComputeSession to create MockComponents instead of real ones. + void SetComponentBuilder( + std::function(const string &component_name, + const string &backend_type)> + component_builder); + + // The MasterSpec that will be used to initialize ComputeSessions from this + // pool. + const MasterSpec master_spec_; + + // The hyperparameters that will be used to initialize ComputeSessions from + // this pool. + const GridPoint hyperparams_; + + // The function that is used to create ComputeSessions. + std::function()> session_builder_; + + // The function passed to ComputeSessions that will be used by that session + // to create components. + std::function(const string &component_name, + const string &backend_type)> + component_builder_; + + // ComputeSessions that are not currently being used. These sessions are not + // reset until they are requested by another thread. + std::vector> sessions_; + + // Count of the number of unique ComputeSession objects that have been + // created. Used to assign IDs to new Sessions. + int num_unique_sessions_; + + // Mutex that protects accesses to all members of this object. 
+ tensorflow::mutex lock_; +}; + +} // namespace dragnn +} // namespace syntaxnet + +#endif // NLP_SAFT_OPENSOURCE_DRAGNN_CORE_COMPUTE_SESSION_POOL_H_ diff --git a/syntaxnet/dragnn/core/compute_session_pool_test.cc b/syntaxnet/dragnn/core/compute_session_pool_test.cc new file mode 100644 index 0000000000000000000000000000000000000000..8f955fad31bc5bcbeaef5d50a6e008ccb4375e2b --- /dev/null +++ b/syntaxnet/dragnn/core/compute_session_pool_test.cc @@ -0,0 +1,211 @@ +#include "dragnn/core/compute_session_pool.h" + +#include + +#include + +#include "dragnn/core/compute_session.h" +#include "dragnn/core/test/generic.h" +#include "dragnn/core/test/mock_component.h" +#include "dragnn/core/test/mock_compute_session.h" +#include "tensorflow/core/platform/env.h" +#include "tensorflow/core/platform/test.h" + +namespace syntaxnet { +namespace dragnn { + +using syntaxnet::test::EqualsProto; +using testing::Return; +using testing::Invoke; +using testing::MockFunction; + +class ComputeSessionPoolTestPoolAccessor { + public: + static void SetComponentBuilder( + ComputeSessionPool *pool, + std::function(const string &component_name, + const string &backend_type)> + component_builder_function) { + pool->SetComponentBuilder(std::move(component_builder_function)); + } + + static void SetSessionBuilder(ComputeSessionPool *pool, + std::function()> + session_builder_function) { + pool->SetComputeSessionBuilder(std::move(session_builder_function)); + } +}; + +TEST(ComputeSessionPoolTest, DefaultConstructorWorks) { + MasterSpec spec; + GridPoint hyperparams; + ComputeSessionPool pool(spec, hyperparams); + auto request = pool.GetSession(); + EXPECT_NE(request, nullptr); +} + +TEST(ComputeSessionPoolTest, ComponentBuilderInjectionWorks) { + MasterSpec spec; + auto component = spec.add_component(); + component->set_name("test_component_name"); + auto backend = component->mutable_backend(); + backend->set_registered_name("arbitrary_component"); + GridPoint hyperparams; + + ComputeSessionPool 
pool(spec, hyperparams); + + // Set up a mock component builder. + MockFunction(const string &component_name, + const string &backend_type)> + mock_component_builder; + auto mock_creation_function = [](string, string) { + return std::unique_ptr(new MockComponent()); + }; + EXPECT_CALL(mock_component_builder, + Call("test_component_name", "arbitrary_component")) + .WillOnce(Invoke(mock_creation_function)); + ComputeSessionPoolTestPoolAccessor::SetComponentBuilder( + &pool, mock_component_builder.AsStdFunction()); + + // Now, when the session is requested, the mock component builder should see + // the expected call. + auto request = pool.GetSession(); + EXPECT_NE(request, nullptr); +} + +TEST(ComputeSessionPoolTest, CreatesNewSessionIfNoSessionsExist) { + // We don't need to fill these for this test. + MasterSpec spec; + GridPoint hyperparams; + ComputeSessionPool pool(spec, hyperparams); + + // Create a function that will track calls to the session builder. + MockFunction()> mock_session_builder; + + // Initialize expectations for a request for a ComputeSession. + std::unique_ptr session_one(new MockComputeSession()); + MockComputeSession *session_one_ptr = session_one.get(); + auto mock_creation_function = [&session_one]() { + return std::move(session_one); + }; + EXPECT_CALL(mock_session_builder, Call()) + .WillOnce(Invoke(mock_creation_function)) + .RetiresOnSaturation(); + EXPECT_CALL(*session_one_ptr, + Init(EqualsProto(spec), EqualsProto(hyperparams))); + + // Initialize expectations for another request for a ComputeSession. 
+ std::unique_ptr session_two(new MockComputeSession()); + MockComputeSession *session_two_ptr = session_two.get(); + auto mock_creation_function_two = [&session_two]() { + return std::move(session_two); + }; + EXPECT_CALL(mock_session_builder, Call()) + .WillOnce(Invoke(mock_creation_function_two)) + .RetiresOnSaturation(); + EXPECT_CALL(*session_two_ptr, + Init(EqualsProto(spec), EqualsProto(hyperparams))); + + // Inject the function to the pool. + ComputeSessionPoolTestPoolAccessor::SetSessionBuilder( + &pool, mock_session_builder.AsStdFunction()); + + // The first call will recieve the second session because of how the mocks go. + auto first_request = pool.GetSession(); + EXPECT_EQ(first_request.get(), session_two_ptr); + + auto second_request = pool.GetSession(); + EXPECT_EQ(second_request.get(), session_one_ptr); +} + +TEST(ComputeSessionPoolTest, ReusesAvailableSessions) { + // We don't need to fill these for this test. + MasterSpec spec; + GridPoint hyperparams; + ComputeSessionPool pool(spec, hyperparams); + + // Create a function that will track calls to the session builder. + MockFunction()> mock_session_builder; + + // Initialize expectations for a request for a ComputeSession. + std::unique_ptr session_one(new MockComputeSession()); + MockComputeSession *session_one_ptr = session_one.get(); + auto mock_creation_function = [&session_one]() { + return std::move(session_one); + }; + EXPECT_CALL(mock_session_builder, Call()) + .WillOnce(Invoke(mock_creation_function)) + .RetiresOnSaturation(); + EXPECT_CALL(*session_one_ptr, + Init(EqualsProto(spec), EqualsProto(hyperparams))); + + // Initialize expectations for another request for a ComputeSession. 
+ std::unique_ptr session_two(new MockComputeSession()); + MockComputeSession *session_two_ptr = session_two.get(); + auto mock_creation_function_two = [&session_two]() { + return std::move(session_two); + }; + EXPECT_CALL(mock_session_builder, Call()) + .WillOnce(Invoke(mock_creation_function_two)) + .RetiresOnSaturation(); + EXPECT_CALL(*session_two_ptr, + Init(EqualsProto(spec), EqualsProto(hyperparams))); + + // Inject the function to the pool. + ComputeSessionPoolTestPoolAccessor::SetSessionBuilder( + &pool, mock_session_builder.AsStdFunction()); + + // The first call will recieve the second session because of how the mocks go. + auto first_request = pool.GetSession(); + EXPECT_EQ(1, pool.num_outstanding_sessions()); + EXPECT_EQ(first_request.get(), session_two_ptr); + + // Return the first pointer. After this, the second request should get that + // pointer. + EXPECT_CALL(*session_two_ptr, ResetSession()); + pool.ReturnSession(std::move(first_request)); + EXPECT_EQ(0, pool.num_outstanding_sessions()); + auto second_request = pool.GetSession(); + EXPECT_EQ(1, pool.num_outstanding_sessions()); + EXPECT_EQ(second_request.get(), session_two_ptr); + + // There are now no spare sessions, so the next session request should + // create a second session. 
+ auto third_request = pool.GetSession(); + EXPECT_EQ(2, pool.num_outstanding_sessions()); + EXPECT_EQ(third_request.get(), session_one_ptr); +} + +TEST(ComputeSessionPoolTest, AssignsUniqueIds) { + MasterSpec spec; + GridPoint hyperparams; + ComputeSessionPool pool(spec, hyperparams); + auto session = pool.GetSession(); + auto session_2 = pool.GetSession(); + EXPECT_NE(session->Id(), session_2->Id()); +} + +TEST(ComputeSessionPoolTest, SupportsMultithreadedAccess) { + MasterSpec spec; + GridPoint hyperparams; + ComputeSessionPool pool(spec, hyperparams); + + std::vector> request_threads; + constexpr int kNumThreadsToTest = 100; + for (int i = 0; i < kNumThreadsToTest; ++i) { + request_threads.push_back(std::unique_ptr( + tensorflow::Env::Default()->StartThread( + tensorflow::ThreadOptions(), "thread", + [this, &pool] { auto session = pool.GetSession(); }))); + } + + // Deleting a tensorflow::Thread blocks until the thread exits, + // so clearing the vector blocks until all threads have exited. + request_threads.clear(); + + // Make sure all the threads got their session. + EXPECT_EQ(kNumThreadsToTest, pool.num_outstanding_sessions()); +} + +} // namespace dragnn +} // namespace syntaxnet diff --git a/syntaxnet/dragnn/core/index_translator.cc b/syntaxnet/dragnn/core/index_translator.cc new file mode 100644 index 0000000000000000000000000000000000000000..98d6cde994248941e7e926f80d4180a469cc91c6 --- /dev/null +++ b/syntaxnet/dragnn/core/index_translator.cc @@ -0,0 +1,67 @@ +#include "dragnn/core/index_translator.h" + +#include "tensorflow/core/platform/logging.h" + +namespace syntaxnet { +namespace dragnn { + +using Index = IndexTranslator::Index; + +IndexTranslator::IndexTranslator(const std::vector &path, + const string &method) + : path_(path), method_(method) { + if (method_ == "identity") { + // Identity lookup: Return the feature index. 
+ step_lookup_ = [](int batch_index, int beam_index, int feature) { + return feature; + }; + } else if (method_ == "history") { + // History lookup: Return the number of steps taken less the feature. + step_lookup_ = [this](int batch_index, int beam_index, int feature) { + if (feature > path_.back()->StepsTaken(batch_index) - 1) { + VLOG(2) << "Translation to outside: feature is " << feature + << " and steps_taken is " + << path_.back()->StepsTaken(batch_index); + return -1; + } + return ((path_.back()->StepsTaken(batch_index) - 1) - feature); + }; + } else { + // Component defined lookup: Get the lookup function from the component. + // If the lookup function is not defined, this function will CHECK. + step_lookup_ = path_.back()->GetStepLookupFunction(method_); + } +} + +Index IndexTranslator::Translate(int batch_index, int beam_index, + int feature_value) { + Index translated_index; + translated_index.batch_index = batch_index; + VLOG(2) << "Translation requested (type: " << method_ << ") for batch " + << batch_index << " beam " << beam_index << " feature " + << feature_value; + + // For all save the last item in the path, get the source index for the + // previous component. + int current_beam_index = beam_index; + VLOG(2) << "Beam index before walk is " << current_beam_index; + for (int i = 0; i < path_.size() - 1; ++i) { + // Backtrack through previous components. For each non-final component, + // figure out what state in the prior component was used to initialize the + // state at the current beam index. 
+ current_beam_index = + path_.at(i)->GetSourceBeamIndex(current_beam_index, batch_index); + VLOG(2) << "Beam index updated to " << current_beam_index; + } + VLOG(2) << "Beam index after walk is " << current_beam_index; + translated_index.step_index = + step_lookup_(batch_index, current_beam_index, feature_value); + VLOG(2) << "Translated step index is " << translated_index.step_index; + translated_index.beam_index = path_.back()->GetBeamIndexAtStep( + translated_index.step_index, current_beam_index, batch_index); + VLOG(2) << "Translated beam index is " << translated_index.beam_index; + return translated_index; +} + +} // namespace dragnn +} // namespace syntaxnet diff --git a/syntaxnet/dragnn/core/index_translator.h b/syntaxnet/dragnn/core/index_translator.h new file mode 100644 index 0000000000000000000000000000000000000000..973675fa1a4e19b197e3011e125da517194e4efc --- /dev/null +++ b/syntaxnet/dragnn/core/index_translator.h @@ -0,0 +1,68 @@ +#ifndef NLP_SAFT_OPENSOURCE_DRAGNN_CORE_INDEX_TRANSLATOR_H_ +#define NLP_SAFT_OPENSOURCE_DRAGNN_CORE_INDEX_TRANSLATOR_H_ + +#include +#include + +#include "dragnn/core/interfaces/component.h" +#include "dragnn/core/interfaces/transition_state.h" + +namespace syntaxnet { +namespace dragnn { + +// A IndexTranslator provides an interface into the data of another component. +// It allows one component to look up a translated array index from the history +// or state of another component. +// +// When it is created, it is passed a pointer to the source component (that is, +// the component whose data it will be accessing) and a string representing the +// type of data access it will perform. There are two universal data access +// methods - "identity" and "history" - and components can declare more via +// their GetStepLookupFunction function. + +class IndexTranslator { + public: + // Index into a TensorArray. Provides a given step, and the beam index within + // that step, for TensorArray access to data in the given batch. 
+ struct Index { + int batch_index = -1; + int beam_index = -1; + int step_index = -1; + }; + + // Creates a new IndexTranslator with access method as determined by the + // passed string. The Translator will walk the path "path" in order, and will + // translate from the last Component in the path. + IndexTranslator(const std::vector &path, const string &method); + + // Returns an index in (step, beam, batch) index space as computed from the + // given feature value. + Index Translate(int batch_index, int beam_index, int feature_value); + + // Returns the path to be walked by this translator. + const std::vector &path() const { return path_; } + + // Returns the method to be used by this translator. + const string &method() const { return method_; } + + private: + // The ordered list of components that must be walked to get from the + // requesting component to the source component. This vector has the + // requesting component at index 0 and the source component at the end. If + // the requesting component is the source component, this vector has only one + // entry. + const std::vector path_; + + // The function this translator will use to look up the step in the source + // component. The function is invoked as: + // step_lookup_(batch_index, beam_index, feature). + std::function step_lookup_; + + // This translator's method. 
+ string method_; +}; + +} // namespace dragnn +} // namespace syntaxnet + +#endif // NLP_SAFT_OPENSOURCE_DRAGNN_CORE_INDEX_TRANSLATOR_H_ diff --git a/syntaxnet/dragnn/core/index_translator_test.cc b/syntaxnet/dragnn/core/index_translator_test.cc new file mode 100644 index 0000000000000000000000000000000000000000..9960c6ff87cfc49355f2827dd7017eafad10bc80 --- /dev/null +++ b/syntaxnet/dragnn/core/index_translator_test.cc @@ -0,0 +1,180 @@ +#include "dragnn/core/index_translator.h" + +#include "dragnn/core/test/mock_component.h" +#include "dragnn/core/test/mock_transition_state.h" +#include +#include "tensorflow/core/platform/test.h" + +namespace syntaxnet { +namespace dragnn { + +using testing::MockFunction; +using testing::Return; + +TEST(IndexTranslatorTest, PerformsIdentityTranslation) { + MockComponent mock_component; + + // We are testing the Identity lookup with a single component (so, self- + // referencing) and thus we expect the translator to call GetBeamIndexAtStep + // for the step and index we pass in. + constexpr int kBeam = 4; + constexpr int kFeature = 2; + constexpr int kResultIndex = 3; + constexpr int kBatch = 99; + EXPECT_CALL(mock_component, GetBeamIndexAtStep(kFeature, kBeam, kBatch)) + .WillOnce(Return(kResultIndex)); + + // Execute! + IndexTranslator translator({&mock_component}, "identity"); + auto result = translator.Translate(kBatch, kBeam, kFeature); + EXPECT_EQ(kResultIndex, result.beam_index); + EXPECT_EQ(kFeature, result.step_index); + EXPECT_EQ(kBatch, result.batch_index); +} + +TEST(IndexTranslatorTest, PerformsHistoryTranslation) { + MockComponent mock_component; + + // We are testing the History lookup with a single component (so, self- + // referencing) and thus we expect the translator to call StepsTaken() to get + // the number of steps taken and GetBeamIndexAtStep with (total-desired). 
+ constexpr int kBeam = 4; + constexpr int kFeature = 2; + constexpr int kTotalNumberSteps = 8; + constexpr int kBatch = 99; + + // Here, the expected step result is two in from the final index, so + // (8-1) - 2, or 5. + constexpr int kExpectedResult = 5; + constexpr int kResultIndex = 3; + EXPECT_CALL(mock_component, StepsTaken(kBatch)) + .WillRepeatedly(Return(kTotalNumberSteps)); + EXPECT_CALL(mock_component, + GetBeamIndexAtStep(kExpectedResult, kBeam, kBatch)) + .WillOnce(Return(kResultIndex)); + + // Execute! + IndexTranslator translator({&mock_component}, "history"); + auto result = translator.Translate(kBatch, kBeam, kFeature); + EXPECT_EQ(kResultIndex, result.beam_index); + EXPECT_EQ(kExpectedResult, result.step_index); + EXPECT_EQ(kBatch, result.batch_index); +} + +TEST(IndexTranslatorTest, TraversesPathToLookup) { + MockComponent mock_component_a; + MockComponent mock_component_b; + MockComponent mock_component_c; + constexpr int kBatch = 99; + + // The translator should request the source index from mock component A. + constexpr int kBeam = 4; + constexpr int kSourceBIndex = 3; + EXPECT_CALL(mock_component_a, GetSourceBeamIndex(kBeam, kBatch)) + .WillOnce(Return(kSourceBIndex)); + + // The translator should use the source index from A in a source index request + // to component B. + constexpr int kSourceCIndex = 17; + EXPECT_CALL(mock_component_b, GetSourceBeamIndex(kSourceBIndex, kBatch)) + .WillOnce(Return(kSourceCIndex)); + + // The translator should request the beam index at the requested step in + // component C, using the beam index from the source index request to B. + constexpr int kFeature = 2; + constexpr int kResultIndex = 1157; + + // This is testing with an identity translator, so kFeature == kStep. + EXPECT_CALL(mock_component_c, + GetBeamIndexAtStep(kFeature, kSourceCIndex, kBatch)) + .WillOnce(Return(kResultIndex)); + + // Execute! 
+ IndexTranslator translator( + {&mock_component_a, &mock_component_b, &mock_component_c}, "identity"); + auto result = translator.Translate(kBatch, kBeam, kFeature); + EXPECT_EQ(kResultIndex, result.beam_index); + EXPECT_EQ(kFeature, result.step_index); + EXPECT_EQ(kBatch, result.batch_index); +} + +TEST(IndexTranslatorTest, RequestsArbitraryTranslationFunction) { + MockComponent mock_component; + MockFunction mock_function; + + // This test ensures that we can get an arbitrary translation function + // from the component and execute it properly. + constexpr int kBeam = 4; + constexpr int kFeature = 2; + constexpr int kFunctionResult = 10; + constexpr int kResultIndex = 3; + constexpr int kBatch = 99; + + // The arbitrary function should be called with the desired input. + EXPECT_CALL(mock_function, Call(kBatch, kBeam, kFeature)) + .WillOnce(Return(kFunctionResult)); + + // The translator should request the function from the component. + EXPECT_CALL(mock_component, GetStepLookupFunction("arbitrary_function")) + .WillOnce(Return(mock_function.AsStdFunction())); + + // The translator should call GetBeamIndexAtStep with the result of calling + // the function. + EXPECT_CALL(mock_component, + GetBeamIndexAtStep(kFunctionResult, kBeam, kBatch)) + .WillOnce(Return(kResultIndex)); + + // Execute! + IndexTranslator translator({&mock_component}, "arbitrary_function"); + auto result = translator.Translate(kBatch, kBeam, kFeature); + EXPECT_EQ(kResultIndex, result.beam_index); + EXPECT_EQ(kFunctionResult, result.step_index); + EXPECT_EQ(kBatch, result.batch_index); +} + +// This test ensures that the translation function is queried with the beam +// index for that component, and that the translation function is taken from +// the correct component. 
+TEST(IndexTranslatorTest, RequestsArbitraryTranslationAcrossComponents) { + MockComponent mock_component_a; + MockComponent mock_component_b; + MockFunction mock_function; + + // This test ensures that we can get an arbitrary translation function + // from the component and execute it properly. + constexpr int kFeature = 2; + constexpr int kFunctionResult = 10; + constexpr int kResultIndex = 3; + constexpr int kBatch = 99; + + // The translator should request the source index from mock component A. + constexpr int kBeam = 4; + constexpr int kSourceBIndex = 3; + EXPECT_CALL(mock_component_a, GetSourceBeamIndex(kBeam, kBatch)) + .WillOnce(Return(kSourceBIndex)); + + // The translator should request the function from the component. + EXPECT_CALL(mock_component_b, GetStepLookupFunction("arbitrary_function")) + .WillOnce(Return(mock_function.AsStdFunction())); + + // The arbitrary function should be called with the desired input. + EXPECT_CALL(mock_function, Call(kBatch, kSourceBIndex, kFeature)) + .WillOnce(Return(kFunctionResult)); + + // The translator should call GetBeamIndexAtStep with the result of calling + // the function. + EXPECT_CALL(mock_component_b, + GetBeamIndexAtStep(kFunctionResult, kSourceBIndex, kBatch)) + .WillOnce(Return(kResultIndex)); + + // Execute! 
+ IndexTranslator translator({&mock_component_a, &mock_component_b}, + "arbitrary_function"); + auto result = translator.Translate(kBatch, kBeam, kFeature); + EXPECT_EQ(kResultIndex, result.beam_index); + EXPECT_EQ(kFunctionResult, result.step_index); + EXPECT_EQ(kBatch, result.batch_index); +} + +} // namespace dragnn +} // namespace syntaxnet diff --git a/syntaxnet/dragnn/core/input_batch_cache.h b/syntaxnet/dragnn/core/input_batch_cache.h new file mode 100644 index 0000000000000000000000000000000000000000..495e093e159b44743833a71258c1a1599d2d66f4 --- /dev/null +++ b/syntaxnet/dragnn/core/input_batch_cache.h @@ -0,0 +1,78 @@ +#ifndef NLP_SAFT_OPENSOURCE_DRAGNN_CORE_INPUT_BATCH_CACHE_H_ +#define NLP_SAFT_OPENSOURCE_DRAGNN_CORE_INPUT_BATCH_CACHE_H_ + +#include +#include +#include + +#include "dragnn/core/interfaces/input_batch.h" +#include "tensorflow/core/platform/logging.h" + +namespace syntaxnet { +namespace dragnn { + +// A InputBatchCache holds data converted to a DRAGNN internal representation. +// It performs the conversion lazily via Data objects and caches the result. + +class InputBatchCache { + public: + // Create an empty cache.. + InputBatchCache() : stored_type_(std::type_index(typeid(void))) {} + + // Create a InputBatchCache from a single example. This copies the string. + explicit InputBatchCache(const string &data) + : stored_type_(std::type_index(typeid(void))), source_data_({data}) {} + + // Create a InputBatchCache from a vector of examples. The vector is copied. + explicit InputBatchCache(const std::vector &data) + : stored_type_(std::type_index(typeid(void))), source_data_(data) {} + + // Adds a single string to the cache. Only useable before GetAs() has been + // called. 
+ void AddData(const string &data) { + CHECK(stored_type_ == std::type_index(typeid(void))) + << "You may not add data to an InputBatchCache after the cache has " + "been converted via GetAs()."; + source_data_.emplace_back(data); + } + + // Convert the stored strings into protos and return them in a specific + // InputBatch subclass. T should always be of type InputBatch. After this + // method is called once, all further calls must be of the same data type. + template + T *GetAs() { + if (!converted_data_) { + stored_type_ = std::type_index(typeid(T)); + converted_data_.reset(new T()); + converted_data_->SetData(source_data_); + } + CHECK(std::type_index(typeid(T)) == stored_type_) + << "Attempted to convert to two object types! Existing object type was " + << stored_type_.name() << ", new object type was " + << std::type_index(typeid(T)).name(); + + return dynamic_cast(converted_data_.get()); + } + + // Return the serialized representation of the data held in the input batch + // object within this cache. + const std::vector SerializedData() const { + CHECK(converted_data_) << "Cannot return batch without data."; + return converted_data_->GetSerializedData(); + } + + private: + // The typeid of the stored data. + std::type_index stored_type_; + + // The raw data. + std::vector source_data_; + + // The converted data, contained in an InputBatch object. 
+ std::unique_ptr converted_data_; +}; + +} // namespace dragnn +} // namespace syntaxnet + +#endif // NLP_SAFT_OPENSOURCE_DRAGNN_CORE_INPUT_BATCH_CACHE_H_ diff --git a/syntaxnet/dragnn/core/input_batch_cache_test.cc b/syntaxnet/dragnn/core/input_batch_cache_test.cc new file mode 100644 index 0000000000000000000000000000000000000000..799d2cc5a7381e9c248740c716cd8a8962f50ae7 --- /dev/null +++ b/syntaxnet/dragnn/core/input_batch_cache_test.cc @@ -0,0 +1,107 @@ +#include "dragnn/core/input_batch_cache.h" + +#include "dragnn/core/interfaces/input_batch.h" +#include +#include "tensorflow/core/platform/test.h" + +namespace syntaxnet { +namespace dragnn { + +class StringData : public InputBatch { + public: + StringData() {} + + void SetData(const std::vector &data) override { + for (const auto &element : data) { + data_.push_back(element + "_converted"); + } + } + + const std::vector GetSerializedData() const override { return data_; } + + std::vector *data() { return &data_; } + + private: + std::vector data_; +}; + +class DifferentStringData : public InputBatch { + public: + DifferentStringData() {} + + void SetData(const std::vector &data) override { + for (const auto &element : data) { + data_.push_back(element + "_also_converted"); + } + } + + const std::vector GetSerializedData() const override { return data_; } + + std::vector *data() { return &data_; } + + private: + std::vector data_; +}; + +TEST(InputBatchCacheTest, ConvertsSingleInput) { + string test_string = "Foo"; + InputBatchCache generic_set(test_string); + auto data = generic_set.GetAs(); + EXPECT_EQ(data->data()->size(), 1); + EXPECT_EQ(data->data()->at(0), "Foo_converted"); +} + +TEST(InputBatchCacheTest, ConvertsAddedInput) { + string test_string = "Foo"; + InputBatchCache generic_set; + generic_set.AddData(test_string); + auto data = generic_set.GetAs(); + EXPECT_EQ(data->data()->size(), 1); + EXPECT_EQ(data->data()->at(0), "Foo_converted"); +} + +TEST(InputBatchCacheTest, ConvertsVectorOfInputs) { + 
std::vector test_inputs; + test_inputs.push_back("Foo"); + test_inputs.push_back("Bar"); + test_inputs.push_back("Baz"); + InputBatchCache generic_set(test_inputs); + auto data = generic_set.GetAs(); + EXPECT_EQ(data->data()->size(), test_inputs.size()); + EXPECT_EQ(data->data()->at(0), "Foo_converted"); + EXPECT_EQ(data->data()->at(1), "Bar_converted"); + EXPECT_EQ(data->data()->at(2), "Baz_converted"); +} + +TEST(InputBatchCacheTest, ConvertingMultipleDataTypesCausesCheck) { + string test_string = "Foo"; + InputBatchCache generic_set(test_string); + auto data = generic_set.GetAs(); + EXPECT_EQ(data->data()->at(0), "Foo_converted"); + ASSERT_DEATH(generic_set.GetAs(), + "Attempted to convert to two object types!.*"); +} + +TEST(InputBatchCacheTest, ReturnsSingleInput) { + string test_string = "Foo"; + InputBatchCache generic_set(test_string); + auto data = generic_set.GetAs(); + EXPECT_NE(nullptr, data); + auto returned = generic_set.SerializedData(); + EXPECT_EQ(returned.size(), 1); + EXPECT_EQ(returned.at(0), "Foo_converted"); +} + +TEST(InputBatchCacheTest, ConvertsAddedInputDiesAfterGetAs) { + string test_string = "Foo"; + InputBatchCache generic_set; + generic_set.AddData(test_string); + auto data = generic_set.GetAs(); + EXPECT_EQ(data->data()->size(), 1); + EXPECT_EQ(data->data()->at(0), "Foo_converted"); + EXPECT_DEATH(generic_set.AddData("YOU MAY NOT DO THIS AND IT WILL DIE."), + "after the cache has been converted"); +} + +} // namespace dragnn +} // namespace syntaxnet diff --git a/syntaxnet/dragnn/core/interfaces/BUILD b/syntaxnet/dragnn/core/interfaces/BUILD new file mode 100644 index 0000000000000000000000000000000000000000..bcd5e2e445bfef5496a6f5274f54b3790d374412 --- /dev/null +++ b/syntaxnet/dragnn/core/interfaces/BUILD @@ -0,0 +1,37 @@ +package(default_visibility = ["//visibility:public"]) + +cc_library( + name = "cloneable_transition_state", + hdrs = ["cloneable_transition_state.h"], + deps = [":transition_state"], +) + +cc_library( + name = 
"component", + hdrs = ["component.h"], + deps = [ + ":transition_state", + "//dragnn/components/util:bulk_feature_extractor", + "//dragnn/core:input_batch_cache", + "//dragnn/protos:spec_proto", + "//dragnn/protos:trace_proto", + "//syntaxnet:base", + "//syntaxnet:registry", + ], +) + +cc_library( + name = "input_batch", + hdrs = ["input_batch.h"], + deps = [ + "//syntaxnet:base", + ], +) + +cc_library( + name = "transition_state", + hdrs = ["transition_state.h"], + deps = [ + "//syntaxnet:base", + ], +) diff --git a/syntaxnet/dragnn/core/interfaces/cloneable_transition_state.h b/syntaxnet/dragnn/core/interfaces/cloneable_transition_state.h new file mode 100644 index 0000000000000000000000000000000000000000..da63a0fcbc47e073c2b3eb2530ef04d68f53ea28 --- /dev/null +++ b/syntaxnet/dragnn/core/interfaces/cloneable_transition_state.h @@ -0,0 +1,52 @@ +#ifndef NLP_SAFT_OPENSOURCE_DRAGNN_CORE_INTERFACES_CLONEABLE_TRANSITION_STATE_H_ +#define NLP_SAFT_OPENSOURCE_DRAGNN_CORE_INTERFACES_CLONEABLE_TRANSITION_STATE_H_ + +#include +#include + +#include "dragnn/core/interfaces/transition_state.h" + +namespace syntaxnet { +namespace dragnn { + +// This defines a TransitionState object that can be used with the Beam class. +// Any class designed to be used with the Beam must inherit from +// CloneableTransitionState, not TransitionState. + +template +class CloneableTransitionState : public TransitionState { + public: + ~CloneableTransitionState() override {} + + // Initialize this TransitionState from a previous TransitionState. The + // ParentBeamIndex is the location of that previous TransitionState in the + // provided beam. + void Init(const TransitionState &parent) override = 0; + + // Return the beam index of the state passed into the initializer of this + // TransitionState. + const int ParentBeamIndex() const override = 0; + + // Get the current beam index for this state. + const int GetBeamIndex() const override = 0; + + // Set the current beam index for this state. 
+ void SetBeamIndex(const int index) override = 0; + + // Get the score associated with this transition state. + const float GetScore() const override = 0; + + // Set the score associated with this transition state. + void SetScore(const float score) override = 0; + + // Depicts this state as an HTML-language string. + string HTMLRepresentation() const override = 0; + + // Produces a new state with the same backing data as this state. + virtual std::unique_ptr Clone() const = 0; +}; + +} // namespace dragnn +} // namespace syntaxnet + +#endif // NLP_SAFT_OPENSOURCE_DRAGNN_CORE_INTERFACES_CLONEABLE_TRANSITION_STATE_H_ diff --git a/syntaxnet/dragnn/core/interfaces/component.h b/syntaxnet/dragnn/core/interfaces/component.h new file mode 100644 index 0000000000000000000000000000000000000000..7475b4682721e4fe7e6e2691fdc3934e6f556c14 --- /dev/null +++ b/syntaxnet/dragnn/core/interfaces/component.h @@ -0,0 +1,126 @@ +#ifndef NLP_SAFT_OPENSOURCE_DRAGNN_CORE_INTERFACES_COMPONENT_H_ +#define NLP_SAFT_OPENSOURCE_DRAGNN_CORE_INTERFACES_COMPONENT_H_ + +#include + +#include "dragnn/components/util/bulk_feature_extractor.h" +#include "dragnn/core/input_batch_cache.h" +#include "dragnn/core/interfaces/transition_state.h" +#include "dragnn/protos/spec.pb.h" +#include "dragnn/protos/trace.pb.h" +#include "syntaxnet/registry.h" + +namespace syntaxnet { +namespace dragnn { + +class Component : public RegisterableClass { + public: + virtual ~Component() {} + + // Initializes this component from the spec. + virtual void InitializeComponent(const ComponentSpec &spec) = 0; + + // Provides the previous beam to the component. + virtual void InitializeData( + const std::vector> &states, + int max_beam_size, InputBatchCache *input_data) = 0; + + // Returns true if the component has had InitializeData called on it since + // the last time it was reset. + virtual bool IsReady() const = 0; + + // Initializes the component for tracing execution, resetting any existing + // traces. 
This will typically have the side effect of slowing down all + // subsequent Component calculations and storing a trace in memory that can be + // returned by GetTraceProtos(). + virtual void InitializeTracing() = 0; + + // Disables tracing, freeing any associated traces and avoiding triggering + // additional computation in the future. + virtual void DisableTracing() = 0; + + // Returns the string name of this component. + virtual string Name() const = 0; + + // Returns the current batch size of the component's underlying data. + virtual int BatchSize() const = 0; + + // Returns the maximum beam size of this component. + virtual int BeamSize() const = 0; + + // Returns the number of steps taken by this component so far. + virtual int StepsTaken(int batch_index) const = 0; + + // Return the beam index of the item which is currently at index + // 'index', when the beam was at step 'step', for batch element 'batch'. + virtual int GetBeamIndexAtStep(int step, int current_index, + int batch) const = 0; + + // Return the source index of the item which is currently at index 'index' + // for batch element 'batch'. This index is into the final beam of the + // Component that this Component was initialized from. + virtual int GetSourceBeamIndex(int current_index, int batch) const = 0; + + // Request a translation function based on the given method string. + // The translation function will be called with arguments (beam, batch, value) + // and should return the step index corresponding to the given value, for the + // data in the given beam and batch. + virtual std::function GetStepLookupFunction( + const string &method) = 0; + + // Advances this component from the given transition matrix. + virtual void AdvanceFromPrediction(const float transition_matrix[], + int transition_matrix_length) = 0; + + // Advances this component from the state oracles. + virtual void AdvanceFromOracle() = 0; + + // Returns true if all states within this component are terminal. 
+ virtual bool IsTerminal() const = 0; + + // Returns the current batch of beams for this component. + virtual std::vector> GetBeam() = 0; + + // Extracts and populates the vector of FixedFeatures for the specified + // channel. Each functor allocates storage space for the indices, the IDs, and + // the weights (respectively). + virtual int GetFixedFeatures( + std::function allocate_indices, + std::function allocate_ids, + std::function allocate_weights, + int channel_id) const = 0; + + // Extracts and populates all FixedFeatures for all channels, advancing this + // component via the oracle until it is terminal. This call uses a + // BulkFeatureExtractor object to contain the functors and other information. + virtual int BulkGetFixedFeatures(const BulkFeatureExtractor &extractor) = 0; + + // Extracts and returns the vector of LinkFeatures for the specified + // channel. Note: these are NOT translated. + virtual std::vector GetRawLinkFeatures( + int channel_id) const = 0; + + // Returns a vector of oracle labels for each element in the beam and + // batch. + virtual std::vector> GetOracleLabels() const = 0; + + // Annotate the underlying data object with the results of this Component's + // calculation. + virtual void FinalizeData() = 0; + + // Reset this component. + virtual void ResetComponent() = 0; + + // Get a vector of all traces managed by this component. + virtual std::vector> GetTraceProtos() const = 0; + + // Add the translated link features (done outside the component) to the traces + // managed by this component. 
+ virtual void AddTranslatedLinkFeaturesToTrace( + const std::vector &features, int channel_id) = 0; +}; + +} // namespace dragnn +} // namespace syntaxnet + +#endif // NLP_SAFT_OPENSOURCE_DRAGNN_CORE_INTERFACES_COMPONENT_H_ diff --git a/syntaxnet/dragnn/core/interfaces/input_batch.h b/syntaxnet/dragnn/core/interfaces/input_batch.h new file mode 100644 index 0000000000000000000000000000000000000000..f4e20a0b14f8ec685fdc7f5986a39dd44c48b695 --- /dev/null +++ b/syntaxnet/dragnn/core/interfaces/input_batch.h @@ -0,0 +1,30 @@ +#ifndef NLP_SAFT_OPENSOURCE_DRAGNN_CORE_INTERFACES_INPUT_BATCH_H_ +#define NLP_SAFT_OPENSOURCE_DRAGNN_CORE_INTERFACES_INPUT_BATCH_H_ + +#include +#include + +#include "syntaxnet/base.h" + +namespace syntaxnet { +namespace dragnn { + +// An InputBatch object converts strings into a given data type. It is used to +// abstract DRAGNN internal data typing. Each internal DRAGNN data type should +// subclass InputBatch, with a public accessor to the type in question. + +class InputBatch { + public: + virtual ~InputBatch() {} + + // Set the data to translate to the subclass' data type. + virtual void SetData(const std::vector &data) = 0; + + // Translate the underlying data back to a vector of strings, as appropriate. 
+ virtual const std::vector GetSerializedData() const = 0; +}; + +} // namespace dragnn +} // namespace syntaxnet + +#endif // NLP_SAFT_OPENSOURCE_DRAGNN_CORE_INTERFACES_INPUT_BATCH_H_ diff --git a/syntaxnet/dragnn/core/interfaces/transition_state.h b/syntaxnet/dragnn/core/interfaces/transition_state.h new file mode 100644 index 0000000000000000000000000000000000000000..a698c64768adaabb44e0fe44e8efd5c3df383938 --- /dev/null +++ b/syntaxnet/dragnn/core/interfaces/transition_state.h @@ -0,0 +1,53 @@ +#ifndef NLP_SAFT_OPENSOURCE_DRAGNN_CORE_INTERFACES_TRANSITION_STATE_H_ +#define NLP_SAFT_OPENSOURCE_DRAGNN_CORE_INTERFACES_TRANSITION_STATE_H_ + +#include +#include + +#include "syntaxnet/base.h" + +namespace syntaxnet { +namespace dragnn { + +// TransitionState defines the minimal interface required to pass data between +// Component objects. It is used to initialize one Component from the output of +// another, and every backend should define one. Note that inheriting from +// TransitionState directly is not sufficient to use the Beam class, which +// requires extra functionality given by inheriting from the +// ClonableTransitionState interface. (ClonableTransitionState is a subclass +// of TransitionState, so inheriting from ClonableTransitionState is sufficient +// to allow Components to pass your backing states.) + +class TransitionState { + public: + virtual ~TransitionState() {} + + // Initialize this TransitionState from a previous TransitionState. The + // ParentBeamIndex is the location of that previous TransitionState in the + // provided beam. + virtual void Init(const TransitionState &parent) = 0; + + // Return the beam index of the state passed into the initializer of this + // TransitionState. + virtual const int ParentBeamIndex() const = 0; + + // Get the current beam index for this state. + virtual const int GetBeamIndex() const = 0; + + // Set the current beam index for this state. 
+ virtual void SetBeamIndex(const int index) = 0; + + // Get the score associated with this transition state. + virtual const float GetScore() const = 0; + + // Set the score associated with this transition state. + virtual void SetScore(const float score) = 0; + + // Depicts this state as an HTML-language string. + virtual string HTMLRepresentation() const = 0; +}; + +} // namespace dragnn +} // namespace syntaxnet + +#endif // NLP_SAFT_OPENSOURCE_DRAGNN_CORE_INTERFACES_TRANSITION_STATE_H_ diff --git a/syntaxnet/dragnn/core/interfaces/transition_state_starter_test.cc b/syntaxnet/dragnn/core/interfaces/transition_state_starter_test.cc new file mode 100644 index 0000000000000000000000000000000000000000..80466b9cc71ff15bc62831542a57cb18253c06d8 --- /dev/null +++ b/syntaxnet/dragnn/core/interfaces/transition_state_starter_test.cc @@ -0,0 +1,113 @@ +#include "dragnn/core/test/mock_transition_state.h" +#include +#include "testing/base/public/googletest.h" +#include "testing/base/public/gunit.h" + +// This test suite is intended to validate the contracts that the DRAGNN +// system expects from all transition state subclasses. Developers creating +// new TransitionState subclasses should copy this test and modify it as needed, +// using it to ensure their state conforms to DRAGNN expectations. + +namespace syntaxnet { +namespace dragnn { + +using testing::Return; + +// When this test is instantiated, this function should be changed to +// instantiate a TransitionState subclass of the appropriate type instead +// of Transitionstate-> +std::unique_ptr CreateState() { + std::unique_ptr test_state(new TransitionState()); + return test_state; +} + +// Validates the consistency of the beam index setter and getter. 
+TEST(TransitionStateInterfaceTest, CanSetAndGetBeamIndex) { + // Create and initialize a test state-> + MockTransitionState mock_state; + auto test_state = CreateState(); + test_state->Init(mock_state); + + constexpr int kOldBeamIndex = 12; + test_state->SetBeamIndex(kOldBeamIndex); + EXPECT_EQ(test_state->GetBeamIndex(), kOldBeamIndex); + + constexpr int kNewBeamIndex = 7; + test_state->SetBeamIndex(kNewBeamIndex); + EXPECT_EQ(test_state->GetBeamIndex(), kNewBeamIndex); +} + +// Validates the consistency of the score setter and getter. +TEST(TransitionStateInterfaceTest, CanSetAndGetScore) { + // Create and initialize a test state-> + MockTransitionState mock_state; + auto test_state = CreateState(); + test_state->Init(mock_state); + + constexpr float kOldScore = 12.1; + test_state->SetScore(kOldScore); + EXPECT_EQ(test_state->GetScore(), kOldScore); + + constexpr float kNewScore = 7.2; + test_state->SetScore(kNewScore); + EXPECT_EQ(test_state->GetScore(), kNewScore); +} + +// This test ensures that the initializing state's current index is saved +// as the parent beam index of the state being initialized. +TEST(TransitionStateInterfaceTest, ReportsParentBeamIndex) { + // Create a mock transition state that wil report a specific current index. + // This index should become the parent state index for the test state-> + MockTransitionState mock_state; + constexpr int kParentBeamIndex = 1138; + EXPECT_CALL(mock_state, GetBeamIndex()) + .WillRepeatedly(Return(kParentBeamIndex)); + + auto test_state = CreateState(); + test_state->Init(mock_state); + EXPECT_EQ(test_state->ParentBeamIndex(), kParentBeamIndex); +} + +// This test ensures that the initializing state's current score is saved +// as the current score of the state being initialized. +TEST(TransitionStateInterfaceTest, InitializationCopiesParentScore) { + // Create a mock transition state that wil report a specific current index. 
+ // This index should become the parent state index for the test state-> + MockTransitionState mock_state; + constexpr float kParentScore = 24.12; + EXPECT_CALL(mock_state, GetScore()).WillRepeatedly(Return(kParentScore)); + + auto test_state = CreateState(); + test_state->Init(mock_state); + EXPECT_EQ(test_state->GetScore(), kParentScore); +} + +// This test ensures that calling Clone maintains the state data (parent beam +// index, beam index, score, etc.) of the state that was cloned. +TEST(TransitionStateInterfaceTest, CloningMaintainsState) { + // Create and initialize the state-> + MockTransitionState mock_state; + constexpr int kParentBeamIndex = 1138; + EXPECT_CALL(mock_state, GetBeamIndex()) + .WillRepeatedly(Return(kParentBeamIndex)); + auto test_state = CreateState(); + test_state->Init(mock_state); + + // Validate the internal state of the test state. + constexpr float kOldScore = 20.0; + test_state->SetScore(kOldScore); + EXPECT_EQ(test_state->GetScore(), kOldScore); + constexpr int kOldBeamIndex = 12; + test_state->SetBeamIndex(kOldBeamIndex); + EXPECT_EQ(test_state->GetBeamIndex(), kOldBeamIndex); + + auto clone = test_state->Clone(); + + // The clone should have identical state to the old state. 
+ EXPECT_EQ(clone->ParentBeamIndex(), kParentBeamIndex); + EXPECT_EQ(clone->GetScore(), kOldScore); + EXPECT_EQ(clone->GetBeamIndex(), kOldBeamIndex); +} + +} // namespace dragnn +} // namespace syntaxnet diff --git a/syntaxnet/dragnn/core/ops/compute_session_op.cc b/syntaxnet/dragnn/core/ops/compute_session_op.cc new file mode 100644 index 0000000000000000000000000000000000000000..c8ea8f4b7c9412d8c63bcec7b7b61e41885a99ce --- /dev/null +++ b/syntaxnet/dragnn/core/ops/compute_session_op.cc @@ -0,0 +1,70 @@ +#include "dragnn/core/ops/compute_session_op.h" + +#include "dragnn/core/compute_session.h" +#include "dragnn/core/resource_container.h" +#include "tensorflow/core/framework/tensor_shape.h" + +namespace syntaxnet { +namespace dragnn { + +using tensorflow::OpKernel; +using tensorflow::OpKernelConstruction; +using tensorflow::OpKernelContext; +using tensorflow::Tensor; +using tensorflow::TensorShape; +using tensorflow::errors::InvalidArgument; + +typedef ResourceContainer ComputeSessionResource; + +ComputeSessionOp::ComputeSessionOp(OpKernelConstruction *context) + : OpKernel(context) { + OP_REQUIRES(context, context->num_inputs() > 0, + InvalidArgument("Must declare at least one input of type string " + "for the ComputeSession handle.")); + OP_REQUIRES(context, context->input_type(0) == tensorflow::DT_STRING, + InvalidArgument("Must declare at least one input of type string " + "for the ComputeSession handle.")); + OP_REQUIRES_OK(context, context->GetAttr("component", &component_name_)); +} + +// Computes extracts the state from the resource manager and calls +// ComputeWithState(). If OutputsHandle() is true, also outputs the handle for +// subsequent ops. +void ComputeSessionOp::Compute(OpKernelContext *context) { + // Validates the input/output tensors and the op attrs. 
+ if (RequiresComponentName()) { + OP_REQUIRES(context, !component_name_.empty(), + InvalidArgument("Required \"component\" attribute is empty.")); + } + if (OutputsHandle()) { + OP_REQUIRES(context, context->num_outputs() > 0, + InvalidArgument( + "Must declare at least one output of type string " + "for the ComputeSession handle if OutputsHandle is true.")); + OP_REQUIRES(context, + context->expected_output_dtype(0) == tensorflow::DT_STRING, + InvalidArgument( + "Must declare at least one output of type string " + "for the ComputeSession handle if OutputsHandle is true.")); + } + + // Gets the relevant ComputeSessionResource and computes with it. + auto handle = context->input(0).vec(); + ComputeSessionResource *session_resource; + OP_REQUIRES_OK(context, + context->resource_manager()->Lookup( + handle(0), handle(1), &session_resource)); + ComputeWithState(context, session_resource->get()); + + // Outputs the passed handle, if necessary, allowing op dependency chains. + if (OutputsHandle()) { + Tensor *output; + OP_REQUIRES_OK(context, + context->allocate_output(0, TensorShape({2}), &output)); + output->vec() = handle; + } + session_resource->Unref(); +} + +} // namespace dragnn +} // namespace syntaxnet diff --git a/syntaxnet/dragnn/core/ops/compute_session_op.h b/syntaxnet/dragnn/core/ops/compute_session_op.h new file mode 100644 index 0000000000000000000000000000000000000000..d95328f401ab554b7ae14cfffe702683d24e0b4d --- /dev/null +++ b/syntaxnet/dragnn/core/ops/compute_session_op.h @@ -0,0 +1,54 @@ +#ifndef NLP_SAFT_OPENSOURCE_DRAGNN_CORE_OPS_COMPUTE_SESSION_OP_H_ +#define NLP_SAFT_OPENSOURCE_DRAGNN_CORE_OPS_COMPUTE_SESSION_OP_H_ + +#include + +#include "dragnn/core/compute_session.h" +#include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor" +#include "tensorflow/core/framework/op_kernel.h" +#include "tensorflow/core/framework/tensor.h" + +namespace syntaxnet { +namespace dragnn { + +// Abstract base class: Given a MasterState and a component name, runs 
some op +// on the component state. The first input is always the handle. If +// OutputsHandle() is true in the derived class, then the first output will also +// be the handle. +class ComputeSessionOp : public tensorflow::OpKernel { + public: + explicit ComputeSessionOp(tensorflow::OpKernelConstruction *context); + + // Virtual Compute()-like function that assumes the state has been extracted + // from the handle. + virtual void ComputeWithState(tensorflow::OpKernelContext *context, + ComputeSession *compute_session) = 0; + + // Compute extracts the state from the resource manager and calls + // ComputeWithState(). If OutputsHandle() is true, also outputs the handle for + // subsequent ops. + void Compute(tensorflow::OpKernelContext *context) override; + + protected: + // If true, then the handle will be the first output of this op. + virtual bool OutputsHandle() const = 0; + + // If true, then the constructor will check that the "component_name" + // attribute is set. + virtual bool RequiresComponentName() const = 0; + + // Returns the component name. + string component_name() const { + CHECK(RequiresComponentName()); + return component_name_; + } + + private: + // Name of the component used by this op. 
+ string component_name_; +}; + +} // namespace dragnn +} // namespace syntaxnet + +#endif // NLP_SAFT_OPENSOURCE_DRAGNN_CORE_OPS_COMPUTE_SESSION_OP_H_ diff --git a/syntaxnet/dragnn/core/ops/dragnn_bulk_op_kernels.cc b/syntaxnet/dragnn/core/ops/dragnn_bulk_op_kernels.cc new file mode 100644 index 0000000000000000000000000000000000000000..51c05ded669e24d26539f1787cfd91fda62f70ab --- /dev/null +++ b/syntaxnet/dragnn/core/ops/dragnn_bulk_op_kernels.cc @@ -0,0 +1,396 @@ +#include +#include +#include +#include + +#include "dragnn/core/ops/compute_session_op.h" +#include "dragnn/core/resource_container.h" +#include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor" +#include "tensorflow/core/framework/numeric_types.h" +#include "tensorflow/core/framework/op_kernel.h" +#include "tensorflow/core/framework/tensor.h" +#include "tensorflow/core/framework/tensor_shape.h" +#include "tensorflow/core/framework/types.pb.h" +#include "tensorflow/core/lib/core/status.h" +#include "tensorflow/core/platform/types.h" + +using std::vector; + +using tensorflow::DEVICE_CPU; +using tensorflow::DT_FLOAT; +using tensorflow::DT_INT32; +using tensorflow::DT_INT64; +using tensorflow::DT_STRING; +using tensorflow::DataType; +using tensorflow::OpKernel; +using tensorflow::OpKernelConstruction; +using tensorflow::OpKernelContext; +using tensorflow::quint8; +using tensorflow::Status; +using tensorflow::Tensor; +using tensorflow::TensorShape; +using tensorflow::uint8; + +namespace syntaxnet { +namespace dragnn { + +namespace { + +// Helper struct for resource manager. +struct VectorTriple { + std::unique_ptr>>> + index_vectors; + std::unique_ptr>>> id_vectors; + std::unique_ptr>>> + weight_vectors; +}; + +} // namespace + +typedef ResourceContainer VectorTripleResource; + +// See docstring in dragnn_bulk_ops.cc. 
+class BulkFixedFeatures : public ComputeSessionOp { + public: + explicit BulkFixedFeatures(OpKernelConstruction *context) + : ComputeSessionOp(context) { + OP_REQUIRES_OK(context, context->GetAttr("num_channels", &num_channels_)); + + // Input: state handle. + vector input_types(1, DT_STRING); + + // Output: indices, ids and weights for every fixed feature channel. + vector output_types; + output_types.push_back(DT_STRING); + for (int c = 0; c < num_channels_; ++c) output_types.push_back(DT_INT32); + for (int c = 0; c < num_channels_; ++c) output_types.push_back(DT_INT64); + for (int c = 0; c < num_channels_; ++c) output_types.push_back(DT_FLOAT); + output_types.push_back(DT_INT32); + OP_REQUIRES_OK(context, context->MatchSignature(input_types, output_types)); + } + + bool OutputsHandle() const override { return true; } + bool RequiresComponentName() const override { return true; } + + void ComputeWithState(OpKernelContext *context, + ComputeSession *session) override { + constexpr int kTensorOffset = 1; + auto indices_allocator = [context, kTensorOffset](int channel, + int num_elements) { + Tensor *output; + CHECK(context + ->allocate_output(channel + kTensorOffset, + TensorShape({num_elements}), &output) + .ok()); + return output->vec().data(); + }; + + const int num_channels = num_channels_; + auto ids_allocator = [context, num_channels, kTensorOffset]( + int channel, int num_elements) { + Tensor *output; + CHECK(context + ->allocate_output(num_channels + channel + kTensorOffset, + TensorShape({num_elements}), &output) + .ok()); + return output->vec().data(); + }; + auto weights_allocator = [context, num_channels, kTensorOffset]( + int channel, int num_elements) { + Tensor *output; + CHECK(context + ->allocate_output(2 * num_channels + channel + kTensorOffset, + TensorShape({num_elements}), &output) + .ok()); + return output->vec().data(); + }; + + BulkFeatureExtractor extractor(indices_allocator, ids_allocator, + weights_allocator); + + int num_steps = 
session->BulkGetInputFeatures(component_name(), extractor); + VLOG(2) << "Extracted " << num_steps; + Tensor *num_steps_tensor; + OP_REQUIRES_OK( + context, context->allocate_output(3 * num_channels_ + 1, + TensorShape({}), &num_steps_tensor)); + num_steps_tensor->scalar()() = num_steps; + } + + private: + // Number of fixed feature channels. + int num_channels_; +}; + +REGISTER_KERNEL_BUILDER(Name("BulkFixedFeatures").Device(DEVICE_CPU), + BulkFixedFeatures); + +// See docstring in dragnn_bulk_ops.cc. +class BulkFixedEmbeddings : public ComputeSessionOp { + public: + explicit BulkFixedEmbeddings(OpKernelConstruction *context) + : ComputeSessionOp(context) { + OP_REQUIRES_OK(context, context->GetAttr("num_channels", &num_channels_)); + + // Input: state handle. + vector input_types; + input_types.push_back(DT_STRING); + for (int c = 0; c < num_channels_; ++c) input_types.push_back(DT_FLOAT); + const vector output_types = {DT_STRING, DT_FLOAT, DT_INT32}; + OP_REQUIRES_OK(context, context->MatchSignature(input_types, output_types)); + OP_REQUIRES_OK(context, context->GetAttr("pad_to_batch", &pad_to_batch_)); + OP_REQUIRES_OK(context, context->GetAttr("pad_to_steps", &pad_to_steps_)); + use_padding_ = (pad_to_steps_ != -1) || (pad_to_batch_ != -1); + VLOG(2) << "Created a BulkFixedEmbeddings with use_padding = " + << use_padding_; + } + + bool OutputsHandle() const override { return true; } + bool RequiresComponentName() const override { return true; } + + void ComputeWithState(OpKernelContext *context, + ComputeSession *session) override { + const int batch_size = session->BatchSize(component_name()); + tensorflow::ResourceMgr *rmgr = context->resource_manager(); + + // Create the pool for this container, or re-use one that was allocated in a + // previous call. 
+ auto create = [this](VectorTripleResource **resource) { + LOG(INFO) << "Creating new VectorTripleResource"; + std::unique_ptr triple(new VectorTriple()); + *resource = new VectorTripleResource(std::move(triple)); + (*resource)->get()->index_vectors.reset( + new std::vector>>(num_channels_)); + (*resource)->get()->id_vectors.reset( + new std::vector>>(num_channels_)); + (*resource)->get()->weight_vectors.reset( + new std::vector>>(num_channels_)); + for (int i = 0; i < num_channels_; ++i) { + (*resource)->get()->index_vectors->at(i).reset( + new std::vector()); + (*resource)->get()->id_vectors->at(i).reset(new std::vector()); + (*resource)->get()->weight_vectors->at(i).reset( + new std::vector()); + } + return Status::OK(); + }; + + VectorTripleResource *vector_triple; + auto handle = context->input(0).vec(); + OP_REQUIRES_OK(context, rmgr->LookupOrCreate( + handle(0), handle(1), &vector_triple, create)); + + std::vector>> *indices = + vector_triple->get()->index_vectors.get(); + std::vector>> *ids = + vector_triple->get()->id_vectors.get(); + std::vector>> *weights = + vector_triple->get()->weight_vectors.get(); + + auto indices_allocator = [context, &indices](int channel, int size) { + (*indices)[channel]->resize(size); + return (*indices)[channel]->data(); + }; + auto ids_allocator = [context, &ids](int channel, int size) { + (*ids)[channel]->resize(size); + return (*ids)[channel]->data(); + }; + auto weights_allocator = [context, &weights](int channel, int size) { + (*weights)[channel]->resize(size); + return (*weights)[channel]->data(); + }; + + BulkFeatureExtractor extractor(indices_allocator, ids_allocator, + weights_allocator, use_padding_, + pad_to_steps_, pad_to_batch_); + + int num_steps = session->BulkGetInputFeatures(component_name(), extractor); + VLOG(2) << "Extracted " << num_steps; + + Tensor *num_steps_tensor; + OP_REQUIRES_OK(context, context->allocate_output(2, TensorShape({}), + &num_steps_tensor)); + num_steps_tensor->scalar()() = num_steps; 
+ + // Looks up and outputs embedding vectors. + const auto &spec = session->Spec(component_name()); + + int embedding_size = 0; + for (int channel = 0; channel < num_channels_; ++channel) { + embedding_size += context->input(1 + channel).shape().dim_size(1) * + spec.fixed_feature(channel).size(); + } + + const int padded_batch = std::max(pad_to_batch_, batch_size); + const int padded_num_steps = std::max(pad_to_steps_, num_steps); + Tensor *embedding_vectors; + OP_REQUIRES_OK( + context, + context->allocate_output( + 1, TensorShape({padded_num_steps * padded_batch, embedding_size}), + &embedding_vectors)); + embedding_vectors->flat().setZero(); + + int channel_offset = 0; + for (int channel = 0; channel < num_channels_; ++channel) { + ExtractForChannel(*(indices->at(channel)), *(ids->at(channel)), + *(weights->at(channel)), channel_offset, + context->input(1 + channel), embedding_vectors); + channel_offset += context->input(1 + channel).shape().dim_size(1) * + spec.fixed_feature(channel).size(); + } + vector_triple->Unref(); + } + + private: + void ExtractForChannel(const std::vector &indices, + const std::vector &ids, + const std::vector &weights, int channel_base, + const Tensor &embeddings, Tensor *output) { + // Just turn this into a feature-size matrix, then the index is just the + // X coordinate into it. Run up the row (known length!) and sum. 
+ int num_elements = output->shape().dim_size(0); + int embedding_length = embeddings.shape().dim_size(1); + VLOG(2) << "Num elements: " << num_elements; + VLOG(2) << "Embedding length: " << embedding_length; + auto output_matrix = output->matrix(); + auto embedding_matrix = embeddings.matrix(); + VLOG(2) << "Channel base:" << channel_base; + for (int i = 0; i < indices.size(); ++i) { + VLOG(2) << "Feature: ind:" << indices[i] << ", id: " << ids[i] + << ", wt: " << weights[i]; + int y_base = + (indices[i] / num_elements) * embedding_length + channel_base; + int x_base = indices[i] % num_elements; + VLOG(2) << "Extracting to (x,y) = (" << x_base << "," << y_base << ")"; + for (int j = 0; j < embedding_length; ++j) { + output_matrix(x_base, y_base + j) += + embedding_matrix(ids[i], j) * weights[i]; + } + } + } + + // Number of fixed feature channels. + int num_channels_; + + // Will pad output to at least this many batch elements. + int pad_to_batch_ = -1; + + // Will pad output to at least this many steps. + int pad_to_steps_ = -1; + + // Set if either pad_to_batch or pad_to_steps is not -1. + bool use_padding_ = false; + + TF_DISALLOW_COPY_AND_ASSIGN(BulkFixedEmbeddings); +}; + +REGISTER_KERNEL_BUILDER(Name("BulkFixedEmbeddings").Device(DEVICE_CPU), + BulkFixedEmbeddings); + +// See docstring in dragnn_bulk_ops.cc. +class BulkAdvanceFromOracle : public ComputeSessionOp { + public: + explicit BulkAdvanceFromOracle(OpKernelConstruction *context) + : ComputeSessionOp(context) { + OP_REQUIRES_OK(context, + context->MatchSignature({DT_STRING}, {DT_STRING, DT_INT32})); + } + + bool OutputsHandle() const override { return true; } + bool RequiresComponentName() const override { return true; } + + // Advances all transition states along the oracle path. 
+ void ComputeWithState(OpKernelContext *context, + ComputeSession *session) override { + const int batch_size = session->BatchSize(component_name()); + const int beam_size = session->BeamSize(component_name()); + const int num_items = batch_size * beam_size; + vector>> gold; + + int num_steps = 0; + while (!session->IsTerminal(component_name())) { + gold.emplace_back(session->EmitOracleLabels(component_name())); + + // Advance the component. + session->AdvanceFromOracle(component_name()); + ++num_steps; + } + + // Fills output tensor with oracle labels where possible, or -1. + Tensor *gold_output; + OP_REQUIRES_OK(context, + context->allocate_output( + 1, TensorShape({num_items * num_steps}), &gold_output)); + int item = 0; + for (int batch_ix = 0; batch_ix < batch_size; ++batch_ix) { + for (int beam_ix = 0; beam_ix < beam_size; ++beam_ix, ++item) { + for (int step = 0; step < num_steps; ++step) { + gold_output->vec()(item * num_steps + step) = + step < gold.size() ? gold[step][batch_ix][beam_ix] : -1; + } + } + } + } + + private: + TF_DISALLOW_COPY_AND_ASSIGN(BulkAdvanceFromOracle); +}; + +REGISTER_KERNEL_BUILDER(Name("BulkAdvanceFromOracle").Device(DEVICE_CPU), + BulkAdvanceFromOracle); + +// See docstring in dragnn_bulk_ops.cc. +template +class BulkAdvanceFromPrediction : public ComputeSessionOp { + public: + explicit BulkAdvanceFromPrediction(OpKernelConstruction *context) + : ComputeSessionOp(context) { + const DataType dt = tensorflow::DataTypeToEnum::v(); + OP_REQUIRES_OK(context, + context->MatchSignature({DT_STRING, dt}, {DT_STRING})); + } + + bool OutputsHandle() const override { return true; } + bool RequiresComponentName() const override { return true; } + + // Advances all transition states as much as possible using the given scores. 
+ void ComputeWithState(OpKernelContext *context, + ComputeSession *session) override { + const Tensor &scores_tensor = context->input(1); + const auto &scores = scores_tensor.matrix(); + const int num_items = (session->BatchSize(component_name()) * + session->BeamSize(component_name())); + const int num_actions = scores_tensor.shape().dim_size(1); + const int num_steps = scores_tensor.shape().dim_size(0) / num_items; + vector scores_per_step(num_items * num_actions); + for (int step = 0; step < num_steps; ++step) { + for (int item = 0; item < num_items; ++item) { + for (int action = 0; action < num_actions; ++action) { + scores_per_step[item * num_actions + action] = + scores(item * num_steps + step, action); + } + } + if (!session->IsTerminal(component_name())) { + session->AdvanceFromPrediction(component_name(), scores_per_step.data(), + scores_per_step.size()); + } + } + } + + private: + TF_DISALLOW_COPY_AND_ASSIGN(BulkAdvanceFromPrediction); +}; + +#define REGISTER_BULK_ADVANCE(type) \ + REGISTER_KERNEL_BUILDER(Name("BulkAdvanceFromPrediction") \ + .Device(DEVICE_CPU) \ + .TypeConstraint("T"), \ + BulkAdvanceFromPrediction) + +REGISTER_BULK_ADVANCE(float); +REGISTER_BULK_ADVANCE(quint8); +REGISTER_BULK_ADVANCE(uint8); + +} // namespace dragnn +} // namespace syntaxnet diff --git a/syntaxnet/dragnn/core/ops/dragnn_bulk_op_kernels_test.cc b/syntaxnet/dragnn/core/ops/dragnn_bulk_op_kernels_test.cc new file mode 100644 index 0000000000000000000000000000000000000000..355d99fbf025bc58735c8ac17e8e1df6842be460 --- /dev/null +++ b/syntaxnet/dragnn/core/ops/dragnn_bulk_op_kernels_test.cc @@ -0,0 +1,588 @@ +#include "dragnn/components/util/bulk_feature_extractor.h" +#include "dragnn/core/compute_session_pool.h" +#include "dragnn/core/resource_container.h" +#include "dragnn/core/test/mock_compute_session.h" + +#include +#include "tensorflow/core/framework/fake_input.h" +#include "tensorflow/core/framework/node_def_builder.h" +#include 
"tensorflow/core/kernels/ops_testutil.h" + +namespace syntaxnet { +namespace dragnn { + +using tensorflow::AllocatorAttributes; +using tensorflow::DT_FLOAT; +using tensorflow::DT_STRING; +using tensorflow::FrameAndIter; +using tensorflow::NodeDefBuilder; +using tensorflow::OpKernelContext; +using tensorflow::ResourceMgr; +using tensorflow::ScopedStepContainer; +using tensorflow::Status; +using tensorflow::TensorShape; +using tensorflow::checkpoint::TensorSliceReaderCacheWrapper; +using tensorflow::test::SetOutputAttrs; + +using testing::Return; +using testing::_; + +typedef ResourceContainer ComputeSessionResource; +typedef ResourceContainer ComputeSessionPoolResource; + +class DragnnBulkOpKernelsTest : public tensorflow::OpsTestBase { + public: + static const int kEmbeddingSize = 2; + static const int kNumActions = 3; + static const int kNumChannels = 2; + static const int kNumIds = 8; + static const int kNumItems = 3; + static const int kNumSteps = 3; + const string kComponentName = "TESTING_COMPONENT_NAME"; + + MockComputeSession *GetMockSession() { + TF_CHECK_OK(InitOp()); + + // Set the input data. + const string container_string = "container_str"; + const string id_string = "id_str"; + AddInputFromList(TensorShape({2}), {container_string, id_string}); + + // Reset the test context to ensure it's clean. + ResetOpKernelContext(); + + // Create a MockComputeSession and set expectations. + std::unique_ptr mock_session(new MockComputeSession()); + MockComputeSession *mock_session_ptr = mock_session.get(); + + // Wrap the ComputeSessionResource and put it into the resource manager. 
+ TF_CHECK_OK(resource_mgr()->Create( + container_string, id_string, + new ComputeSessionResource(std::move(mock_session)))); + return mock_session_ptr; + } + + void ResetOpKernelContext() { + params_.reset(new OpKernelContext::Params); + params_->device = device_.get(); + params_->frame_iter = FrameAndIter(0, 0); + params_->inputs = &inputs_; + params_->op_kernel = kernel_.get(); + step_container_.reset(new ScopedStepContainer(0, [](const string &) {})); + params_->step_container = step_container_.get(); + attrs_.clear(); + SetOutputAttrs(params_.get(), &attrs_); + TensorSliceReaderCacheWrapper slice_reader_cache_wrapper; + params_->slice_reader_cache = &slice_reader_cache_wrapper; + params_->resource_manager = device_->resource_manager(); + context_.reset(new OpKernelContext(params_.get())); + } + + Status RunOpKernelWithContext() { + device_->Compute(kernel_.get(), context_.get()); + return context_->status(); + } + + // Accessor for the underlying resource manager. + ResourceMgr *resource_mgr() { return params_->resource_manager; } + /* + // Returns a vector with dimensions: channel x batch x step. + // For each item we return features for three steps: + // feature step 0: (5, 1) + // feature step 1: (5, 0.5), (6, 0.7) + // feature step 2: (3, 0.1), (7, [empty]) <- Default weight is 1.0. 
+ void ExpectFeatures(MockComputeSession *mock_session) { + vector feature_step_zero, feature_step_one, feature_step_two; + for (int item = 0; item < kNumItems; ++item) { + feature_step_zero.emplace_back(); + feature_step_zero.back().add_id(5); + feature_step_zero.back().add_weight(1.0); + feature_step_one.emplace_back(); + feature_step_one.back().add_id(5); + feature_step_one.back().add_weight(0.5); + feature_step_one.back().add_id(6); + feature_step_one.back().add_weight(0.7); + feature_step_two.emplace_back(); + feature_step_two.back().add_id(3); + feature_step_two.back().add_weight(0.1); + feature_step_two.back().add_id(7); + } + for (int channel = 0; channel < kNumChannels; ++channel) { + EXPECT_CALL(*mock_session, GetInputFeatures(kComponentName, channel)) + .Times(3) + .WillOnce(Return(feature_step_zero)) + .WillOnce(Return(feature_step_one)) + .WillOnce(Return(feature_step_two)); + } + } + + // Returns a vector with dimensions: channel x batch x step. + // For each item we return features for three steps with ids only: + // feature step 0: id=5 + // feature step 1: id=6 + // feature step 2: id=3 + void ExpectFeatureIds(MockComputeSession *mock_session) { + vector feature_step_zero, feature_step_one, feature_step_two; + for (int item = 0; item < kNumItems; ++item) { + feature_step_zero.emplace_back(); + feature_step_zero.back().add_id(5); + feature_step_one.emplace_back(); + feature_step_one.back().add_id(6); + feature_step_two.emplace_back(); + feature_step_two.back().add_id(3); + } + for (int channel = 0; channel < kNumChannels; ++channel) { + EXPECT_CALL(*mock_session, GetInputFeatures(kComponentName, channel)) + .Times(3) + .WillOnce(Return(feature_step_zero)) + .WillOnce(Return(feature_step_one)) + .WillOnce(Return(feature_step_two)); + } + } + */ + // This needs to maintain its existence throughout the compute call. 
+ std::vector attrs_; +}; + +const int DragnnBulkOpKernelsTest::kEmbeddingSize; +const int DragnnBulkOpKernelsTest::kNumActions; +const int DragnnBulkOpKernelsTest::kNumChannels; +const int DragnnBulkOpKernelsTest::kNumIds; +const int DragnnBulkOpKernelsTest::kNumItems; +const int DragnnBulkOpKernelsTest::kNumSteps; + +// The ExtractFixedFeatures op should return a set of fixed feature vectors +// as described below. +TEST_F(DragnnBulkOpKernelsTest, BulkFixedFeatures) { + // Create and initialize the kernel under test. + TF_ASSERT_OK( + NodeDefBuilder("BulkFixedFeatures", "BulkFixedFeatures") + .Attr("component", kComponentName) + .Attr("num_channels", kNumChannels) + .Input(FakeInput(DT_STRING)) // The handle for the ComputeSession. + .Finalize(node_def())); + + MockComputeSession *mock_session = GetMockSession(); + const std::vector expected_indices({0, 2, 1, 0, 1}); + const std::vector expected_ids({5, 5, 6, 3, 7}); + const std::vector expected_weights({1.0, 0.5, 0.7, 0.1, 1.0}); + + // This function takes the allocator functions passed into GetBulkFF, uses + // them to allocate a tensor, then fills that tensor based on channel. + auto assigner_function = [=](string, const BulkFeatureExtractor &extractor) { + constexpr int kFeatureCount = 3; + constexpr int kTotalFeatures = 5; + constexpr int kNumSteps = 3; + for (int i = 0; i < kNumChannels; ++i) { + // Allocate a new tensor set for every channel. + int32 *indices = + extractor.AllocateIndexMemory(i, kTotalFeatures * kNumSteps); + int64 *ids = extractor.AllocateIdMemory(i, kTotalFeatures * kNumSteps); + float *weights = + extractor.AllocateWeightMemory(i, kTotalFeatures * kNumSteps); + + // Fill the tensor. 
+ int array_index = 0; + for (int step = 0; step < kNumSteps; step++) { + for (int j = 0; j < kTotalFeatures; ++j) { + int offset = i + 1; + indices[array_index] = + (expected_indices[j] + step * kFeatureCount) * offset; + ids[array_index] = expected_ids[j] * offset; + weights[array_index] = expected_weights[j] * offset; + ++array_index; + } + } + } + return kNumSteps; + }; + + EXPECT_CALL(*mock_session, BulkGetInputFeatures(kComponentName, _)) + .WillOnce(testing::Invoke(assigner_function)); + + // Run the kernel. + TF_EXPECT_OK(RunOpKernelWithContext()); + + // Validate the outputs. + // In this case, for every channel we should have: + // indices = [0 , 2 , 1 , 0 , 1 ] + // [3 , 5 , 4 , 3 , 4 ] + // [6 , 8 , 7 , 6 , 7 ] + // ids = [5 , 5 , 6 , 3 , 7 ] + // [5 , 5 , 6 , 3 , 7 ] + // [5 , 5 , 6 , 3 , 7 ] + // weights = [1.0, 0.5, 0.7, 0.1, 1.0] + // [1.0, 0.5, 0.7, 0.1, 1.0] + // [1.0, 0.5, 0.7, 0.1, 1.0] + + for (int i = 0; i < kNumChannels * 3; ++i) { + EXPECT_EQ(expected_indices.size() * kNumSteps, + GetOutput(i + 1)->NumElements()); + } + for (int channel = 0; channel < kNumChannels; ++channel) { + LOG(INFO) << "Channel " << channel; + for (int step = 0; step < kNumSteps; ++step) { + for (int i = 0; i < expected_indices.size(); ++i) { + const int j = i + step * expected_indices.size(); + + // Note that the expectation on the indices changes per step, unlike the + // expectation for ids and weights. + int offset = channel + 1; + EXPECT_EQ((expected_indices[i] + step * kNumItems) * offset, + GetOutput(channel + 1)->vec()(j)); + EXPECT_EQ(expected_ids[i] * offset, + GetOutput(kNumChannels + channel + 1)->vec()(j)); + EXPECT_EQ(expected_weights[i] * offset, + GetOutput(2 * kNumChannels + channel + 1)->vec()(j)); + } + } + } + EXPECT_EQ(kNumSteps, GetOutput(3 * kNumChannels + 1)->scalar()()); +} + +TEST_F(DragnnBulkOpKernelsTest, BulkFixedEmbeddings) { + // Create and initialize the kernel under test. 
+ TF_ASSERT_OK( + NodeDefBuilder("BulkFixedEmbeddings", "BulkFixedEmbeddings") + .Attr("component", kComponentName) + .Attr("num_channels", kNumChannels) + .Input(FakeInput(DT_STRING)) // The handle for the ComputeSession. + .Input(FakeInput(DT_FLOAT)) // Embedding matrices. + .Finalize(node_def())); + MockComputeSession *mock_session = GetMockSession(); + ComponentSpec spec; + spec.set_name(kComponentName); + auto chan0_spec = spec.add_fixed_feature(); + chan0_spec->set_size(2); + auto chan1_spec = spec.add_fixed_feature(); + chan1_spec->set_size(1); + EXPECT_CALL(*mock_session, Spec(kComponentName)) + .WillOnce(testing::ReturnRef(spec)); + + EXPECT_CALL(*mock_session, BatchSize(kComponentName)) + .WillOnce(Return(kNumItems)); + + const std::vector feature_step_1({0, 1, 2, 1, 2, 2, 1, 0, 1, 0}); + const std::vector feature_index_1({0, 0, 0, 0, 0, 1, 1, 1, 1, 1}); + const std::vector feature_ids_1({5, 6, 3, 5, 7, 5, 6, 3, 5, 7}); + const std::vector feature_weights_1( + {1.0, 0.7, 0.1, 0.5, 1.0, 10, 7, 1, 5, 10}); + + const std::vector feature_step_2({0, 1, 2, 1, 2}); + const std::vector feature_index_2({0, 0, 0, 0, 0}); + const std::vector feature_ids_2({5, 6, 3, 5, 7}); + const std::vector feature_weights_2({1.0, 0.7, 0.1, 0.5, 1.0}); + + const std::vector> feature_steps_by_channel( + {feature_step_1, feature_step_2}); + const std::vector> feature_index_by_channel( + {feature_index_1, feature_index_2}); + const std::vector> feature_ids_by_channel( + {feature_ids_1, feature_ids_2}); + const std::vector> feature_weights_by_channel( + {feature_weights_1, feature_weights_2}); + + // This function takes the allocator functions passed into GetBulkFF, uses + // them to allocate a tensor, then fills that tensor based on channel. 
+ auto assigner_function = [=](string, const BulkFeatureExtractor &extractor) { + constexpr int kNumElements = 3; + constexpr int kNumSteps = 3; + for (int i = 0; i < kNumChannels; ++i) { + auto feature_step = feature_steps_by_channel.at(i); + auto feature_index = feature_index_by_channel.at(i); + auto feature_ids = feature_ids_by_channel.at(i); + auto feature_weights = feature_weights_by_channel.at(i); + + // Allocate a new tensor set for every channel. + int32 *indices = + extractor.AllocateIndexMemory(i, kNumElements * feature_step.size()); + int64 *ids = + extractor.AllocateIdMemory(i, kNumElements * feature_step.size()); + float *weights = + extractor.AllocateWeightMemory(i, kNumElements * feature_step.size()); + + // Fill the tensor. + int array_index = 0; + + for (int element = 0; element < kNumElements; ++element) { + for (int feature = 0; feature < feature_step.size(); ++feature) { + indices[array_index] = extractor.GetIndex( + kNumSteps, kNumElements, feature_index[feature], element, + feature_step[feature]); + ids[array_index] = feature_ids[feature]; + weights[array_index] = feature_weights[feature]; + ++array_index; + } + } + } + return kNumSteps; + }; + + EXPECT_CALL(*mock_session, BulkGetInputFeatures(kComponentName, _)) + .WillOnce(testing::Invoke(assigner_function)); + + // Embedding matrices as additional inputs. + // For channel 0, the embeddings are [id, 0]. + // For channel 1, the embeddings are [0, id]. + vector embedding_matrix_a; + vector embedding_matrix_b; + for (int id = 0; id < kNumIds; ++id) { + embedding_matrix_a.push_back(id); + embedding_matrix_a.push_back(0); + embedding_matrix_b.push_back(0); + embedding_matrix_b.push_back(id); + } + AddInputFromArray(TensorShape({8, 2}), embedding_matrix_a); + AddInputFromArray(TensorShape({8, 2}), embedding_matrix_b); + + // Run the kernel. + TF_EXPECT_OK(RunOpKernelWithContext()); + + // Validate the outputs. 
+ // In this case we should have, for every item, these three steps: + const vector> expected_embeddings = {{5.0, 0, 73, 0, 0, 5.0}, + {6.7, 0, 67, 0, 0, 6.7}, + {7.3, 0, 50, 0, 0, 7.3}}; + EXPECT_EQ(kNumSteps * kNumItems, GetOutput(1)->shape().dim_size(0)); + constexpr int kNumFeatures = 3; + EXPECT_EQ(kNumFeatures * kEmbeddingSize, GetOutput(1)->shape().dim_size(1)); + for (int item = 0; item < kNumItems; ++item) { + for (int step = 0; step < kNumSteps; ++step) { + for (int col = 0; col < kNumChannels * kEmbeddingSize; ++col) { + const int row = item * kNumSteps + step; + EXPECT_EQ(expected_embeddings[step][col], + GetOutput(1)->matrix()(row, col)) + << "step: " << step << ", row: " << row << ", col: " << col; + } + } + } + + EXPECT_EQ(kNumSteps, GetOutput(2)->scalar()()); +} + +TEST_F(DragnnBulkOpKernelsTest, BulkFixedEmbeddingsWithPadding) { + // Create and initialize the kernel under test. + constexpr int kPaddedNumSteps = 5; + constexpr int kPaddedBatchSize = 4; + TF_ASSERT_OK( + NodeDefBuilder("BulkFixedEmbeddings", "BulkFixedEmbeddings") + .Attr("component", kComponentName) + .Attr("num_channels", kNumChannels) + .Attr("pad_to_steps", kPaddedNumSteps) + .Attr("pad_to_batch", kPaddedBatchSize) + .Input(FakeInput(DT_STRING)) // The handle for the ComputeSession. + .Input(FakeInput(DT_FLOAT)) // Embedding matrices. 
+ .Finalize(node_def())); + MockComputeSession *mock_session = GetMockSession(); + ComponentSpec spec; + spec.set_name(kComponentName); + auto chan0_spec = spec.add_fixed_feature(); + chan0_spec->set_size(2); + auto chan1_spec = spec.add_fixed_feature(); + chan1_spec->set_size(1); + EXPECT_CALL(*mock_session, Spec(kComponentName)) + .WillOnce(testing::ReturnRef(spec)); + + EXPECT_CALL(*mock_session, BatchSize(kComponentName)) + .WillOnce(Return(kNumItems)); + + const std::vector feature_step_1({0, 1, 2, 1, 2, 2, 1, 0, 1, 0}); + const std::vector feature_index_1({0, 0, 0, 0, 0, 1, 1, 1, 1, 1}); + const std::vector feature_ids_1({5, 6, 3, 5, 7, 5, 6, 3, 5, 7}); + const std::vector feature_weights_1( + {1.0, 0.7, 0.1, 0.5, 1.0, 10, 7, 1, 5, 10}); + + const std::vector feature_step_2({0, 1, 2, 1, 2}); + const std::vector feature_index_2({0, 0, 0, 0, 0}); + const std::vector feature_ids_2({5, 6, 3, 5, 7}); + const std::vector feature_weights_2({1.0, 0.7, 0.1, 0.5, 1.0}); + + const std::vector> feature_steps_by_channel( + {feature_step_1, feature_step_2}); + const std::vector> feature_index_by_channel( + {feature_index_1, feature_index_2}); + const std::vector> feature_ids_by_channel( + {feature_ids_1, feature_ids_2}); + const std::vector> feature_weights_by_channel( + {feature_weights_1, feature_weights_2}); + + // This function takes the allocator functions passed into GetBulkFF, uses + // them to allocate a tensor, then fills that tensor based on channel. + auto assigner_function = [=](string, const BulkFeatureExtractor &extractor) { + constexpr int kNumElements = 3; + constexpr int kNumSteps = 3; + for (int i = 0; i < kNumChannels; ++i) { + auto feature_step = feature_steps_by_channel.at(i); + auto feature_index = feature_index_by_channel.at(i); + auto feature_ids = feature_ids_by_channel.at(i); + auto feature_weights = feature_weights_by_channel.at(i); + + // Allocate a new tensor set for every channel. 
+ int32 *indices = + extractor.AllocateIndexMemory(i, kNumElements * feature_step.size()); + int64 *ids = + extractor.AllocateIdMemory(i, kNumElements * feature_step.size()); + float *weights = + extractor.AllocateWeightMemory(i, kNumElements * feature_step.size()); + + // Fill the tensor. + int array_index = 0; + + for (int element = 0; element < kNumElements; ++element) { + for (int feature = 0; feature < feature_step.size(); ++feature) { + indices[array_index] = extractor.GetIndex( + kNumSteps, kNumElements, feature_index[feature], element, + feature_step[feature]); + ids[array_index] = feature_ids[feature]; + weights[array_index] = feature_weights[feature]; + ++array_index; + } + } + } + return kNumSteps; + }; + + EXPECT_CALL(*mock_session, BulkGetInputFeatures(kComponentName, _)) + .WillOnce(testing::Invoke(assigner_function)); + + // Embedding matrices as additional inputs. + // For channel 0, the embeddings are [id, 0]. + // For channel 1, the embeddings are [0, id]. + vector embedding_matrix_a; + vector embedding_matrix_b; + for (int id = 0; id < kNumIds; ++id) { + embedding_matrix_a.push_back(id); + embedding_matrix_a.push_back(0); + embedding_matrix_b.push_back(0); + embedding_matrix_b.push_back(id); + } + AddInputFromArray(TensorShape({8, 2}), embedding_matrix_a); + AddInputFromArray(TensorShape({8, 2}), embedding_matrix_b); + + // Run the kernel. + TF_EXPECT_OK(RunOpKernelWithContext()); + + // Validate the outputs. 
+ // In this case we should have, for every item, these three steps: + const vector> expected_embeddings = {{5.0, 0, 73, 0, 0, 5.0}, + {6.7, 0, 67, 0, 0, 6.7}, + {7.3, 0, 50, 0, 0, 7.3}}; + EXPECT_EQ(kPaddedNumSteps * kPaddedBatchSize, + GetOutput(1)->shape().dim_size(0)); + + constexpr int kNumFeatures = 3; + EXPECT_EQ(kNumFeatures * kEmbeddingSize, GetOutput(1)->shape().dim_size(1)); + for (int item = 0; item < kNumItems; ++item) { + for (int step = 0; step < kNumSteps; ++step) { + for (int col = 0; col < kNumChannels * kEmbeddingSize; ++col) { + const int row = item * kPaddedNumSteps + step; + EXPECT_EQ(expected_embeddings[step][col], + GetOutput(1)->matrix()(row, col)) + << "step: " << step << ", row: " << row << ", col: " << col; + } + } + } + + EXPECT_EQ(kNumSteps, GetOutput(2)->scalar()()); +} + +TEST_F(DragnnBulkOpKernelsTest, BulkAdvanceFromOracle) { + // Create and initialize the kernel under test. + TF_ASSERT_OK( + NodeDefBuilder("BulkAdvanceFromOracle", "BulkAdvanceFromOracle") + .Attr("component", kComponentName) + .Input(FakeInput(DT_STRING)) // The handle for the ComputeSession. + .Finalize(node_def())); + MockComputeSession *mock_session = GetMockSession(); + EXPECT_CALL(*mock_session, IsTerminal(kComponentName)) + .WillOnce(Return(false)) + .WillOnce(Return(false)) + .WillOnce(Return(false)) + .WillOnce(Return(true)); + EXPECT_CALL(*mock_session, AdvanceFromOracle(kComponentName)) + .Times(kNumSteps); + const vector>> gold = { + {{1}, {1}, {1}}, {{2}, {2}, {2}}, {{3}, {3}, {3}}, + }; + EXPECT_CALL(*mock_session, EmitOracleLabels(kComponentName)) + .WillOnce(Return(gold[0])) + .WillOnce(Return(gold[1])) + .WillOnce(Return(gold[2])); + EXPECT_CALL(*mock_session, BeamSize(kComponentName)).WillOnce(Return(1)); + EXPECT_CALL(*mock_session, BatchSize(kComponentName)) + .WillOnce(Return(kNumItems)); + + // Run the kernel. + TF_EXPECT_OK(RunOpKernelWithContext()); + + // Validate the outputs. 
+  // For every item we should have:
+  const vector expected_gold = {1, 2, 3};
+  EXPECT_EQ(kNumSteps * kNumItems, GetOutput(1)->NumElements());
+  for (int item = 0; item < kNumItems; ++item) {
+    for (int step = 0; step < kNumSteps; ++step) {
+      // Output is item-major: all steps for item 0, then all steps for item 1,
+      // and so on.
+      EXPECT_EQ(expected_gold[step],
+                GetOutput(1)->vec()(step + item * kNumSteps));
+    }
+  }
+}
+
+// Formats a float array as "[ x.x x.x ... ]" with one decimal place per
+// element, for use in test-failure messages below.
+string ArrayToString(const float *array, const int size) {
+  string str = "[ ";
+  for (int i = 0; i < size; ++i) {
+    str += tensorflow::strings::Printf("%.1f ", array[i]);
+  }
+  return str + "]";
+}
+
+// gMock matcher: succeeds when the matched score buffer, multiplied by 10,
+// forms a run of consecutive integers starting from arg[0] * 10 (within 1e-4
+// per element).  The expected buffer length is kNumItems * kNumActions.
+// NOTE(review): the matcher always returns true; failures surface via the
+// EXPECT_NEAR inside rather than as a match failure.
+MATCHER(CheckScoresAreConsecutiveIntegersDivTen, "") {
+  const int size =
+      DragnnBulkOpKernelsTest::kNumItems * DragnnBulkOpKernelsTest::kNumActions;
+  for (int i(0), score(arg[0] * 10); i < size; ++i, ++score) {
+    EXPECT_NEAR(score / 10.0f, arg[i], 1e-4)
+        << "i: " << i << ", scores: " << ArrayToString(arg, size);
+  }
+  return true;
+}
+
+TEST_F(DragnnBulkOpKernelsTest, BulkAdvanceFromPrediction) {
+  // Create and initialize the kernel under test.
+  TF_ASSERT_OK(
+      NodeDefBuilder("BulkAdvanceFromPrediction", "BulkAdvanceFromPrediction")
+          .Attr("component", kComponentName)
+          .Input(FakeInput(DT_STRING))  // The handle for the ComputeSession.
+          .Input(FakeInput(DT_FLOAT))   // Prediction scores for advancing.
+          .Finalize(node_def()));
+  MockComputeSession *mock_session = GetMockSession();
+
+  // Creates an input tensor such that each step will see a list of consecutive
+  // integers divided by 10 as scores.
+ vector scores(kNumItems * kNumSteps * kNumActions); + for (int step(0), cnt(0); step < kNumSteps; ++step) { + for (int item = 0; item < kNumItems; ++item) { + for (int action = 0; action < kNumActions; ++action, ++cnt) { + scores[action + kNumActions * (step + item * kNumSteps)] = cnt / 10.0f; + } + } + } + AddInputFromArray(TensorShape({kNumItems * kNumSteps, kNumActions}), + scores); + + EXPECT_CALL(*mock_session, BeamSize(kComponentName)).WillOnce(Return(1)); + EXPECT_CALL(*mock_session, BatchSize(kComponentName)) + .WillOnce(Return(kNumItems)); + EXPECT_CALL(*mock_session, IsTerminal(kComponentName)) + .Times(kNumSteps) + .WillRepeatedly(Return(false)); + EXPECT_CALL(*mock_session, + AdvanceFromPrediction(kComponentName, + CheckScoresAreConsecutiveIntegersDivTen(), + kNumItems * kNumActions)) + .Times(kNumSteps); + + // Run the kernel. + TF_EXPECT_OK(RunOpKernelWithContext()); +} + +} // namespace dragnn +} // namespace syntaxnet diff --git a/syntaxnet/dragnn/core/ops/dragnn_bulk_ops.cc b/syntaxnet/dragnn/core/ops/dragnn_bulk_ops.cc new file mode 100644 index 0000000000000000000000000000000000000000..c526edb86868435019f8b095e12539d9f351decd --- /dev/null +++ b/syntaxnet/dragnn/core/ops/dragnn_bulk_ops.cc @@ -0,0 +1,115 @@ +#include "tensorflow/core/framework/op.h" +#include "tensorflow/core/framework/shape_inference.h" + +namespace syntaxnet { +namespace dragnn { + +REGISTER_OP("BulkFixedFeatures") + .Input("handle: string") + .Output("output_handle: string") + .Output("indices: num_channels * int32") + .Output("ids: num_channels * int64") + .Output("weights: num_channels * float") + .Output("num_steps: int32") + .Attr("component: string") + .Attr("num_channels: int") + .Doc(R"doc( +Given a ComputeSession and a component, outputs fixed features for all steps. + +This op outputs features for the entire oracle path of the component. Unlike +ExtractFixedFeatures, this op mutates the master state, advancing all of its +states until they are final. 
For every channel, indices[channel], ids[channel], +and weights[channel] have the same length, ie. the number of predicates, +ordered by batch, beam, step. + +handle: A handle to a ComputeSession. +output_handle: A handle to the same ComputeSession after advancement. +indices: (num_channels vectors of int32) If indices[i] = j, then + embedding_sum[j] += embedding_matrix[ids[i]] * weights[i]. +ids: (num_channels vectors of int64) Ids to lookup in embedding matrices. +weights: (num_channels vectors of float) Weight for each embedding. +num_steps: (int32 scalar) The batch was unrolled for this many steps. +component: The name of a Component instance, matching the ComponentSpec.name. +num_channels: The number of FixedFeature channels. +)doc"); + +REGISTER_OP("BulkFixedEmbeddings") + .Input("handle: string") + .Input("embedding_matrix: num_channels * T") + .Output("output_handle: string") + .Output("embedding_vectors: T") + .Output("num_steps: int32") + .Attr("component: string") + .Attr("num_channels: int") + .Attr("T: type") + .Attr("pad_to_batch: int=-1") + .Attr("pad_to_steps: int=-1") + .SetIsStateful() + .Doc(R"doc( +This op is a more efficient version of BulkFixedFeatures. + +It is intended to be run with large batch sizes at inference time. The op takes +a handle to ComputeSession and embedding matrices as tensor inputs, and directly +outputs concatenated embedding vectors. + +handle: A handle to ComputeSession. +embedding_matrix: Embedding matrices. +output_handle: A handle to the same ComputeSession after advancement. +embedding_vectors: (matrix of float) Concatenated embeddings, + shaped as (batch * beam * token) x sum_channel(embedding_dim[channel]). +num_steps: The batch was unrolled for these many steps. +component: The name of a Component instance, matching the ComponentSpec.name. +num_channels: The number of FixedFeature channels. +T: The datatype to emit. +pad_to_batch: If set, the op will pad/truncate to this number of elements. 
+pad_to_steps: If set, the op will pad/truncate to this number of steps. +)doc"); + +REGISTER_OP("BulkAdvanceFromOracle") + .Input("handle: string") + .Output("output_handle: string") + .Output("gold_labels: int32") + .Attr("component: string") + .Doc(R"doc( +Given a ComputeSession, advances until all states are final. + +Note that, unlike AdvanceFromOracle, this op does mutate the master state, by +advancing all of its states until they are final. + +handle: A handle to a ComputeSession. +output_handle: A handle to the same ComputeSession, after it has advanced. +gold_labels: [batch_size * beam_size * max_num_steps] vector of oracle actions, + where max_num_steps is the maximum number of steps in the oracle + action sequences for every state in the batch of beams. Each + sub-segment of length max_num_steps provides the oracle action + sequence for the corresponding state in the batch of beams, padded + with trailing -1s. +component: The name of a Component instance, matching the ComponentSpec.name. +)doc"); + +REGISTER_OP("BulkAdvanceFromPrediction") + .Input("handle: string") + .Input("scores: T") + .Output("output_handle: string") + .Attr("component: string") + .Attr("T: type") + .SetShapeFn([](tensorflow::shape_inference::InferenceContext *context) { + auto scores = context->input(1); + TF_RETURN_IF_ERROR(context->WithRank(scores, 2, &scores)); + return tensorflow::Status::OK(); + }) + .Doc(R"doc( +Given a ComputeSession and a tensor of scores, advances the state. + +The state will be advanced until all scores are used up or all states are final. + +handle: A handle to a ComputeSession. +scores: A tensor of scores with shape + {batch_size * beam_size * num_steps, num_actions}. +output_handle: handle to the same ComputeSession after advancement. +component: The name of a Component instance, matching the ComponentSpec.name. +T: The datatype to emit. 
+)doc"); + +} // namespace dragnn +} // namespace syntaxnet diff --git a/syntaxnet/dragnn/core/ops/dragnn_op_kernels.cc b/syntaxnet/dragnn/core/ops/dragnn_op_kernels.cc new file mode 100644 index 0000000000000000000000000000000000000000..a06b65170595b8d0d27594bb330307ea39727a69 --- /dev/null +++ b/syntaxnet/dragnn/core/ops/dragnn_op_kernels.cc @@ -0,0 +1,631 @@ +#include +#include +#include + +#include "dragnn/core/compute_session.h" +#include "dragnn/core/compute_session_pool.h" +#include "dragnn/core/ops/compute_session_op.h" +#include "dragnn/core/resource_container.h" +#include "dragnn/protos/data.pb.h" +#include "dragnn/protos/spec.pb.h" +#include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor" +#include "tensorflow/core/framework/op_kernel.h" +#include "tensorflow/core/framework/resource_mgr.h" +#include "tensorflow/core/framework/tensor.h" +#include "tensorflow/core/framework/tensor_shape.h" +#include "tensorflow/core/lib/core/status.h" +#include "tensorflow/core/lib/core/threadpool.h" +#include "tensorflow/core/platform/logging.h" +#include "tensorflow/core/platform/mutex.h" + +using tensorflow::DEVICE_CPU; +using tensorflow::DT_BOOL; +using tensorflow::DT_FLOAT; +using tensorflow::DT_INT32; +using tensorflow::DT_INT64; +using tensorflow::DT_STRING; +using tensorflow::DataType; +using tensorflow::OpKernel; +using tensorflow::OpKernelConstruction; +using tensorflow::OpKernelContext; +using tensorflow::ResourceMgr; +using tensorflow::Status; +using tensorflow::Tensor; +using tensorflow::TensorShape; + +namespace syntaxnet { +namespace dragnn { + +typedef ResourceContainer ComputeSessionResource; +typedef ResourceContainer ComputeSessionPoolResource; + +// Given a MasterSpec proto, outputs a handle to a ComputeSession. 
+class GetSession : public OpKernel { + public: + explicit GetSession(OpKernelConstruction *context) : OpKernel(context) { + string master_spec_str; + string grid_point_spec_str; + OP_REQUIRES_OK(context, context->GetAttr("master_spec", &master_spec_str)); + OP_REQUIRES_OK(context, + context->GetAttr("grid_point", &grid_point_spec_str)); + CHECK(master_spec_.ParseFromString(master_spec_str)); + CHECK(grid_point_.ParseFromString(grid_point_spec_str)); + OP_REQUIRES_OK(context, context->MatchSignature({DT_STRING}, {DT_STRING})); + } + + void Compute(OpKernelContext *context) override { + const string container = context->input(0).scalar()(); + ResourceMgr *rmgr = context->resource_manager(); + + // Create the pool for this container, or re-use one that was allocated in a + // previous call. + auto create_pool = [this, + &container](ComputeSessionPoolResource **resource) { + LOG(INFO) << "Creating new ComputeSessionPool in container handle: " + << container; + std::unique_ptr pool( + new ComputeSessionPool(master_spec_, grid_point_)); + *resource = new ComputeSessionPoolResource(std::move(pool)); + return Status::OK(); + }; + + ComputeSessionPoolResource *pool_resource; + + // Synchronize access to the resource manager when getting or creating the + // ComputeSessionPool. + // Scoping for minimal mutex locking. + { + mutex_lock lock(lock_); + OP_REQUIRES_OK(context, + rmgr->LookupOrCreate( + container, "pool", &pool_resource, create_pool)); + } + ComputeSessionPool *pool = pool_resource->get(); + CHECK(pool != nullptr); + + // Get a new Session for this computation from the pool. + std::unique_ptr session = pool->GetSession(); + const string id = std::to_string(session->Id()); + + // Store it in the ResourceManager. 
+ OP_REQUIRES_OK( + context, + rmgr->Create( + container, id, new ComputeSessionResource(std::move(session)))); + + Tensor *output; + OP_REQUIRES_OK(context, + context->allocate_output(0, TensorShape({2}), &output)); + output->vec()(0) = container; + output->vec()(1) = id; + + // Unref the pool so it gets destroyed properly. + pool_resource->Unref(); + VLOG(1) << "Returning session: " << id; + } + + private: + MasterSpec master_spec_; + GridPoint grid_point_; + + // Mutex that serializes accesses to the resource manager. (These would block + // in the compute session pool anyways, so there's no regression there, and + // we need to protect from racy multiple initialization.) + tensorflow::mutex lock_; + + TF_DISALLOW_COPY_AND_ASSIGN(GetSession); +}; + +REGISTER_KERNEL_BUILDER(Name("GetSession").Device(DEVICE_CPU), GetSession); + +// Given a handle to a ComputeSession, returns it to the pool. As long as we +// start with "GetSession", DRAGNN graphs are thread-safe and there is no need +// for explicit multi-thread logic. As long as we end with "ReleaseSession", +// then memory usage will be constrained to the maximum number of concurrent +// requests. +class ReleaseSession : public OpKernel { + public: + explicit ReleaseSession(OpKernelConstruction *context) : OpKernel(context) { + string master_spec_str; + OP_REQUIRES_OK(context, context->MatchSignature({DT_STRING}, {})); + } + + void Compute(OpKernelContext *context) override { + auto handle = context->input(0).vec(); + const string &container = handle(0); + const string &id = handle(1); + VLOG(1) << "Releasing session: " << id; + ResourceMgr *rmgr = context->resource_manager(); + + // Get the pool for this container. + ComputeSessionPoolResource *pool_resource; + TF_CHECK_OK(rmgr->Lookup(container, "pool", + &pool_resource)); + auto *pool = pool_resource->get(); + CHECK(pool != nullptr); + + // Get the compute session. 
+ ComputeSessionResource *session_resource; + TF_CHECK_OK( + rmgr->Lookup(container, id, &session_resource)); + + // We need to release the ComputeSession from both the ResourceMgr and + // the ComputeSessionPool. The order of release is critical. If the + // resource is not first Delete()-ed from the ResourceMgr, then another + // thread may try to Create() the same resource, resulting in an + // "Already exists" error. + // + // First, delete the ResourceMgr reference so it can be used in the future. + TF_CHECK_OK(rmgr->Delete(container, id)); + + // Second, return the ComputeSession to the pool. + pool->ReturnSession(session_resource->release()); + + // Unref the resources so they get destroyed properly. + session_resource->Unref(); + pool_resource->Unref(); + } + + private: + TF_DISALLOW_COPY_AND_ASSIGN(ReleaseSession); +}; + +REGISTER_KERNEL_BUILDER(Name("ReleaseSession").Device(DEVICE_CPU), + ReleaseSession); + +/******************************************************************************* + * ComputeSessionOps below here. + ******************************************************************************/ + +// Given a handle to a BatchedBeamComponentState, advances based on the next +// oracle (gold) action. +class AdvanceFromOracle : public ComputeSessionOp { + public: + explicit AdvanceFromOracle(OpKernelConstruction *context) + : ComputeSessionOp(context) { + OP_REQUIRES_OK(context, context->MatchSignature({DT_STRING}, {DT_STRING})); + } + + bool OutputsHandle() const override { return true; } + bool RequiresComponentName() const override { return true; } + + void ComputeWithState(OpKernelContext *context, + ComputeSession *session) override { + session->AdvanceFromOracle(component_name()); + } + + private: + TF_DISALLOW_COPY_AND_ASSIGN(AdvanceFromOracle); +}; + +REGISTER_KERNEL_BUILDER(Name("AdvanceFromOracle").Device(DEVICE_CPU), + AdvanceFromOracle); + +// Given a handle to a BatchedBeamComponentState and a tensor of scores, +// advances the state. 
 The tensor of scores has shape batch_size x beam_size
+// x num_actions.
+class AdvanceFromPrediction : public ComputeSessionOp {
+ public:
+  explicit AdvanceFromPrediction(OpKernelConstruction *context)
+      : ComputeSessionOp(context) {
+    // Input: session handle (string).  Output: the same handle.
+    OP_REQUIRES_OK(context,
+                   context->MatchSignature({DT_STRING, DT_FLOAT}, {DT_STRING}));
+  }
+
+  // Passes the session handle through so downstream ops can sequence on it.
+  bool OutputsHandle() const override { return true; }
+  bool RequiresComponentName() const override { return true; }
+
+  void ComputeWithState(OpKernelContext *context,
+                        ComputeSession *session) override {
+    // Forwards the raw score buffer and its element count to the session;
+    // interpreting the buffer layout is the session's responsibility.
+    const Tensor &scores = context->input(1);
+    session->AdvanceFromPrediction(component_name(),
+                                   scores.tensor().data(),
+                                   scores.NumElements());
+  }
+
+ private:
+  TF_DISALLOW_COPY_AND_ASSIGN(AdvanceFromPrediction);
+};
+
+REGISTER_KERNEL_BUILDER(Name("AdvanceFromPrediction").Device(DEVICE_CPU),
+                        AdvanceFromPrediction);
+
+// Given a handle to a ComputeSession and a channel index, outputs fixed
+// features.
+// Fixed features are returned as 3 vectors of equal length:
+//   - ids: specifies which rows should be looked up in the embedding
+//     matrix,
+//   - weights: specifies a scale for each embedding vector,
+//   - indices: sorted vector that assigns the same index to embedding
+//     vectors
+//     that should be summed together.
+//
+// For example if we have 3 features, for a given channel, we might have:
+//   feature a: (5, 1)
+//   feature b: (5, 0.5), (6, 0.5)
+//   feature c: (7, 1)
+// In this case:
+//   indices should look like: [0, 1, 1, 2]
+//   ids should be [5, 5, 6, 7]
+//   weights should be [1, 0.5, 0.5, 1]
+// NOTE(review): template arguments appear to have been stripped from this
+// diff text (e.g. vec() below should name a dtype) — verify against the
+// original source before relying on the exact types.
+class ExtractFixedFeatures : public ComputeSessionOp {
+ public:
+  explicit ExtractFixedFeatures(OpKernelConstruction *context)
+      : ComputeSessionOp(context) {
+    OP_REQUIRES_OK(context, context->GetAttr("channel_id", &channel_id_));
+    // Outputs: indices (int32), ids (int64), weights (float).
+    OP_REQUIRES_OK(context, context->MatchSignature(
+                                {DT_STRING}, {DT_INT32, DT_INT64, DT_FLOAT}));
+  }
+
+  bool OutputsHandle() const override { return false; }
+  bool RequiresComponentName() const override { return true; }
+
+  void ComputeWithState(OpKernelContext *context,
+                        ComputeSession *session) override {
+    // Allocates output tensors.  The session calls these back once it knows
+    // how many feature elements it will emit.
+    // NOTE(review): CHECK here crashes the process on allocation failure
+    // instead of returning an error status via OP_REQUIRES_OK — confirm this
+    // is intentional (lambdas cannot use OP_REQUIRES_OK's early return).
+    auto indices_allocator = [context](int num_elements) {
+      Tensor *output;
+      CHECK(context->allocate_output(0, TensorShape({num_elements}), &output)
+                .ok());
+      return output->vec().data();
+    };
+    auto ids_allocator = [context](int num_elements) {
+      Tensor *ids_tensor;
+      CHECK(
+          context->allocate_output(1, TensorShape({num_elements}), &ids_tensor)
+              .ok());
+      return ids_tensor->vec().data();
+    };
+    auto weights_allocator = [context](int num_elements) {
+      Tensor *output;
+      CHECK(context->allocate_output(2, TensorShape({num_elements}), &output)
+                .ok());
+      return output->vec().data();
+    };
+    int num_features = session->GetInputFeatures(
+        component_name(), indices_allocator, ids_allocator, weights_allocator,
+        channel_id_);
+    VLOG(2) << "Extracted " << num_features;
+  }
+
+ private:
+  int channel_id_;  // Which fixed-feature channel this kernel extracts.
+  TF_DISALLOW_COPY_AND_ASSIGN(ExtractFixedFeatures);
+};
+
+REGISTER_KERNEL_BUILDER(Name("ExtractFixedFeatures").Device(DEVICE_CPU),
+                        ExtractFixedFeatures);
+
+// Given a handle to a ComputeSession and a channel index, outputs link
+// features.
Link features are returned as two vectors of length batch_size * +// beam_size * channel_size: +// - step_idx: specifies the element to read in a tensor array of activations, +// - idx: specifies the row within the tensor array element. +class ExtractLinkFeatures : public ComputeSessionOp { + public: + explicit ExtractLinkFeatures(OpKernelConstruction *context) + : ComputeSessionOp(context) { + OP_REQUIRES_OK(context, context->GetAttr("channel_id", &channel_id_)); + OP_REQUIRES_OK(context, + context->MatchSignature({DT_STRING}, {DT_INT32, DT_INT32})); + } + + bool OutputsHandle() const override { return false; } + bool RequiresComponentName() const override { return true; } + + void ComputeWithState(OpKernelContext *context, + ComputeSession *session) override { + auto features = + session->GetTranslatedLinkFeatures(component_name(), channel_id_); + + // Computes output size. + const int64 num_indices = features.size(); + + // Allocates output tensors. + Tensor *step_idx_output; + Tensor *idx_output; + OP_REQUIRES_OK(context, + context->allocate_output(0, TensorShape({num_indices}), + &step_idx_output)); + OP_REQUIRES_OK(context, context->allocate_output( + 1, TensorShape({num_indices}), &idx_output)); + + const int source_beam_size = + session->SourceComponentBeamSize(component_name(), channel_id_); + VLOG(2) << "source_beam_size:" << source_beam_size; + + // Clip step_idx for all features. If a feature is empty, set the step + // index to -1. + for (int i = 0; i < features.size(); ++i) { + if (!features[i].has_step_idx() || features[i].step_idx() < -1) { + features[i].set_step_idx(-1); + } + } + + // Fills output tensors. + for (int i = 0; i < features.size(); ++i) { + // Sets the element to read from a tensor array of activations. + step_idx_output->vec()(i) = features[i].step_idx(); + + // Within the tensor array element the id has to account for beam index + // and batch index for this specific component state. 
+ idx_output->vec()(i) = + features[i].step_idx() >= 0 + ? OutputLinearIndex(features[i], source_beam_size) + : 0; + + VLOG(2) << "features[" << i << "]: " << features[i].ShortDebugString(); + } + } + + private: + // Given the beam index and the batch index in a LinkFeatures proto, returns + // the corresponding linear index, assuming that the matrix we're indexing + // into has shape {batch_size * beam_size, activation_size}, reshaped from a + // tensor of shape {batch_size, beam_size, activation_size}. + static uint64 OutputLinearIndex(const LinkFeatures &feature, + const int beam_size) { + VLOG(2) << "OutputLinearIndex batch_idx:" << feature.batch_idx() + << " beam_size:" << beam_size << " beam_idx:" << feature.beam_idx(); + return feature.batch_idx() * beam_size + feature.beam_idx(); + } + + int channel_id_; + TF_DISALLOW_COPY_AND_ASSIGN(ExtractLinkFeatures); +}; + +REGISTER_KERNEL_BUILDER(Name("ExtractLinkFeatures").Device(DEVICE_CPU), + ExtractLinkFeatures); + +// Given a handle to a BatchedBeamComponentState, emits a vector of gold +// labels. +// The vector of gold labels has size batch_size * beam_size. 
+class EmitOracleLabels : public ComputeSessionOp {
+ public:
+  explicit EmitOracleLabels(OpKernelConstruction *context)
+      : ComputeSessionOp(context) {
+    OP_REQUIRES_OK(context, context->MatchSignature({DT_STRING}, {DT_INT32}));
+  }
+  bool OutputsHandle() const override { return false; }
+  bool RequiresComponentName() const override { return true; }
+
+  void ComputeWithState(OpKernelContext *context,
+                        ComputeSession *session) override {
+    VLOG(2) << "state->BatchSize: " << session->BatchSize(component_name());
+    VLOG(2) << "state->BeamSize: " << session->BeamSize(component_name());
+    // Output vector has one label per (batch element, beam slot) pair.
+    Tensor *output;
+    OP_REQUIRES_OK(context,
+                   context->allocate_output(
+                       0,
+                       TensorShape({session->BatchSize(component_name()) *
+                                    session->BeamSize(component_name())}),
+                       &output));
+    // Flatten the per-batch label vectors into the output in batch order.
+    // NOTE(review): assumes the total label count equals batch * beam as
+    // allocated above — no bounds check is performed here; confirm the
+    // session guarantees this.
+    std::vector> batched_labels =
+        session->EmitOracleLabels(component_name());
+    int raw_index = 0;
+    for (const auto &batch_vector : batched_labels) {
+      for (const auto &label : batch_vector) {
+        output->vec()(raw_index) = label;
+        ++raw_index;
+      }
+    }
+  }
+
+ private:
+  TF_DISALLOW_COPY_AND_ASSIGN(EmitOracleLabels);
+};
+
+REGISTER_KERNEL_BUILDER(Name("EmitOracleLabels").Device(DEVICE_CPU),
+                        EmitOracleLabels);
+
+// Given a handle to a ComponentState, emits a single bool indicating
+// whether all elements in the batch contain beams containing all final states.
+class EmitAllFinal : public ComputeSessionOp {
+ public:
+  explicit EmitAllFinal(OpKernelConstruction *context)
+      : ComputeSessionOp(context) {
+    OP_REQUIRES_OK(context, context->MatchSignature({DT_STRING}, {DT_BOOL}));
+  }
+
+  bool OutputsHandle() const override { return false; }
+  bool RequiresComponentName() const override { return true; }
+
+  void ComputeWithState(OpKernelContext *context,
+                        ComputeSession *session) override {
+    // Single-element bool vector: true iff the component reports terminal.
+    Tensor *output;
+    OP_REQUIRES_OK(context,
+                   context->allocate_output(0, TensorShape({1}), &output));
+    const bool is_terminal = session->IsTerminal(component_name());
+    VLOG(2) << "EmitAllFinal: is_terminal = " << is_terminal;
+    output->vec()(0) = is_terminal;
+  }
+
+ private:
+  TF_DISALLOW_COPY_AND_ASSIGN(EmitAllFinal);
+};
+
+REGISTER_KERNEL_BUILDER(Name("EmitAllFinal").Device(DEVICE_CPU), EmitAllFinal);
+
+// Prepares the given component for computation.
+// Input 1 is the beam size to initialize the component with.
+class InitComponentData : public ComputeSessionOp {
+ public:
+  explicit InitComponentData(OpKernelConstruction *context)
+      : ComputeSessionOp(context) {
+    OP_REQUIRES_OK(context,
+                   context->MatchSignature({DT_STRING, DT_INT32}, {DT_STRING}));
+  }
+
+  bool OutputsHandle() const override { return true; }
+
+  bool RequiresComponentName() const override { return true; }
+
+  void ComputeWithState(OpKernelContext *context,
+                        ComputeSession *session) override {
+    const int beam_size = context->input(1).scalar()();
+    session->InitializeComponentData(component_name(), beam_size);
+  }
+};
+
+REGISTER_KERNEL_BUILDER(Name("InitComponentData").Device(DEVICE_CPU),
+                        InitComponentData);
+
+// Returns the given component's batch size.
+class BatchSize : public ComputeSessionOp {
+ public:
+  explicit BatchSize(OpKernelConstruction *context)
+      : ComputeSessionOp(context) {
+    OP_REQUIRES_OK(context, context->MatchSignature({DT_STRING}, {DT_INT32}));
+  }
+
+  bool OutputsHandle() const override { return false; }
+  bool RequiresComponentName() const override { return true; }
+
+  void ComputeWithState(OpKernelContext *context,
+                        ComputeSession *session) override {
+    // Scalar int32 output (empty TensorShape == rank-0 tensor).
+    Tensor *output;
+    OP_REQUIRES_OK(context,
+                   context->allocate_output(0, TensorShape({}), &output));
+    output->scalar()() = session->BatchSize(component_name());
+  }
+};
+
+REGISTER_KERNEL_BUILDER(Name("BatchSize").Device(DEVICE_CPU), BatchSize);
+
+// Attaches a data source to the master.
+// Input 1 is a string vector of serialized input records.
+class AttachDataReader : public ComputeSessionOp {
+ public:
+  explicit AttachDataReader(OpKernelConstruction *context)
+      : ComputeSessionOp(context) {
+    OP_REQUIRES_OK(
+        context, context->MatchSignature({DT_STRING, DT_STRING}, {DT_STRING}));
+  }
+
+  bool OutputsHandle() const override { return true; }
+  bool RequiresComponentName() const override { return false; }
+
+  // Calls SetInputData() on the ComputeSession.
+  void ComputeWithState(OpKernelContext *context,
+                        ComputeSession *session) override {
+    auto input_data(context->input(1).vec());
+
+    // Copy the input tensor's elements into a plain vector for the session.
+    std::vector data;
+    for (int i = 0; i < input_data.size(); ++i) {
+      data.push_back(input_data(i));
+    }
+    session->SetInputData(data);
+  }
+};
+
+REGISTER_KERNEL_BUILDER(Name("AttachDataReader").Device(DEVICE_CPU),
+                        AttachDataReader);
+
+// Sets the tracing flag on the master state, which will enable or disable
+// tracing as inference / training is run.
+class SetTracing : public ComputeSessionOp {
+ public:
+  explicit SetTracing(OpKernelConstruction *context)
+      : ComputeSessionOp(context) {
+    // Inputs: session handle, bool flag.  Output: the same handle.
+    OP_REQUIRES_OK(context,
+                   context->MatchSignature({DT_STRING, DT_BOOL}, {DT_STRING}));
+  }
+
+  bool OutputsHandle() const override { return true; }
+  bool RequiresComponentName() const override { return false; }
+
+  // Calls SetTracing() on the ComputeSession.
+  void ComputeWithState(OpKernelContext *context,
+                        ComputeSession *session) override {
+    auto tracing_on = context->input(1).scalar()();
+    session->SetTracing(tracing_on);
+  }
+};
+
+REGISTER_KERNEL_BUILDER(Name("SetTracing").Device(DEVICE_CPU), SetTracing);
+
+// Finalizes the given component's data in the ComputeSession, writing
+// annotations back so they can later be emitted.
+class WriteAnnotations : public ComputeSessionOp {
+ public:
+  explicit WriteAnnotations(OpKernelConstruction *context)
+      : ComputeSessionOp(context) {
+    OP_REQUIRES_OK(context, context->MatchSignature({DT_STRING}, {DT_STRING}));
+  }
+
+  bool OutputsHandle() const override { return true; }
+  bool RequiresComponentName() const override { return true; }
+
+  void ComputeWithState(OpKernelContext *context,
+                        ComputeSession *session) override {
+    session->FinalizeData(component_name());
+  }
+};
+
+REGISTER_KERNEL_BUILDER(Name("WriteAnnotations").Device(DEVICE_CPU),
+                        WriteAnnotations);
+
+// Given a handle to a ComponentState, emits a vector of strings
+// corresponding to the serialized predictions of the model.
+class EmitAnnotations : public ComputeSessionOp {
+ public:
+  explicit EmitAnnotations(OpKernelConstruction *context)
+      : ComputeSessionOp(context) {
+    OP_REQUIRES_OK(context, context->MatchSignature({DT_STRING}, {DT_STRING}));
+  }
+
+  bool OutputsHandle() const override { return false; }
+  bool RequiresComponentName() const override { return true; }
+
+  void ComputeWithState(OpKernelContext *context,
+                        ComputeSession *session) override {
+    // Get the annotations from the state.
+    auto annotations = session->GetSerializedPredictions();
+
+    // Copy annotations to the output.
+ Tensor *output; + const int64 output_size = annotations.size(); + OP_REQUIRES_OK(context, context->allocate_output( + 0, TensorShape({output_size}), &output)); + auto annotations_output = output->vec(); + for (int i = 0; i < annotations.size(); ++i) { + annotations_output(i) = annotations[i]; + } + } + + private: + TF_DISALLOW_COPY_AND_ASSIGN(EmitAnnotations); +}; + +REGISTER_KERNEL_BUILDER(Name("EmitAnnotations").Device(DEVICE_CPU), + EmitAnnotations); + +// Get the component trace. +class GetComponentTrace : public ComputeSessionOp { + public: + explicit GetComponentTrace(OpKernelConstruction *context) + : ComputeSessionOp(context) { + OP_REQUIRES_OK(context, context->MatchSignature({DT_STRING}, {DT_STRING})); + } + + bool OutputsHandle() const override { return false; } + bool RequiresComponentName() const override { return true; } + + void ComputeWithState(OpKernelContext *context, + ComputeSession *session) override { + auto traces = session->GetTraceProtos(); + + const int64 size = traces.size(); + Tensor *trace_output_tensor; + OP_REQUIRES_OK(context, context->allocate_output(0, TensorShape({size}), + &trace_output_tensor)); + auto trace_output = trace_output_tensor->vec(); + for (int i = 0; i < size; ++i) { + CHECK(traces[i].SerializeToString(&trace_output(i))); + } + } + + TF_DISALLOW_COPY_AND_ASSIGN(GetComponentTrace); +}; + +REGISTER_KERNEL_BUILDER(Name("GetComponentTrace").Device(DEVICE_CPU), + GetComponentTrace); + +} // namespace dragnn +} // namespace syntaxnet diff --git a/syntaxnet/dragnn/core/ops/dragnn_op_kernels_test.cc b/syntaxnet/dragnn/core/ops/dragnn_op_kernels_test.cc new file mode 100644 index 0000000000000000000000000000000000000000..09efeb0b7085be0eb095d35994c7faffdb8dca27 --- /dev/null +++ b/syntaxnet/dragnn/core/ops/dragnn_op_kernels_test.cc @@ -0,0 +1,851 @@ +#include +#include +#include + +#include "dragnn/core/compute_session.h" +#include "dragnn/core/compute_session_pool.h" +#include "dragnn/core/resource_container.h" +#include 
"dragnn/core/test/generic.h" +#include "dragnn/core/test/mock_compute_session.h" + +#include + +#include "tensorflow/core/framework/allocator.h" +#include "tensorflow/core/framework/control_flow.h" +#include "tensorflow/core/framework/fake_input.h" +#include "tensorflow/core/framework/node_def_builder.h" +#include "tensorflow/core/framework/op_kernel.h" +#include "tensorflow/core/framework/tensor.h" +#include "tensorflow/core/framework/tensor_testutil.h" +#include "tensorflow/core/framework/types.h" +#include "tensorflow/core/framework/types.pb.h" +#include "tensorflow/core/kernels/ops_testutil.h" +#include "tensorflow/core/kernels/ops_util.h" +#include "tensorflow/core/lib/core/status_test_util.h" +#include "tensorflow/core/platform/test.h" + +namespace syntaxnet { +namespace dragnn { + +using tensorflow::AllocatorAttributes; +using tensorflow::checkpoint::TensorSliceReaderCacheWrapper; +using tensorflow::DT_BOOL; +using tensorflow::DT_FLOAT; +using tensorflow::DT_STRING; +using tensorflow::DT_INT32; +using tensorflow::FrameAndIter; +using tensorflow::DataType; +using tensorflow::NodeDefBuilder; +using tensorflow::OpKernelContext; +using tensorflow::ResourceMgr; +using tensorflow::ScopedStepContainer; +using tensorflow::Status; +using tensorflow::test::SetOutputAttrs; +using tensorflow::TensorShape; + +using testing::_; +using testing::ElementsAreArray; +using testing::Invoke; +using testing::Pointwise; +using testing::Return; + +typedef ResourceContainer ComputeSessionResource; +typedef ResourceContainer ComputeSessionPoolResource; + +class DragnnOpKernelsTest : public tensorflow::OpsTestBase { + public: + void ResetOpKernelContext() { + params_.reset(new OpKernelContext::Params); + params_->device = device_.get(); + params_->frame_iter = FrameAndIter(0, 0); + params_->inputs = &inputs_; + params_->op_kernel = kernel_.get(); + step_container_.reset(new ScopedStepContainer(0, [](const string &) {})); + params_->step_container = step_container_.get(); + 
attrs_.clear(); + SetOutputAttrs(params_.get(), &attrs_); + TensorSliceReaderCacheWrapper slice_reader_cache_wrapper; + params_->slice_reader_cache = &slice_reader_cache_wrapper; + params_->resource_manager = device_->resource_manager(); + context_.reset(new OpKernelContext(params_.get())); + } + + Status RunOpKernelWithContext() { + device_->Compute(kernel_.get(), context_.get()); + return context_->status(); + } + + // Accessor for the underlying resource manager. + ResourceMgr *resource_mgr() { return params_->resource_manager; } + + // This needs to maintain its existence throughout the compute call. + std::vector attrs_; +}; + +// Helper function to build LinkFeatures. +LinkFeatures MakeFeatures(int batch_index, int beam_index, int step) { + LinkFeatures features; + features.set_batch_idx(batch_index); + features.set_beam_idx(beam_index); + features.set_step_idx(step); + return features; +} + +// The GetSessionOp should +// 1. create a ComputeSessionPool resource and store it in the ResourceMgr, +// 2. create a ComputeSession resource and store it in the ResourceMgr, +// 3. return the container and id strings in its output. +TEST_F(DragnnOpKernelsTest, GetSessionOpTest) { + // Create a MasterSpec and GridPoint string to pass into the attrs for this + // op. + MasterSpec spec; + spec.set_debug_tracing(true); + string master_spec_str; + spec.SerializeToString(&master_spec_str); + + GridPoint hyperparams; + string hyperparams_str; + hyperparams.SerializeToString(&hyperparams_str); + + // Create and initialize the kernel under test. + TF_ASSERT_OK( + NodeDefBuilder("get_session", "GetSession") + .Attr("master_spec", master_spec_str) + .Attr("grid_point", hyperparams_str) + .Input(FakeInput(DT_STRING)) // The handle for the ComputeSession. + .Finalize(node_def())); + TF_ASSERT_OK(InitOp()); + + // Set the input data. 
+ const string container_string = "container_str"; + AddInputFromList(TensorShape({1}), {container_string}); + + // Reset the test context to ensure it's clean. + ResetOpKernelContext(); + + // Run the kernel. + TF_EXPECT_OK(RunOpKernelWithContext()); + + // Expect that the 0th output contains two strings, and that the ResourceMgr + // contains a ComputeSessionResource associated with those two strings. + const string container_str = GetOutput(0)->vec()(0); + const string id_str = GetOutput(0)->vec()(1); + VLOG(2) << "container: " << container_str << " id: " << id_str; + + // The first compute session should have id "0". + EXPECT_EQ("0", id_str); + ComputeSessionResource *session_resource; + TF_EXPECT_OK(resource_mgr()->Lookup( + container_str, id_str, &session_resource)); + + // Expect that the ResourceMgr also contains a ComputeSessionPoolResource. + const string pool_id_str = "pool"; + ComputeSessionPoolResource *pool_resource; + TF_EXPECT_OK(resource_mgr()->Lookup( + container_str, pool_id_str, &pool_resource)); + + // Unref the managed resources so they get destroyed properly. + session_resource->Unref(); + pool_resource->Unref(); +} + +// The GetSessionOp should take a session stored in the resource manager +// and return it to the ComputeSessionPool. +TEST_F(DragnnOpKernelsTest, ReleaseSessionOpTest) { + // Create and initialize the kernel under test. + TF_ASSERT_OK( + NodeDefBuilder("release_session", "ReleaseSession") + .Input(FakeInput(DT_STRING)) // The handle for the ComputeSession. + .Finalize(node_def())); + TF_ASSERT_OK(InitOp()); + + // Set the input data. + const string container_string = "container_str"; + const string id_string = "id_str"; + AddInputFromList(TensorShape({2}), {container_string, id_string}); + + // Reset the test context to ensure it's clean. + ResetOpKernelContext(); + + // Create a ComputeSessionPool. 
+ MasterSpec spec; + GridPoint hyperparams; + std::unique_ptr pool( + new ComputeSessionPool(spec, hyperparams)); + + // Get an unowned pointer to the ComputeSessionPool before moving + // the pool to the resource manager. + ComputeSessionPool *pool_ptr = pool.get(); + TF_ASSERT_OK(resource_mgr()->Create( + container_string, "pool", + new ComputeSessionPoolResource(std::move(pool)))); + + // Create a ComputeSession and move it to the resource manager. + TF_ASSERT_OK(resource_mgr()->Create( + container_string, id_string, + new ComputeSessionResource(pool_ptr->GetSession()))); + + // At this point, the pool should report that it has one outstanding session. + EXPECT_EQ(1, pool_ptr->num_outstanding_sessions()); + + // Run the kernel. + TF_EXPECT_OK(RunOpKernelWithContext()); + + // At this point, the pool should report that it has no outstanding sessions. + EXPECT_EQ(0, pool_ptr->num_outstanding_sessions()); + + // The resource manager should no longer contain the session object. + ComputeSessionResource *null_resource = nullptr; + auto result = resource_mgr()->Lookup( + container_string, id_string, &null_resource); + EXPECT_NE(Status::OK(), result); + EXPECT_EQ(null_resource, nullptr); +} + +// The AdvanceFromOracle op should call AdvanceFromOracle on the specified +// component name. +TEST_F(DragnnOpKernelsTest, AdvanceFromOracleOpTest) { + // Create and initialize the kernel under test. + const string component_name = "TESTING_COMPONENT_NAME"; + TF_ASSERT_OK( + NodeDefBuilder("advance_from_oracle", "AdvanceFromOracle") + .Attr("component", component_name) + .Input(FakeInput(DT_STRING)) // The handle for the ComputeSession. + .Finalize(node_def())); + TF_ASSERT_OK(InitOp()); + + // Set the input data. + const string container_string = "container_str"; + const string id_string = "id_str"; + AddInputFromList(TensorShape({2}), {container_string, id_string}); + + // Reset the test context to ensure it's clean. 
+ ResetOpKernelContext(); + + // Create a MockComputeSession and set expectations. + std::unique_ptr mock_session(new MockComputeSession()); + MockComputeSession *mock_session_ptr = mock_session.get(); + + // Wrap the ComputeSessionResource and put it into the resource manager. + TF_ASSERT_OK(resource_mgr()->Create( + container_string, id_string, + new ComputeSessionResource(std::move(mock_session)))); + + // Set expectations on the mock session. + EXPECT_CALL(*mock_session_ptr, AdvanceFromOracle(component_name)); + + // Run the kernel. + TF_EXPECT_OK(RunOpKernelWithContext()); +} + +// The AdvanceFromPredicton op should call AdvanceFromPrediction on the +// specified component with the passed scores. +TEST_F(DragnnOpKernelsTest, AdvanceFromPredictionOpTest) { + // Create and initialize the kernel under test. + const string component_name = "TESTING_COMPONENT_NAME"; + TF_ASSERT_OK( + NodeDefBuilder("advance_from_prediction", "AdvanceFromPrediction") + .Attr("component", component_name) + .Input(FakeInput(DT_STRING)) // The handle for the ComputeSession. + .Input(FakeInput(DT_FLOAT)) // The prediction tensor. + .Finalize(node_def())); + TF_ASSERT_OK(InitOp()); + + // Set the input data. + const string container_string = "container_str"; + const string id_string = "id_str"; + AddInputFromList(TensorShape({2}), {container_string, id_string}); + const std::vector weights = {1.1, 2.2, 3.3, 4.4}; + AddInputFromArray(TensorShape({2, 2}), weights); + + // Reset the test context to ensure it's clean. + ResetOpKernelContext(); + + // Create a MockComputeSession and set expectations. + std::unique_ptr mock_session(new MockComputeSession()); + MockComputeSession *mock_session_ptr = mock_session.get(); + + // Wrap the ComputeSessionResource and put it into the resource manager. + TF_ASSERT_OK(resource_mgr()->Create( + container_string, id_string, + new ComputeSessionResource(std::move(mock_session)))); + + // Set expectations on the mock session. 
+ auto validator_function = [weights](const string &component_name, + const float score_matrix[], + int score_matrix_length) { + EXPECT_EQ(weights.size(), score_matrix_length); + for (int i = 0; i < weights.size(); ++i) { + EXPECT_EQ(weights[i], score_matrix[i]); + } + }; + EXPECT_CALL(*mock_session_ptr, AdvanceFromPrediction(component_name, _, _)) + .WillOnce(Invoke(validator_function)); + + // Run the kernel. + TF_EXPECT_OK(RunOpKernelWithContext()); +} + +// The ExtractFixedFeatures op should return a set of fixed feature vectors +// as described below. +TEST_F(DragnnOpKernelsTest, ExtractFixedFeaturesOpTest) { + // Create and initialize the kernel under test. + const string component_name = "TESTING_COMPONENT_NAME"; + constexpr int kChannelId = 78; + TF_ASSERT_OK( + NodeDefBuilder("advance_from_prediction", "ExtractFixedFeatures") + .Attr("component", component_name) + .Attr("channel_id", kChannelId) + .Input(FakeInput(DT_STRING)) // The handle for the ComputeSession. + .Finalize(node_def())); + TF_ASSERT_OK(InitOp()); + + // Set the input data. + const string container_string = "container_str"; + const string id_string = "id_str"; + AddInputFromList(TensorShape({2}), {container_string, id_string}); + + // Reset the test context to ensure it's clean. + ResetOpKernelContext(); + + // Create a MockComputeSession and set expectations. + std::unique_ptr mock_session(new MockComputeSession()); + MockComputeSession *mock_session_ptr = mock_session.get(); + + // Wrap the ComputeSessionResource and put it into the resource manager. + TF_ASSERT_OK(resource_mgr()->Create( + container_string, id_string, + new ComputeSessionResource(std::move(mock_session)))); + + // If we have 3 features, for a given channel, we might have: + // feature a: (5, 1) + // feature b: (5, 0.5), (6, 0.7) + // feature c: (3, 0.1), (7, [empty]) <- Empty weights are equivalent to 1.0. 
+ // In this case: + // indices should look like [0 , 1 , 1 , 2 , 2 ] + // ids should be [5 , 5 , 6 , 3 , 7 ] + // weights should be [1.0, 0.5, 0.7, 0.1, 1.0] + const std::vector expected_indices({0, 1, 1, 2, 2}); + const std::vector expected_ids({5, 5, 6, 3, 7}); + const std::vector expected_weights({1.0, 0.5, 0.7, 0.1, 1.0}); + + auto assigner_function = + [=](string, std::function indices_allocator, + std::function ids_allocator, + std::function weights_allocator, int) { + constexpr int kFeatureCount = 5; + int32 *indices = indices_allocator(kFeatureCount); + int64 *ids = ids_allocator(kFeatureCount); + float *weights = weights_allocator(kFeatureCount); + for (int i = 0; i < kFeatureCount; ++i) { + indices[i] = expected_indices[i]; + ids[i] = expected_ids[i]; + weights[i] = expected_weights[i]; + } + return kFeatureCount; + }; + + EXPECT_CALL(*mock_session_ptr, + GetInputFeatures(component_name, _, _, _, kChannelId)) + .WillOnce(testing::Invoke(assigner_function)); + + // Run the kernel. + TF_EXPECT_OK(RunOpKernelWithContext()); + + // Validate the outputs. + EXPECT_EQ(expected_indices.size(), GetOutput(0)->NumElements()); + for (int i = 0; i < expected_indices.size(); ++i) { + EXPECT_EQ(expected_indices[i], GetOutput(0)->vec()(i)); + } + EXPECT_EQ(expected_ids.size(), GetOutput(1)->NumElements()); + for (int i = 0; i < expected_ids.size(); ++i) { + EXPECT_EQ(expected_ids[i], GetOutput(1)->vec()(i)); + } + EXPECT_EQ(expected_weights.size(), GetOutput(2)->NumElements()); + for (int i = 0; i < expected_weights.size(); ++i) { + EXPECT_EQ(expected_weights[i], GetOutput(2)->vec()(i)); + } +} + +// The ExtractLinkFeatures op should return a set of linked feature vectors +// as described below. +TEST_F(DragnnOpKernelsTest, ExtractLinkFeaturesOpTest) { + // TODO(googleuser): Is a 2-vector output the correct way to do this? + // Why reshape instead of passing [batch, beam, index] or just + // [batch,index] ? + // Create and initialize the kernel under test. 
+  const string component_name = "TESTING_COMPONENT_NAME";
+  constexpr int kChannelId = 3421;
+  TF_ASSERT_OK(
+      NodeDefBuilder("extract_link_features", "ExtractLinkFeatures")
+          .Attr("component", component_name)
+          .Attr("channel_id", kChannelId)
+          .Input(FakeInput(DT_STRING))  // The handle for the ComputeSession.
+          .Finalize(node_def()));
+  TF_ASSERT_OK(InitOp());
+
+  // Set the input data.
+  const string container_string = "container_str";
+  const string id_string = "id_str";
+  AddInputFromList(TensorShape({2}), {container_string, id_string});
+
+  // Reset the test context to ensure it's clean.
+  ResetOpKernelContext();
+
+  std::unique_ptr mock_session(new MockComputeSession());
+  MockComputeSession *mock_session_ptr = mock_session.get();
+
+  // This op will return link features in two flat arrays using batch-major
+  // ordering. So, if we have a batch of 2 and a beam of 3, with data as follows
+  // (note that the features are {batch,beam,step} and [] is 'empty')
+  //   batch 1 features: {{02,03,[]},{01,00,04},{08,06,01}}
+  //   batch 2 features: {{12,13,14},{11,12,-1},{18,16,20}}
+  //
+  // and a **source component** beam size of 5 should result in output tensors:
+  //   step_idx  (tensor 0): {-1, 4, 1, 14, -1, 20}
+  //   array_idx (tensor 1): { 0, 5, 46, 73, 0, 106}
+  //   (0 [step=-1]),(5=1*5+0),(46=8*5+6),(73=12*5+13),(0 [step=-1]),(106=18*5+16)
+  constexpr int kSourceComponentBeamSize = 5;
+
+  std::vector features;
+  features.push_back(MakeFeatures(2, 3, -1));
+  features.back().clear_step_idx();  // step_idx is now empty.
+ features.push_back(MakeFeatures(1, 0, 4)); + features.push_back(MakeFeatures(8, 6, 1)); + features.push_back(MakeFeatures(12, 13, 14)); + features.push_back(MakeFeatures(11, 12, -1)); + features.push_back(MakeFeatures(18, 16, 20)); + + const std::vector expected_step_idx({-1, 4, 1, 14, -1, 20}); + const std::vector expected_array_idx({0, 5, 46, 73, 0, 106}); + + EXPECT_CALL(*mock_session_ptr, + SourceComponentBeamSize(component_name, kChannelId)) + .WillRepeatedly(Return(kSourceComponentBeamSize)); + EXPECT_CALL(*mock_session_ptr, + GetTranslatedLinkFeatures(component_name, kChannelId)) + .WillOnce(Return(features)); + + // Wrap the ComputeSessionResource and put it into the resource manager. + TF_ASSERT_OK(resource_mgr()->Create( + container_string, id_string, + new ComputeSessionResource(std::move(mock_session)))); + + // Run the kernel. + TF_EXPECT_OK(RunOpKernelWithContext()); + + // Validate the outputs. + EXPECT_EQ(expected_step_idx.size(), GetOutput(0)->NumElements()); + for (int i = 0; i < expected_step_idx.size(); ++i) { + EXPECT_EQ(expected_step_idx[i], GetOutput(0)->vec()(i)); + } + EXPECT_EQ(expected_array_idx.size(), GetOutput(1)->NumElements()); + for (int i = 0; i < expected_array_idx.size(); ++i) { + EXPECT_EQ(expected_array_idx[i], GetOutput(1)->vec()(i)); + } +} + +// The EmitOracleLabels op should return a set of oracle labels for all +// elements in all beams in all batches. +TEST_F(DragnnOpKernelsTest, EmitOracleLabelsOpTest) { + // Create and initialize the kernel under test. + const string component_name = "TESTING_COMPONENT_NAME"; + TF_ASSERT_OK( + NodeDefBuilder("emit_oracle_labels", "EmitOracleLabels") + .Attr("component", component_name) + .Input(FakeInput(DT_STRING)) // The handle for the ComputeSession. + .Finalize(node_def())); + TF_ASSERT_OK(InitOp()); + + // Set the input data. 
+ const string container_string = "container_str"; + const string id_string = "id_str"; + AddInputFromList(TensorShape({2}), {container_string, id_string}); + + // Reset the test context to ensure it's clean. + ResetOpKernelContext(); + + // Create a MockComputeSession and set expectations. + std::unique_ptr mock_session(new MockComputeSession()); + MockComputeSession *mock_session_ptr = mock_session.get(); + + // Wrap the ComputeSessionResource and put it into the resource manager. + TF_ASSERT_OK(resource_mgr()->Create( + container_string, id_string, + new ComputeSessionResource(std::move(mock_session)))); + + // The op should request the batch and beam size, then request the oracle + // labels. They should be returned batch major, so: + // batch 1 oracle labels: {1, 3, 5, 7} + // batch 2 oracle labels: {2, 4, 6, 8} + // should result in an output tensor as follows: + // {1, 3, 5, 7, 2, 4, 6, 8} + + constexpr int kBatchSize = 2; + constexpr int kBeamSize = 4; + const std::vector> oracle_labels( + {{1, 3, 5, 7}, {2, 4, 6, 8}}); + + EXPECT_CALL(*mock_session_ptr, BatchSize(component_name)) + .WillRepeatedly(Return(kBatchSize)); + EXPECT_CALL(*mock_session_ptr, BeamSize(component_name)) + .WillRepeatedly(Return(kBeamSize)); + EXPECT_CALL(*mock_session_ptr, EmitOracleLabels(component_name)) + .WillOnce(Return(oracle_labels)); + + const std::vector expected_labels({1, 3, 5, 7, 2, 4, 6, 8}); + + // Run the kernel. + TF_EXPECT_OK(RunOpKernelWithContext()); + + // Validate the outputs. + EXPECT_EQ(expected_labels.size(), GetOutput(0)->NumElements()); + for (int i = 0; i < expected_labels.size(); ++i) { + EXPECT_EQ(expected_labels[i], GetOutput(0)->vec()(i)); + } +} + +// The EmitAllFinal op should return the result of IsTerminal(component_name). +TEST_F(DragnnOpKernelsTest, EmitAllFinalOpTest) { + // Create and initialize the kernel under test. 
+ const string component_name = "TESTING_COMPONENT_NAME"; + TF_ASSERT_OK( + NodeDefBuilder("emit_all_final", "EmitAllFinal") + .Attr("component", component_name) + .Input(FakeInput(DT_STRING)) // The handle for the ComputeSession. + .Finalize(node_def())); + TF_ASSERT_OK(InitOp()); + + // Set the input data. + const string container_string = "container_str"; + const string id_string = "id_str"; + AddInputFromList(TensorShape({2}), {container_string, id_string}); + + // Reset the test context to ensure it's clean. + ResetOpKernelContext(); + + // Create a MockComputeSession and set expectations. + std::unique_ptr mock_session(new MockComputeSession()); + MockComputeSession *mock_session_ptr = mock_session.get(); + + // Wrap the ComputeSessionResource and put it into the resource manager. + TF_ASSERT_OK(resource_mgr()->Create( + container_string, id_string, + new ComputeSessionResource(std::move(mock_session)))); + + // Set up mocks. + constexpr bool kIsTerminal = true; + EXPECT_CALL(*mock_session_ptr, IsTerminal(component_name)) + .WillOnce(Return(kIsTerminal)); + + // Run the kernel. + TF_EXPECT_OK(RunOpKernelWithContext()); + + // Validate the outputs. + EXPECT_EQ(1, GetOutput(0)->NumElements()); + EXPECT_EQ(kIsTerminal, GetOutput(0)->vec()(0)); +} + +// The InitComponent op should initialize the given component with the given +// beam size. +// TODO(googleuser): Should we just store the beam size somewhere in the +// ComputeSession? +TEST_F(DragnnOpKernelsTest, InitComponentDataOpTest) { + // Create and initialize the kernel under test. + const string component_name = "TESTING_COMPONENT_NAME"; + TF_ASSERT_OK( + NodeDefBuilder("init_component_data", "InitComponentData") + .Attr("component", component_name) + .Input(FakeInput(DT_STRING)) // The handle for the ComputeSession. + .Input(FakeInput(DT_INT32)) // The beam size. + .Finalize(node_def())); + TF_ASSERT_OK(InitOp()); + + // Set the input data. 
+ const string container_string = "container_str"; + const string id_string = "id_str"; + AddInputFromList(TensorShape({2}), {container_string, id_string}); + constexpr int32 kBeamSize = 9001; + AddInputFromList(TensorShape({1}), {kBeamSize}); + + // Reset the test context to ensure it's clean. + ResetOpKernelContext(); + + // Create a MockComputeSession and set expectations. + std::unique_ptr mock_session(new MockComputeSession()); + MockComputeSession *mock_session_ptr = mock_session.get(); + + // Wrap the ComputeSessionResource and put it into the resource manager. + TF_ASSERT_OK(resource_mgr()->Create( + container_string, id_string, + new ComputeSessionResource(std::move(mock_session)))); + + // Set up mocks. + EXPECT_CALL(*mock_session_ptr, + InitializeComponentData(component_name, kBeamSize)); + + // Run the kernel. + TF_EXPECT_OK(RunOpKernelWithContext()); + + // Output should be the input handle. + EXPECT_EQ(container_string, GetOutput(0)->vec()(0)); + EXPECT_EQ(id_string, GetOutput(0)->vec()(1)); +} + +// The BatchSize op should call BatchSize on the ComputeSession with the given +// component as argument. +TEST_F(DragnnOpKernelsTest, BatchSizeOpTest) { + // Create and initialize the kernel under test. + const string component_name = "TESTING_COMPONENT_NAME"; + TF_ASSERT_OK( + NodeDefBuilder("batch_size", "BatchSize") + .Attr("component", component_name) + .Input(FakeInput(DT_STRING)) // The handle for the ComputeSession. + .Finalize(node_def())); + TF_ASSERT_OK(InitOp()); + + // Set the input data. + const string container_string = "container_str"; + const string id_string = "id_str"; + AddInputFromList(TensorShape({2}), {container_string, id_string}); + + // Reset the test context to ensure it's clean. + ResetOpKernelContext(); + + // Create a MockComputeSession and set expectations. 
+ std::unique_ptr mock_session(new MockComputeSession()); + MockComputeSession *mock_session_ptr = mock_session.get(); + + // Wrap the ComputeSessionResource and put it into the resource manager. + TF_ASSERT_OK(resource_mgr()->Create( + container_string, id_string, + new ComputeSessionResource(std::move(mock_session)))); + + // Set up mocks. + constexpr int kBatchSize = 8; + EXPECT_CALL(*mock_session_ptr, BatchSize(component_name)) + .WillOnce(Return(kBatchSize)); + + // Run the kernel. + TF_EXPECT_OK(RunOpKernelWithContext()); + + // Output should be the batch size. + EXPECT_EQ(kBatchSize, GetOutput(0)->scalar()()); +} + +// The AttachDataReader op should push the given vector of strings into the +// session. +TEST_F(DragnnOpKernelsTest, AttachDataReaderOpTest) { + // Create and initialize the kernel under test. + const string component_name = "TESTING_COMPONENT_NAME"; + TF_ASSERT_OK( + NodeDefBuilder("attach_data_reader", "AttachDataReader") + .Input(FakeInput(DT_STRING)) // The handle for the ComputeSession. + .Input(FakeInput(DT_STRING)) // The data to pass to the session. + .Finalize(node_def())); + TF_ASSERT_OK(InitOp()); + + // Set the input data. + const string container_string = "container_str"; + const string id_string = "id_str"; + AddInputFromList(TensorShape({2}), {container_string, id_string}); + + const std::vector data( + {"one string", "two string", "red string", "blue string"}); + AddInputFromArray(TensorShape({4}), data); + + // Reset the test context to ensure it's clean. + ResetOpKernelContext(); + + // Create a MockComputeSession and set expectations. + std::unique_ptr mock_session(new MockComputeSession()); + MockComputeSession *mock_session_ptr = mock_session.get(); + + // Wrap the ComputeSessionResource and put it into the resource manager. + TF_ASSERT_OK(resource_mgr()->Create( + container_string, id_string, + new ComputeSessionResource(std::move(mock_session)))); + + // Set up mocks. 
+ EXPECT_CALL(*mock_session_ptr, SetInputData(ElementsAreArray(data))); + + // Run the kernel. + TF_EXPECT_OK(RunOpKernelWithContext()); +} + +// The SetTracingOp should pass its argument through to the underlying +// ComputeSession. +TEST_F(DragnnOpKernelsTest, SetTracingOpTest) { + // Create and initialize the kernel under test. + const string component_name = "TESTING_COMPONENT_NAME"; + TF_ASSERT_OK( + NodeDefBuilder("set_tracing", "SetTracing") + .Input(FakeInput(DT_STRING)) // The handle for the ComputeSession. + .Input(FakeInput(DT_BOOL)) // The boolean to set tracing to. + .Finalize(node_def())); + TF_ASSERT_OK(InitOp()); + + // Set the input data. + const string container_string = "container_str"; + const string id_string = "id_str"; + AddInputFromList(TensorShape({2}), {container_string, id_string}); + constexpr bool kSetTracing = true; + AddInputFromList(TensorShape({1}), {kSetTracing}); + + // Reset the test context to ensure it's clean. + ResetOpKernelContext(); + + // Create a MockComputeSession and set expectations. + std::unique_ptr mock_session(new MockComputeSession()); + MockComputeSession *mock_session_ptr = mock_session.get(); + + // Wrap the ComputeSessionResource and put it into the resource manager. + TF_ASSERT_OK(resource_mgr()->Create( + container_string, id_string, + new ComputeSessionResource(std::move(mock_session)))); + + // Set expectations on the mock session. + EXPECT_CALL(*mock_session_ptr, SetTracing(kSetTracing)); + + // Run the kernel. + TF_EXPECT_OK(RunOpKernelWithContext()); +} + +// The WriteAnnotations op should call FinalizeData on the current component. +TEST_F(DragnnOpKernelsTest, WriteAnnotationsOpTest) { + // Create and initialize the kernel under test. + const string component_name = "TESTING_COMPONENT_NAME"; + TF_ASSERT_OK( + NodeDefBuilder("write_annotations", "WriteAnnotations") + .Attr("component", component_name) + .Input(FakeInput(DT_STRING)) // The handle for the ComputeSession. 
+ .Finalize(node_def())); + TF_ASSERT_OK(InitOp()); + + // Set the input data. + const string container_string = "container_str"; + const string id_string = "id_str"; + AddInputFromList(TensorShape({2}), {container_string, id_string}); + + // Reset the test context to ensure it's clean. + ResetOpKernelContext(); + + // Create a MockComputeSession and set expectations. + std::unique_ptr mock_session(new MockComputeSession()); + MockComputeSession *mock_session_ptr = mock_session.get(); + + // Wrap the ComputeSessionResource and put it into the resource manager. + TF_ASSERT_OK(resource_mgr()->Create( + container_string, id_string, + new ComputeSessionResource(std::move(mock_session)))); + + // Set expectations on the mock session. + EXPECT_CALL(*mock_session_ptr, FinalizeData(component_name)); + + // Run the kernel. + TF_EXPECT_OK(RunOpKernelWithContext()); +} + +// The EmitAnnotations op should return a vector of annotated strings as +// described below. +TEST_F(DragnnOpKernelsTest, EmitAnnotationsOpTest) { + // Create and initialize the kernel under test. + const string component_name = "TESTING_COMPONENT_NAME"; + TF_ASSERT_OK( + NodeDefBuilder("emit_annotations", "EmitAnnotations") + .Attr("component", component_name) + .Input(FakeInput(DT_STRING)) // The handle for the ComputeSession. + .Finalize(node_def())); + TF_ASSERT_OK(InitOp()); + + // Set the input data. + const string container_string = "container_str"; + const string id_string = "id_str"; + AddInputFromList(TensorShape({2}), {container_string, id_string}); + + // Reset the test context to ensure it's clean. 
+ ResetOpKernelContext(); + + std::unique_ptr mock_session(new MockComputeSession()); + MockComputeSession *mock_session_ptr = mock_session.get(); + + constexpr int kBatchSize = 2; + std::vector predictions({"one", "two"}); + + EXPECT_CALL(*mock_session_ptr, BatchSize(component_name)) + .WillRepeatedly(Return(kBatchSize)); + EXPECT_CALL(*mock_session_ptr, GetSerializedPredictions()) + .WillOnce(Return(predictions)); + + // The output vector is batch_size. + const std::vector expected_output({"one", "two"}); + + // Wrap the ComputeSessionResource and put it into the resource manager. + TF_ASSERT_OK(resource_mgr()->Create( + container_string, id_string, + new ComputeSessionResource(std::move(mock_session)))); + + // Run the kernel. + TF_EXPECT_OK(RunOpKernelWithContext()); + + // Validate the outputs. + EXPECT_EQ(expected_output.size(), GetOutput(0)->NumElements()); + for (int i = 0; i < expected_output.size(); ++i) { + EXPECT_EQ(expected_output[i], GetOutput(0)->vec()(i)); + } +} + +// The GetComponentTrace op should return a vector of serialized trace protos. +TEST_F(DragnnOpKernelsTest, GetComponentTraceOpTest) { + // Create and initialize the kernel under test. + const string component_name = "TESTING_COMPONENT_NAME"; + TF_ASSERT_OK( + NodeDefBuilder("get_component_trace", "GetComponentTrace") + .Attr("component", component_name) + .Input(FakeInput(DT_STRING)) // The handle for the ComputeSession. + .Finalize(node_def())); + TF_ASSERT_OK(InitOp()); + + // Set the input data. + const string container_string = "container_str"; + const string id_string = "id_str"; + AddInputFromList(TensorShape({2}), {container_string, id_string}); + + // Reset the test context to ensure it's clean. + ResetOpKernelContext(); + + std::unique_ptr mock_session(new MockComputeSession()); + MockComputeSession *mock_session_ptr = mock_session.get(); + + // This op will request a set of MasterTraces from GetTraceProtos(), then + // return them. 
+ + MasterTrace trace; + auto component_trace = trace.add_component_trace(); + component_trace->set_name("arbitrary_component_name_for_html"); + auto component_trace_2 = trace.add_component_trace(); + component_trace_2->set_name("arbitrary_component_name_2_for_html"); + const std::vector master_traces({trace}); + + EXPECT_CALL(*mock_session_ptr, GetTraceProtos()) + .WillOnce(Return(master_traces)); + + // Wrap the ComputeSessionResource and put it into the resource manager. + TF_ASSERT_OK(resource_mgr()->Create( + container_string, id_string, + new ComputeSessionResource(std::move(mock_session)))); + + // Run the kernel. + TF_EXPECT_OK(RunOpKernelWithContext()); + + // Validate the outputs. + EXPECT_EQ(master_traces.size(), GetOutput(0)->NumElements()); + for (int i = 0; i < master_traces.size(); ++i) { + string expected; + master_traces.at(i).SerializeToString(&expected); + EXPECT_EQ(expected, GetOutput(0)->vec()(i)); + } +} + +} // namespace dragnn +} // namespace syntaxnet diff --git a/syntaxnet/dragnn/core/ops/dragnn_ops.cc b/syntaxnet/dragnn/core/ops/dragnn_ops.cc new file mode 100644 index 0000000000000000000000000000000000000000..602acaeda0fe0afbf8661656f3e86c5d40f7d73f --- /dev/null +++ b/syntaxnet/dragnn/core/ops/dragnn_ops.cc @@ -0,0 +1,239 @@ +#include "tensorflow/core/framework/op.h" + +namespace syntaxnet { +namespace dragnn { + +REGISTER_OP("GetSession") + .Input("container: string") + .Attr("master_spec: string") + .Attr("grid_point: string") + .Output("handle: string") + .SetIsStateful() + .Doc(R"doc( +Given MasterSpec and GridPoint protos, outputs a handle to a ComputeSession. + +container: A unique identifier for the ComputeSessionPool from which a + ComputeSession will be allocated. +master_spec: A serialized syntaxnet.dragnn.MasterSpec proto. +grid_point: A serialized syntaxnet.dragnn.GridPoint proto. +handle: A string handle to a ComputeSession. 
+)doc"); + +REGISTER_OP("ReleaseSession").Input("handle: string").SetIsStateful().Doc(R"doc( +Given a ComputeSession, return it to the ComputeSession pool. + +This ComputeSession will no longer be available after this op returns. + +handle: A handle to a ComputeSession that will be returned to the backing pool. +)doc"); + +REGISTER_OP("InitComponentData") + .Input("handle: string") + .Input("beam_size: int32") + .Attr("component: string") + .Output("output_handle: string") + .Doc(R"doc( +Initialize a component with the given beam size for a given ComputeSession. + +handle: A handle to a ComputeSession. +beam_size: The size of the beam to use on the component. +component: The name of a Component instance, matching the ComponentSpec.name. +output_handle: The handle to the same ComputeSession after initialization. +)doc"); + +REGISTER_OP("BatchSize") + .Input("handle: string") + .Attr("component: string") + .Output("batch_size: int32") + .Doc(R"doc( +Given a ComputeSession and a component name,return the component batch size. + +handle: A handle to a ComputeSession. +component: The name of a Component instance, matching the ComponentSpec.name. +batch_size: The size of the given component's batch. +)doc"); + +REGISTER_OP("SetTracing") + .Input("handle: string") + .Input("tracing_on: bool") + .Attr("component: string = 'NOT_USED_FOR_THIS_OP'") + .Output("output_handle: string") + .Doc(R"doc( +Given a ComputeSession, turns on or off tracing for all components. + +handle: A handle to a ComputeSession. +tracing_on: Whether or not to record traces. +output_handle: The handle to the same ComputeSession, with the tracing status changed. +)doc"); + +REGISTER_OP("AttachDataReader") + .Input("handle: string") + .Input("input_spec: string") + .Attr("component: string = 'NOT_USED_FOR_THIS_OP'") + .Output("output_handle: string") + .Doc(R"doc( +Given a ComputeSession, attach a data source. + +This op is agnostic to the type of input data. 
The vector of input strings is +interpreted by the backend. + +handle: A handle to a ComputeSession. +input_spec: A vector of strings, where each string represents one batch item. +output_handle: The handle to the same ComputeSession after attachment. +)doc"); + +REGISTER_OP("AdvanceFromOracle") + .Input("handle: string") + .Attr("component: string") + .Output("output_handle: string") + .Doc(R"doc( +Given a ComputeSession and a Component name, advance the component via oracle. + +handle: A handle to a ComputeSession. +component: The name of a Component instance, matching the ComponentSpec.name. +output_handle: The handle to the same ComputeSession after advancement. +)doc"); + +REGISTER_OP("AdvanceFromPrediction") + .Input("handle: string") + .Input("scores: float") + .Attr("component: string") + .Output("output_handle: string") + .Doc(R"doc( +Given a ComputeSession, a Component name, and a score tensor, advance the state. + +handle: A handle to a ComputeSession. +scores: A tensor of scores, ordered by {batch_size, beam_size, num_actions}. +component: The name of a Component instance, matching the ComponentSpec.name. +output_handle: A handle to the same ComputeSession after advancement. +)doc"); + +REGISTER_OP("DragnnEmbeddingInitializer") + .Output("embeddings: float") + .Attr("embedding_input: string") + .Attr("vocab: string") + .Attr("scaling_coefficient: float = 1.0") + .Doc(R"doc( +*** PLACEHOLDER OP - FUNCTIONALITY NOT YET IMPLEMENTED *** + +Read embeddings from an an input for every key specified in a text vocab file. + +embeddings: A tensor containing embeddings from the specified sstable. +embedding_input: Path to location with embedding vectors. +vocab: Path to list of keys corresponding to the input. +scaling_coefficient: A scaling coefficient for the embedding matrix. 
+)doc"); + +REGISTER_OP("ExtractFixedFeatures") + .Input("handle: string") + .Output("indices: int32") + .Output("ids: int64") + .Output("weights: float") + .Attr("component: string") + .Attr("channel_id: int") + .Doc(R"doc( +Given a ComputeSession, Component, and channel index, output fixed features. + +Fixed features returned as 3 vectors, 'indices', 'ids', and 'weights' of equal +length. 'ids' specifies which rows should be looked up in the embedding +matrix. 'weights' specifies a scale for each embedding vector. 'indices' is a +sorted vector that assigns the same index to embedding vectors that should be +summed together. + +handle: A handle to a ComputeSession. +indices: The row to add the feature to. +ids: The indices into embedding matrices for each feature. +weights: The weight for each looked up feature. +component: The name of a Component instance, matching the ComponentSpec.name. +channel_id: The feature channel to extract features for. +)doc"); + +REGISTER_OP("ExtractLinkFeatures") + .Input("handle: string") + .Output("step_idx: int32") + .Output("idx: int32") + .Attr("component: string") + .Attr("channel_id: int") + .Doc(R"doc( +Given a ComputeSession, Component, and a channel index, outputs link features. + +Output indices have shape {batch_size * beam_size * channel_size}. + +handle: A handle to a ComputeSession. +step_idx: The step indices to read activations from. +idx: indices The index within a step to read the activations from. +component: The name of a Component instance, matching the ComponentSpec.name. +channel_id: The feature channel to extract features for. +)doc"); + +REGISTER_OP("EmitOracleLabels") + .Input("handle: string") + .Output("gold_labels: int32") + .Attr("component: string") + .Doc(R"doc( +Given a ComputeSession and Component, emit a vector of gold labels. + +handle: A handle to a ComputeSession. +gold_labels: A [batch_size * beam_size] vector of gold labels for the current + ComputeSession. 
+component: The name of a Component instance, matching the ComponentSpec.name. +)doc"); + +REGISTER_OP("EmitAllFinal") + .Input("handle: string") + .Output("all_final: bool") + .Attr("component: string") + .Doc(R"doc( +Given a ComputeSession and Component, returns whether the Component is final. + +A component is considered final when all elements in the batch have beams +containing all final states. + +handle: A handle to a ComputeSession. +all_final: Whether every element in the specified component is 'final'. +component: The name of a Component instance, matching the ComponentSpec.name. +)doc"); + +REGISTER_OP("WriteAnnotations") + .Input("handle: string") + .Output("output_handle: string") + .Attr("component: string") + .Doc(R"doc( +Given a ComputeSession, has the given component write out its annotations. + +The annotations are written to the underlying data objects passed in at the +beginning of the computation. + +handle: A handle to a ComputeSession. +output_handle: A handle to the same ComputeSession after writing. +component: The name of a Component instance, matching the ComponentSpec.name. +)doc"); + +REGISTER_OP("EmitAnnotations") + .Input("handle: string") + .Output("annotations: string") + .Attr("component: string") + .Doc(R"doc( +Given a ComputeSession, emits strings with final predictions for the model. + +Predictions are given for each element in the final component's batch. + +handle: A handle to a ComputeSession. +annotations: A vector of strings representing the annotated data. +component: The name of a Component instance, matching the ComponentSpec.name. +)doc"); + +REGISTER_OP("GetComponentTrace") + .Input("handle: string") + .Output("trace: string") + .Attr("component: string") + .Doc(R"doc( +Gets the raw MasterTrace proto for each batch, state, and beam slot. + +handle: A handle to a ComputeSession. +trace: A vector of MasterTrace protos. +component: The name of a Component instance, matching the ComponentSpec.name. 
+)doc"); + +} // namespace dragnn +} // namespace syntaxnet diff --git a/syntaxnet/dragnn/core/resource_container.h b/syntaxnet/dragnn/core/resource_container.h new file mode 100644 index 0000000000000000000000000000000000000000..66f26904981eb84cf8780161a2d66b2c72cf07d0 --- /dev/null +++ b/syntaxnet/dragnn/core/resource_container.h @@ -0,0 +1,36 @@ +#ifndef NLP_SAFT_OPENSOURCE_DRAGNN_CORE_RESOURCE_CONTAINER_H_ +#define NLP_SAFT_OPENSOURCE_DRAGNN_CORE_RESOURCE_CONTAINER_H_ + +#include + +#include "syntaxnet/base.h" +#include "tensorflow/core/framework/resource_mgr.h" + +namespace syntaxnet { +namespace dragnn { + +using tensorflow::strings::StrCat; + +// Wrapper to store a data type T in the ResourceMgr. There should be one per +// Session->Run() call that may happen concurrently. +template +class ResourceContainer : public tensorflow::ResourceBase { + public: + explicit ResourceContainer(std::unique_ptr data) + : data_(std::move(data)) {} + + ~ResourceContainer() override {} + + T *get() { return data_.get(); } + std::unique_ptr release() { return std::move(data_); } + + string DebugString() override { return "ResourceContainer"; } + + private: + std::unique_ptr data_; +}; + +} // namespace dragnn +} // namespace syntaxnet + +#endif // NLP_SAFT_OPENSOURCE_DRAGNN_CORE_RESOURCE_CONTAINER_H_ diff --git a/syntaxnet/dragnn/core/resource_container_test.cc b/syntaxnet/dragnn/core/resource_container_test.cc new file mode 100644 index 0000000000000000000000000000000000000000..6a1d4e97c7004eccf8e1038bd0946e8e2b78fe99 --- /dev/null +++ b/syntaxnet/dragnn/core/resource_container_test.cc @@ -0,0 +1,49 @@ +// Tests the methods of ResourceContainer. +// +// NOTE(danielandor): For all tests: ResourceContainer is derived from +// RefCounted, which requires the use of Unref to reduce the ref count +// to zero and automatically delete the pointer. 
+#include "dragnn/core/resource_container.h" + +#include +#include "tensorflow/core/platform/test.h" + +namespace syntaxnet { +namespace dragnn { + +class MockDatatype {}; + +TEST(ResourceContainerTest, Get) { + std::unique_ptr data(new MockDatatype()); + MockDatatype *data_ptr = data.get(); + auto *container = new ResourceContainer(std::move(data)); + EXPECT_EQ(data_ptr, container->get()); + container->Unref(); +} + +TEST(ResourceContainerTest, Release) { + std::unique_ptr data(new MockDatatype()); + MockDatatype *data_ptr = data.get(); + auto *container = new ResourceContainer(std::move(data)); + std::unique_ptr data_again = container->release(); + container->Unref(); + EXPECT_EQ(data_ptr, data_again.get()); +} + +TEST(ResourceContainerTest, NullptrOnGetAfterRelease) { + std::unique_ptr data(new MockDatatype()); + auto *container = new ResourceContainer(std::move(data)); + container->release(); + EXPECT_EQ(nullptr, container->get()); + container->Unref(); +} + +TEST(ResourceContainerTest, DebugString) { + std::unique_ptr data(new MockDatatype()); + auto *container = new ResourceContainer(std::move(data)); + EXPECT_EQ("ResourceContainer", container->DebugString()); + container->Unref(); +} + +} // namespace dragnn +} // namespace syntaxnet diff --git a/syntaxnet/dragnn/core/test/BUILD b/syntaxnet/dragnn/core/test/BUILD new file mode 100644 index 0000000000000000000000000000000000000000..3e3dea34fc61863e1ae37062708aac9bb85f65e7 --- /dev/null +++ b/syntaxnet/dragnn/core/test/BUILD @@ -0,0 +1,57 @@ +package(default_visibility = ["//visibility:public"]) + +cc_library( + name = "mock_component", + testonly = True, + hdrs = ["mock_component.h"], + deps = [ + "//dragnn/components/util:bulk_feature_extractor", + "//dragnn/core:index_translator", + "//dragnn/core/interfaces:component", + "//dragnn/core/interfaces:transition_state", + "//dragnn/protos:data_proto", + "//dragnn/protos:spec_proto", + "//syntaxnet:base", + "//syntaxnet:test_main", + 
"@org_tensorflow//tensorflow/core:test", + ], +) + +cc_library( + name = "mock_compute_session", + testonly = True, + hdrs = ["mock_compute_session.h"], + deps = [ + "//dragnn/components/util:bulk_feature_extractor", + "//dragnn/core:compute_session", + "//dragnn/protos:data_proto", + "//dragnn/protos:spec_proto", + "//syntaxnet:base", + "@org_tensorflow//tensorflow/core:test", + ], +) + +cc_library( + name = "mock_transition_state", + testonly = True, + hdrs = ["mock_transition_state.h"], + deps = [ + "//dragnn/core/interfaces:transition_state", + "//syntaxnet:base", + "//syntaxnet:test_main", + "@org_tensorflow//tensorflow/core:test", + ], +) + +cc_library( + name = "generic", + testonly = True, + srcs = ["generic.cc"], + hdrs = ["generic.h"], + deps = [ + "//syntaxnet:base", + "@org_tensorflow//tensorflow/core:lib", + "@org_tensorflow//tensorflow/core:test", + "@org_tensorflow//tensorflow/core:testlib", + ], +) diff --git a/syntaxnet/dragnn/core/test/generic.cc b/syntaxnet/dragnn/core/test/generic.cc new file mode 100644 index 0000000000000000000000000000000000000000..799e1a80d97c748a896b116f45938c1f6d7850b4 --- /dev/null +++ b/syntaxnet/dragnn/core/test/generic.cc @@ -0,0 +1,21 @@ +#include "dragnn/core/test/generic.h" + +#include "tensorflow/core/lib/io/path.h" + +namespace syntaxnet { +namespace test { + +string GetTestDataPrefix() { + const char *env = getenv("TEST_SRCDIR"); + const char *workspace = getenv("TEST_WORKSPACE"); + if (!env || env[0] == '\0' || !workspace || workspace[0] == '\0') { + LOG(FATAL) << "Test directories not set up"; + } + return tensorflow::io::JoinPath( + + env, workspace + ); +} + +} // namespace test +} // namespace syntaxnet diff --git a/syntaxnet/dragnn/core/test/generic.h b/syntaxnet/dragnn/core/test/generic.h new file mode 100644 index 0000000000000000000000000000000000000000..5ec5dd61211ea8538168e9c14f37e5c26c551990 --- /dev/null +++ b/syntaxnet/dragnn/core/test/generic.h @@ -0,0 +1,25 @@ +#ifndef 
NLP_SAFT_OPENSOURCE_DRAGNN_CORE_TEST_GENERIC_H_ +#define NLP_SAFT_OPENSOURCE_DRAGNN_CORE_TEST_GENERIC_H_ + +#include + +#include + +#include "syntaxnet/base.h" +#include "tensorflow/core/platform/protobuf.h" +#include "tensorflow/core/platform/test.h" + +namespace syntaxnet { +namespace test { + +MATCHER_P(EqualsProto, a, "Protos are not equivalent:") { + return a.DebugString() == arg.DebugString(); +} + +// Returns the prefix for where the test data is stored. +string GetTestDataPrefix(); + +} // namespace test +} // namespace syntaxnet + +#endif // NLP_SAFT_OPENSOURCE_DRAGNN_CORE_TEST_GENERIC_H_ diff --git a/syntaxnet/dragnn/core/test/mock_component.h b/syntaxnet/dragnn/core/test/mock_component.h new file mode 100644 index 0000000000000000000000000000000000000000..28e3e82e42d1c338c0d01321ba73afd6c4e7f681 --- /dev/null +++ b/syntaxnet/dragnn/core/test/mock_component.h @@ -0,0 +1,63 @@ +#ifndef NLP_SAFT_OPENSOURCE_DRAGNN_CORE_TEST_MOCK_COMPONENT_H_ +#define NLP_SAFT_OPENSOURCE_DRAGNN_CORE_TEST_MOCK_COMPONENT_H_ + +#include + +#include "dragnn/components/util/bulk_feature_extractor.h" +#include "dragnn/core/index_translator.h" +#include "dragnn/core/interfaces/component.h" +#include "dragnn/core/interfaces/transition_state.h" +#include "dragnn/protos/data.pb.h" +#include "dragnn/protos/spec.pb.h" +#include "syntaxnet/base.h" +#include "tensorflow/core/platform/test.h" + +namespace syntaxnet { +namespace dragnn { + +class MockComponent : public Component { + public: + MOCK_METHOD1(InitializeComponent, void(const ComponentSpec &spec)); + MOCK_METHOD3( + InitializeData, + void(const std::vector> &states, + int max_beam_size, InputBatchCache *input_data)); + MOCK_CONST_METHOD0(IsReady, bool()); + MOCK_METHOD0(InitializeTracing, void()); + MOCK_METHOD0(DisableTracing, void()); + MOCK_CONST_METHOD0(Name, string()); + MOCK_CONST_METHOD0(BatchSize, int()); + MOCK_CONST_METHOD0(BeamSize, int()); + MOCK_CONST_METHOD1(StepsTaken, int(int batch_index)); + 
MOCK_CONST_METHOD3(GetBeamIndexAtStep, + int(int step, int current_index, int batch)); + MOCK_CONST_METHOD2(GetSourceBeamIndex, int(int current_index, int batch)); + MOCK_METHOD2(AdvanceFromPrediction, + void(const float transition_matrix[], int matrix_length)); + MOCK_METHOD0(AdvanceFromOracle, void()); + MOCK_CONST_METHOD0(IsTerminal, bool()); + MOCK_METHOD0(GetBeam, std::vector>()); + MOCK_CONST_METHOD4(GetFixedFeatures, + int(std::function allocate_indices, + std::function allocate_ids, + std::function allocate_weights, + int channel_id)); + MOCK_METHOD1(BulkGetFixedFeatures, + int(const BulkFeatureExtractor &extractor)); + MOCK_CONST_METHOD1(GetRawLinkFeatures, + std::vector(int channel_id)); + MOCK_CONST_METHOD0(GetOracleLabels, std::vector>()); + MOCK_METHOD0(ResetComponent, void()); + MOCK_METHOD1(GetStepLookupFunction, + std::function(const string &method)); + MOCK_METHOD0(FinalizeData, void()); + MOCK_CONST_METHOD0(GetTraceProtos, + std::vector>()); + MOCK_METHOD2(AddTranslatedLinkFeaturesToTrace, + void(const std::vector &features, int channel_id)); +}; + +} // namespace dragnn +} // namespace syntaxnet + +#endif // NLP_SAFT_OPENSOURCE_DRAGNN_CORE_TEST_MOCK_COMPONENT_H_ diff --git a/syntaxnet/dragnn/core/test/mock_compute_session.h b/syntaxnet/dragnn/core/test/mock_compute_session.h new file mode 100644 index 0000000000000000000000000000000000000000..8cc849770c40640c58769bf9d8641ede55c55d4d --- /dev/null +++ b/syntaxnet/dragnn/core/test/mock_compute_session.h @@ -0,0 +1,61 @@ +#ifndef NLP_SAFT_OPENSOURCE_DRAGNN_CORE_TEST_MOCK_COMPUTE_SESSION_H_ +#define NLP_SAFT_OPENSOURCE_DRAGNN_CORE_TEST_MOCK_COMPUTE_SESSION_H_ + +#include + +#include "dragnn/components/util/bulk_feature_extractor.h" +#include "dragnn/core/compute_session.h" +#include "dragnn/protos/data.pb.h" +#include "dragnn/protos/spec.pb.h" +#include "syntaxnet/base.h" +#include "tensorflow/core/platform/test.h" + +namespace syntaxnet { +namespace dragnn { + +class MockComputeSession : public 
ComputeSession { + public: + MOCK_METHOD2(Init, void(const MasterSpec &master_spec, + const GridPoint &hyperparams)); + MOCK_METHOD2(InitializeComponentData, + void(const string &component_name, int max_beam_size)); + MOCK_CONST_METHOD1(BatchSize, int(const string &component_name)); + MOCK_CONST_METHOD1(BeamSize, int(const string &component_name)); + MOCK_CONST_METHOD1(Spec, const ComponentSpec &(const string &component_name)); + MOCK_METHOD2(SourceComponentBeamSize, + int(const string &component_name, int channel_id)); + MOCK_METHOD1(AdvanceFromOracle, void(const string &component_name)); + MOCK_METHOD3(AdvanceFromPrediction, + void(const string &component_name, const float score_matrix[], + int score_matrix_length)); + MOCK_CONST_METHOD5(GetInputFeatures, + int(const string &component_name, + std::function allocate_indices, + std::function allocate_ids, + std::function allocate_weights, + int channel_id)); + MOCK_METHOD2(BulkGetInputFeatures, + int(const string &component_name, + const BulkFeatureExtractor &extractor)); + MOCK_METHOD2(GetTranslatedLinkFeatures, + std::vector(const string &component_name, + int channel_id)); + MOCK_METHOD1(EmitOracleLabels, + std::vector>(const string &component_name)); + MOCK_METHOD1(IsTerminal, bool(const string &component_name)); + MOCK_METHOD1(FinalizeData, void(const string &component_name)); + MOCK_METHOD0(GetSerializedPredictions, std::vector()); + MOCK_METHOD0(GetTraceProtos, std::vector()); + MOCK_METHOD1(SetInputData, void(const std::vector &data)); + MOCK_METHOD0(ResetSession, void()); + MOCK_METHOD1(SetTracing, void(bool tracing_on)); + MOCK_CONST_METHOD0(Id, int()); + MOCK_CONST_METHOD1(GetDescription, string(const string &component_name)); + MOCK_CONST_METHOD1(Translators, const std::vector( + const string &component_name)); +}; + +} // namespace dragnn +} // namespace syntaxnet + +#endif // NLP_SAFT_OPENSOURCE_DRAGNN_CORE_TEST_MOCK_COMPUTE_SESSION_H_ diff --git a/syntaxnet/dragnn/core/test/mock_transition_state.h 
b/syntaxnet/dragnn/core/test/mock_transition_state.h new file mode 100644 index 0000000000000000000000000000000000000000..43690891f297b413a8c975c7ec6762b2db932a9a --- /dev/null +++ b/syntaxnet/dragnn/core/test/mock_transition_state.h @@ -0,0 +1,30 @@ +#ifndef NLP_SAFT_OPENSOURCE_DRAGNN_CORE_TEST_MOCK_TRANSITION_STATE_H_ +#define NLP_SAFT_OPENSOURCE_DRAGNN_CORE_TEST_MOCK_TRANSITION_STATE_H_ + +#include + +#include + +#include "dragnn/core/interfaces/transition_state.h" +#include "syntaxnet/base.h" +#include "tensorflow/core/platform/test.h" + +namespace syntaxnet { +namespace dragnn { + +class MockTransitionState : public TransitionState { + public: + MOCK_METHOD1(Init, void(const TransitionState &parent)); + MOCK_CONST_METHOD0(Clone, std::unique_ptr()); + MOCK_CONST_METHOD0(ParentBeamIndex, const int()); + MOCK_METHOD1(SetBeamIndex, void(const int index)); + MOCK_CONST_METHOD0(GetBeamIndex, const int()); + MOCK_CONST_METHOD0(GetScore, const float()); + MOCK_METHOD1(SetScore, void(const float score)); + MOCK_CONST_METHOD0(HTMLRepresentation, string()); +}; + +} // namespace dragnn +} // namespace syntaxnet + +#endif // NLP_SAFT_OPENSOURCE_DRAGNN_CORE_TEST_MOCK_TRANSITION_STATE_H_ diff --git a/syntaxnet/dragnn/core/testdata/brain-parser-model b/syntaxnet/dragnn/core/testdata/brain-parser-model new file mode 100644 index 0000000000000000000000000000000000000000..0ad278116f27a30400b9add779f99ba848fce803 Binary files /dev/null and b/syntaxnet/dragnn/core/testdata/brain-parser-model differ diff --git a/syntaxnet/dragnn/core/testdata/master_spec_link.textproto b/syntaxnet/dragnn/core/testdata/master_spec_link.textproto new file mode 100644 index 0000000000000000000000000000000000000000..aea09690c5f6c9f31684a87909167838fa03da02 --- /dev/null +++ b/syntaxnet/dragnn/core/testdata/master_spec_link.textproto @@ -0,0 +1,86 @@ +component { + name: "parser" + num_actions: 100 + transition_system { + registered_name: "arc-standard" + parameters { + key: "entity_name_tokenizer" + 
value: "pre-tokenized" + } + parameters { + key: "language" + value: "en" + } + parameters { + key: "neurosis_feature_syntax_version" + value: "2" + } + parameters { + key: "parser_skip_deterministic" + value: "false" + } + parameters { + key: "parser_transition_system" + value: "arc-standard" + } + } + resource { + name: "tag-map" + part { + file_pattern: "TESTDATA/syntaxnet_tagger.tag-map" + file_format: "text" + } + } + resource { + name: "word-map" + part { + file_pattern: "TESTDATA/syntaxnet_tagger.word-map" + file_format: "text" + } + } + resource { + name: "label-map" + part { + file_pattern: "TESTDATA/syntaxnet_tagger.label-map" + file_format: "text" + } + } + fixed_feature { + name: "tags" + fml: "input.tag stack.tag stack(1).tag" + embedding_dim: 32 + vocabulary_size: 51 + size: 3 + predicate_map: "hashed" + } + fixed_feature { + name: "words" + fml: "input.word" + embedding_dim: 64 + vocabulary_size: 39396 + size: 1 + predicate_map: "hashed" + } + linked_feature { + name: "rnn" + fml: "stack.focus" + embedding_dim: 32 + size: 1 + source_component: "parser" + source_translator: "shift-reduce-step" + source_layer: "layer_0" + } + backend { + registered_name: 'SyntaxNetComponent' + } + component_builder { + registered_name: 'DynamicComponentBuilder' + } + network_unit { + registered_name: 'FeedForwardNetwork' + parameters { + key: 'hidden_layer_sizes' + value: '64' + } + } +} diff --git a/syntaxnet/dragnn/core/testdata/repository b/syntaxnet/dragnn/core/testdata/repository new file mode 100644 index 0000000000000000000000000000000000000000..1bb1180529fce623cdc07a704372318fe7ba9efb Binary files /dev/null and b/syntaxnet/dragnn/core/testdata/repository differ diff --git a/syntaxnet/dragnn/core/testdata/simple-tagger.brain-parser-model b/syntaxnet/dragnn/core/testdata/simple-tagger.brain-parser-model new file mode 100644 index 0000000000000000000000000000000000000000..8fa7dfca794eeb6c1584a25f87ad2b1fb2f9b34e Binary files /dev/null and 
b/syntaxnet/dragnn/core/testdata/simple-tagger.brain-parser-model differ diff --git a/syntaxnet/dragnn/core/testdata/simple-tagger.repository b/syntaxnet/dragnn/core/testdata/simple-tagger.repository new file mode 100644 index 0000000000000000000000000000000000000000..b3cc9e04b07d009b0faff002d8c02c4ca3a84449 Binary files /dev/null and b/syntaxnet/dragnn/core/testdata/simple-tagger.repository differ diff --git a/syntaxnet/dragnn/core/testdata/simple-tagger.tag-map b/syntaxnet/dragnn/core/testdata/simple-tagger.tag-map new file mode 100644 index 0000000000000000000000000000000000000000..8fac8c5c6f1a02cb6a1ec3b40fdec276e56a1f3b --- /dev/null +++ b/syntaxnet/dragnn/core/testdata/simple-tagger.tag-map @@ -0,0 +1,46 @@ +45 +NN 132998 +IN 98554 +NNP 91466 +DT 81832 +JJ 61217 +NNS 59856 +, 48727 +. 39478 +CD 36568 +RB 30907 +VBD 29889 +VB 26438 +CC 23959 +TO 22357 +VBZ 21672 +VBN 20024 +PRP 17436 +VBG 14846 +VBP 12491 +MD 9803 +POS 8701 +PRP$ 8407 +$ 7372 +`` 7092 +'' 6919 +: 4772 +WDT 4294 +JJR 3238 +NNPS 2673 +RP 2662 +WP 2363 +WRB 2143 +JJS 1947 +RBR 1768 +-RRB- 1376 +-LRB- 1366 +EX 863 +RBS 451 +PDT 368 +FW 234 +WP$ 168 +# 142 +UH 97 +SYM 58 +LS 36 diff --git a/syntaxnet/dragnn/core/testdata/simple_parser_master_spec.textproto b/syntaxnet/dragnn/core/testdata/simple_parser_master_spec.textproto new file mode 100644 index 0000000000000000000000000000000000000000..2f98236c206345e6eaa53f0967a5f412e1de04f7 --- /dev/null +++ b/syntaxnet/dragnn/core/testdata/simple_parser_master_spec.textproto @@ -0,0 +1,59 @@ +component { + name: "parser" + num_actions : 93 + transition_system { + registered_name: "arc-standard" + } + resource { + name: "tag-map" + part { + file_pattern: "TESTDATA/syntaxnet_tagger.tag-map" + file_format: "text" + } + } + resource { + name: "word-map" + part { + file_pattern: "TESTDATA/syntaxnet_tagger.word-map" + file_format: "text" + } + } + resource { + name: "label-map" + part { + file_pattern: "TESTDATA/syntaxnet_tagger.label-map" + file_format: "text" 
+ } + } + fixed_feature { + name: "words" + fml: "input(-1).word input(-2).word input(-3).word input.word input(1).word input(2).word input(3).word" + embedding_dim: 64 + vocabulary_size: 39397 + size: 7 + } + linked_feature { + name: "rnn" + fml: "stack.focus stack(1).focus" + embedding_dim: 32 + size: 2 + source_component: "parser" + source_translator: "shift-reduce-step" + source_layer: "layer_0" + } + backend { + registered_name: "SyntaxNetComponent" + } + component_builder { + registered_name: 'DynamicComponentBuilder' + } + network_unit { + registered_name: 'FeedForwardNetwork' + parameters { + key: 'hidden_layer_sizes' + value: '64' + } + } + + inference_beam_size: 4 +} diff --git a/syntaxnet/dragnn/core/testdata/simple_tagger_lstm_master_spec.textproto b/syntaxnet/dragnn/core/testdata/simple_tagger_lstm_master_spec.textproto new file mode 100644 index 0000000000000000000000000000000000000000..a418f7e2d7f675d99f8d39a6a5dc4dd5d43d5c3e --- /dev/null +++ b/syntaxnet/dragnn/core/testdata/simple_tagger_lstm_master_spec.textproto @@ -0,0 +1,52 @@ +component { + name: "tagger" + num_actions : 49 + transition_system { + registered_name: "tagger" + parameters { + key: "join_category_to_pos" + value: "true" + } + } + resource { + name: "tag-map" + part { + file_pattern: "TESTDATA/syntaxnet_tagger.tag-map" + file_format: "text" + } + } + resource { + name: "word-map" + part { + file_pattern: "TESTDATA/syntaxnet_tagger.word-map" + file_format: "text" + } + } + resource { + name: "label-map" + part { + file_pattern: "TESTDATA/syntaxnet_tagger.label-map" + file_format: "text" + } + } + fixed_feature { + name: "words" + fml: "input(-1).word input(-2).word input(-3).word input.word input(1).word input(2).word input(3).word" + embedding_dim: 64 + vocabulary_size: 39397 + size: 7 + } + backend { + registered_name: "SyntaxNetComponent" + } + component_builder { + registered_name: "DynamicComponentBuilder" + } + network_unit { + registered_name: "LSTMNetwork" + parameters { + 
key: "hidden_layer_sizes" + value: "64" + } + } +} diff --git a/syntaxnet/dragnn/core/testdata/simple_tagger_master_spec.textproto b/syntaxnet/dragnn/core/testdata/simple_tagger_master_spec.textproto new file mode 100644 index 0000000000000000000000000000000000000000..502286335a58794d569a9313b87feeea6b37b37a --- /dev/null +++ b/syntaxnet/dragnn/core/testdata/simple_tagger_master_spec.textproto @@ -0,0 +1,63 @@ +component { + name: "tagger" + num_actions : 49 + transition_system { + registered_name: "tagger" + parameters { + key: "join_category_to_pos" + value: "true" + } + } + resource { + name: "tag-map" + part { + file_pattern: "TESTDATA/syntaxnet_tagger.tag-map" + file_format: "text" + } + } + resource { + name: "word-map" + part { + file_pattern: "TESTDATA/syntaxnet_tagger.word-map" + file_format: "text" + } + } + resource { + name: "label-map" + part { + file_pattern: "TESTDATA/syntaxnet_tagger.label-map" + file_format: "text" + } + } + fixed_feature { + name: "words" + fml: "input(-1).word input(-2).word input(-3).word input.word input(1).word input(2).word input(3).word" + embedding_dim: 64 + vocabulary_size: 39397 + size: 7 + } + linked_feature { + name: "rnn" + fml: "stack.focus" + embedding_dim: 32 + size: 1 + source_component: "tagger" + source_translator: "shift-reduce-step" + source_layer: "layer_0" + } + backend { + registered_name: "SyntaxNetComponent" + } + component_builder { + registered_name: "DynamicComponentBuilder" + } + network_unit { + registered_name: "FeedForwardNetwork" + parameters { + key: "hidden_layer_sizes" + value: "64" + } + } + training_beam_size: 1 + inference_beam_size: 3 +} diff --git a/syntaxnet/dragnn/core/testdata/simple_tagger_wrapped_lstm_master_spec.textproto b/syntaxnet/dragnn/core/testdata/simple_tagger_wrapped_lstm_master_spec.textproto new file mode 100644 index 0000000000000000000000000000000000000000..24d9e35abd4936f96709f2a5e808462d9facdaa8 --- /dev/null +++ 
b/syntaxnet/dragnn/core/testdata/simple_tagger_wrapped_lstm_master_spec.textproto @@ -0,0 +1,65 @@ +component { + name: "tagger" + num_actions : 49 + transition_system { + registered_name: "tagger" + parameters { + key: "join_category_to_pos" + value: "true" + } + } + resource { + name: "tag-map" + part { + file_pattern: "TESTDATA/syntaxnet_tagger.tag-map" + file_format: "text" + } + } + resource { + name: "word-map" + part { + file_pattern: "TESTDATA/syntaxnet_tagger.word-map" + file_format: "text" + } + } + resource { + name: "label-map" + part { + file_pattern: "TESTDATA/syntaxnet_tagger.label-map" + file_format: "text" + } + } + fixed_feature { + name: "words" + fml: "input(-1).word input(-2).word input(-3).word input.word input(1).word input(2).word input(3).word" + embedding_dim: 64 + vocabulary_size: 39397 + size: 7 + predicate_map: "hashed" + } + backend { + registered_name: "SyntaxNetComponent" + } + component_builder { + registered_name: "DynamicComponentBuilder" + } + network_unit { + registered_name: "wrapped_units.LayerNormBasicLSTMNetwork" + parameters { + key: "hidden_layer_sizes" + value: "64,64,64" + } + parameters { + key: "input_dropout_rate" + value: "0.9" + } + parameters { + key: "recurrent_dropout_rate" + value: "0.9" + } + parameters { + key: "layer_norm" + value: "true" + } + } +} diff --git a/syntaxnet/dragnn/core/testdata/split_tagger_master_spec.textproto b/syntaxnet/dragnn/core/testdata/split_tagger_master_spec.textproto new file mode 100644 index 0000000000000000000000000000000000000000..388405c744ca2eacc97a036570f4e4d3ebef5e45 --- /dev/null +++ b/syntaxnet/dragnn/core/testdata/split_tagger_master_spec.textproto @@ -0,0 +1,111 @@ +component { + name: "tagger-features" + num_actions : 49 + transition_system { + registered_name: "tagger" + parameters { + key: "join_category_to_pos" + value: "true" + } + } + resource { + name: "tag-map" + part { + file_pattern: "TESTDATA/syntaxnet_tagger.tag-map" + file_format: "text" + } + } + resource { 
+ name: "word-map" + part { + file_pattern: "TESTDATA/syntaxnet_tagger.word-map" + file_format: "text" + } + } + resource { + name: "label-map" + part { + file_pattern: "TESTDATA/syntaxnet_tagger.label-map" + file_format: "text" + } + } + fixed_feature { + name: "words" + fml: "input(-1).word input(-2).word input(-3).word input.word input(1).word input(2).word input(3).word" + embedding_dim: 64 + vocabulary_size: 39397 + size: 7 + } + linked_feature { + name: "rnn" + fml: "stack.focus" + embedding_dim: 32 + size: 1 + source_component: "tagger-features" + source_translator: "shift-reduce-step" + source_layer: "layer_0" + } + backend { + registered_name: "SyntaxNetComponent" + } + component_builder { + registered_name: "DynamicComponentBuilder" + } + network_unit { + registered_name: "FeedForwardNetwork" + parameters { + key: "hidden_layer_sizes" + value: "64" + } + } +} +component { + name: "tagger" + num_actions : 49 + transition_system { + registered_name: "tagger" + parameters { + key: "join_category_to_pos" + value: "true" + } + } + resource { + name: "tag-map" + part { + file_pattern: "TESTDATA/syntaxnet_tagger.tag-map" + file_format: "text" + } + } + resource { + name: "word-map" + part { + file_pattern: "TESTDATA/syntaxnet_tagger.word-map" + file_format: "text" + } + } + resource { + name: "label-map" + part { + file_pattern: "TESTDATA/syntaxnet_tagger.label-map" + file_format: "text" + } + } + linked_feature { + name: "features" + fml: "input.focus" + embedding_dim: -1 + size: 1 + source_component: "tagger-features" + source_translator: "identity" + source_layer: "logits" + } + backend { + registered_name: "SyntaxNetComponent" + } + component_builder { + registered_name: "DynamicComponentBuilder" + } + network_unit { + registered_name: "IdentityNetwork" + } +} diff --git a/syntaxnet/dragnn/core/testdata/syntaxnet_tagger.label-map b/syntaxnet/dragnn/core/testdata/syntaxnet_tagger.label-map new file mode 100644 index 
0000000000000000000000000000000000000000..8fdd1fc86d9f33e2e639d794bb2b719a0767bc75 --- /dev/null +++ b/syntaxnet/dragnn/core/testdata/syntaxnet_tagger.label-map @@ -0,0 +1,47 @@ +46 +punct 243160 +prep 194627 +pobj 186958 +det 170592 +nsubj 144821 +nn 144800 +amod 117242 +ROOT 90592 +dobj 88551 +aux 76523 +advmod 72893 +conj 59384 +cc 57532 +num 36350 +poss 35117 +dep 34986 +ccomp 29470 +cop 25991 +mark 25141 +xcomp 25111 +rcmod 16234 +auxpass 15740 +advcl 14996 +possessive 14866 +nsubjpass 14133 +pcomp 12488 +appos 11112 +partmod 11106 +neg 11090 +number 10658 +prt 7123 +quantmod 6653 +tmod 5418 +infmod 5134 +npadvmod 3213 +parataxis 3012 +mwe 2793 +expl 2712 +iobj 1642 +acomp 1632 +discourse 1381 +csubj 1225 +predet 1160 +preconj 749 +goeswith 146 +csubjpass 41 diff --git a/syntaxnet/dragnn/core/testdata/syntaxnet_tagger.tag-map b/syntaxnet/dragnn/core/testdata/syntaxnet_tagger.tag-map new file mode 100644 index 0000000000000000000000000000000000000000..2cad1a73b010ace29854dc80296c79728e9b3c52 --- /dev/null +++ b/syntaxnet/dragnn/core/testdata/syntaxnet_tagger.tag-map @@ -0,0 +1,50 @@ +49 +NN 285194 +IN 228165 +DT 179147 +NNP 175147 +JJ 125667 +NNS 115732 +, 97481 +. 85938 +RB 78513 +VB 63952 +CC 57554 +VBD 56635 +CD 55674 +PRP 55244 +VBZ 48126 +VBN 44458 +VBG 34524 +VBP 33669 +TO 28772 +MD 22364 +PRP$ 20706 +HYPH 18526 +POS 14905 +`` 12193 +'' 12154 +WDT 10267 +: 8713 +$ 7993 +WP 7336 +RP 7335 +WRB 6634 +JJR 6295 +NNPS 5917 +-RRB- 3904 +-LRB- 3840 +JJS 3596 +RBR 3186 +EX 2733 +UH 1521 +RBS 1467 +PDT 1271 +FW 928 +NFP 844 +SYM 652 +ADD 476 +LS 392 +WP$ 332 +GW 184 +AFX 42 diff --git a/syntaxnet/dragnn/core/testdata/syntaxnet_tagger.word-map b/syntaxnet/dragnn/core/testdata/syntaxnet_tagger.word-map new file mode 100644 index 0000000000000000000000000000000000000000..4b9e22b6f7ddd35776803739392c31328612f2b7 --- /dev/null +++ b/syntaxnet/dragnn/core/testdata/syntaxnet_tagger.word-map @@ -0,0 +1,4 @@ +3 +sentence 4 +. 
3 +0 2 diff --git a/syntaxnet/dragnn/core/testdata/tagger_parser_master_spec.textproto b/syntaxnet/dragnn/core/testdata/tagger_parser_master_spec.textproto new file mode 100644 index 0000000000000000000000000000000000000000..5a60df0044c3f8cbf395f19742690b117af86280 --- /dev/null +++ b/syntaxnet/dragnn/core/testdata/tagger_parser_master_spec.textproto @@ -0,0 +1,185 @@ +component { + name: "features" + num_actions : 1 + transition_system { + registered_name: "shift-only" + } + resource { + name: "tag-map" + part { + file_pattern: "TESTDATA/syntaxnet_tagger.tag-map" + file_format: "text" + } + } + resource { + name: "word-map" + part { + file_pattern: "TESTDATA/syntaxnet_tagger.word-map" + file_format: "text" + } + } + resource { + name: "label-map" + part { + file_pattern: "TESTDATA/syntaxnet_tagger.label-map" + file_format: "text" + } + } + fixed_feature { + name: "words" + fml: "input(-1).word input(-2).word input(-3).word input.word input(1).word input(2).word input(3).word" + embedding_dim: 64 + vocabulary_size: 39397 + size: 7 + } + backend { + registered_name: "SyntaxNetComponent" + } + component_builder { + registered_name: "DynamicComponentBuilder" + } + network_unit { + registered_name: "IdentityNetwork" + } + inference_beam_size: 1 +} +component { + name: "tagger" + num_actions : 49 + transition_system { + registered_name: "tagger" + parameters { + key: "join_category_to_pos" + value: "true" + } + } + resource { + name: "tag-map" + part { + file_pattern: "TESTDATA/syntaxnet_tagger.tag-map" + file_format: "text" + } + } + resource { + name: "word-map" + part { + file_pattern: "TESTDATA/syntaxnet_tagger.word-map" + file_format: "text" + } + } + resource { + name: "label-map" + part { + file_pattern: "TESTDATA/syntaxnet_tagger.label-map" + file_format: "text" + } + } + linked_feature { + name: "words" + fml: "input.focus" + embedding_dim: -1 + size: 1 + source_component: "features" + source_translator: "identity" + source_layer: "input_embeddings" + } + 
linked_feature { + name: "rnn" + fml: "stack.focus" + embedding_dim: 32 + size: 1 + source_component: "tagger" + source_translator: "shift-reduce-step" + source_layer: "layer_0" + } + backend { + registered_name: "SyntaxNetComponent" + } + component_builder { + registered_name: "DynamicComponentBuilder" + } + network_unit { + registered_name: "FeedForwardNetwork" + parameters { + key: "hidden_layer_sizes" + value: "64" + } + } + inference_beam_size: 1 +} +component { + name: "parser" + num_actions : 93 + transition_system { + registered_name: "arc-standard" + } + resource { + name: "tag-map" + part { + file_pattern: "TESTDATA/syntaxnet_tagger.tag-map" + file_format: "text" + } + } + resource { + name: "word-map" + part { + file_pattern: "TESTDATA/syntaxnet_tagger.word-map" + file_format: "text" + } + } + resource { + name: "label-map" + part { + file_pattern: "TESTDATA/syntaxnet_tagger.label-map" + file_format: "text" + } + } + fixed_feature { + name: "action" + fml: "last-action" + embedding_dim: 32 + vocabulary_size: 93 + size: 1 + } + linked_feature { + name: "words" + fml: "input.focus" + embedding_dim: -1 + size: 1 + source_component: "features" + source_translator: "identity" + source_layer: "input_embeddings" + } + linked_feature { + name: "tagger" + fml: "stack.focus" + embedding_dim: 32 + size: 1 + source_component: "tagger" + source_translator: "identity" + source_layer: "layer_0" + } + linked_feature { + name: "rnn" + fml: "stack.focus" + embedding_dim: 32 + size: 1 + source_component: "parser" + source_translator: "shift-reduce-step" + source_layer: "layer_0" + } + backend { + registered_name: "SyntaxNetComponent" + } + component_builder { + registered_name: "DynamicComponentBuilder" + } + network_unit { + registered_name: "FeedForwardNetwork" + parameters { + key: "hidden_layer_sizes" + value: "64" + } + } + inference_beam_size: 5 +} diff --git a/syntaxnet/dragnn/core/testdata/ud-hungarian.master-spec 
b/syntaxnet/dragnn/core/testdata/ud-hungarian.master-spec new file mode 100644 index 0000000000000000000000000000000000000000..9090c225974211b598c3e941db53c8965c0e2bf8 --- /dev/null +++ b/syntaxnet/dragnn/core/testdata/ud-hungarian.master-spec @@ -0,0 +1,213 @@ +component { + name: "rl_rnn" + transition_system { + registered_name: "shift-only" + parameters { + key: "left-to-right" + value: "false" + } + parameters { + key: "parser_skip_deterministic" + value: "false" + } + } + resource { + name: "char-ngram-map" + part { + file_pattern: "TOPDIR/ud-hungarian.char-ngram-map" + file_format: "text" + record_format: "" + } + } + resource { + name: "word-map" + part { + file_pattern: "TOPDIR/ud-hungarian.word-map" + file_format: "text" + record_format: "" + } + } + resource { + name: "label-map" + part { + file_pattern: "TOPDIR/ud-hungarian.label-map" + file_format: "text" + record_format: "" + } + } + fixed_feature { + name: "char_ngram" + fml: "input.token.char-ngram" + embedding_dim: 16 + vocabulary_size: 9943 + size: 1 + } + fixed_feature { + name: "other" + fml: "input.token {digit hyphen punctuation-amount quote }" + embedding_dim: 8 + vocabulary_size: 5 + size: 4 + } + fixed_feature { + name: "words" + fml: "input.word" + embedding_dim: 64 + vocabulary_size: 11090 + size: 1 + } + network_unit { + registered_name: "wrapped_units.LayerNormBasicLSTMNetwork" + parameters { + key: "hidden_layer_sizes" + value: "256" + } + } + component_builder { + registered_name: 'DynamicComponentBuilder' + } + backend { + registered_name: "SyntaxNetComponent" + } + num_actions: 1 + attention_component: "" +} +component { + name: "tagger" + transition_system { + registered_name: "tagger" + parameters { + key: "join_category_to_pos" + value: "true" + } + parameters { + key: "parser_skip_deterministic" + value: "false" + } + } + resource { + name: "tag-map" + part { + file_pattern: "TOPDIR/ud-hungarian.tag-map" + file_format: "text" + record_format: "" + } + } + resource { + name: 
"label-map" + part { + file_pattern: "TOPDIR/ud-hungarian.label-map" + file_format: "text" + record_format: "" + } + } + fixed_feature { + name: "action" + fml: "last-action" + embedding_dim: 32 + vocabulary_size: 100 + size: 1 + } + linked_feature { + name: "encoder" + fml: "input.focus" + embedding_dim: 64 + size: 1 + source_component: "rl_rnn" + source_translator: "reverse-token" + source_layer: "state_h_0" + } + network_unit { + registered_name: "wrapped_units.LayerNormBasicLSTMNetwork" + parameters { + key: "hidden_layer_sizes" + value: "256" + } + } + component_builder { + registered_name: 'DynamicComponentBuilder' + } + backend { + registered_name: "SyntaxNetComponent" + } + num_actions: 642 + attention_component: "" +} +component { + name: "parser" + transition_system { + registered_name: "arc-standard" + parameters { + key: "parser_skip_deterministic" + value: "false" + } + } + resource { + name: "label-map" + part { + file_pattern: "TOPDIR/ud-hungarian.label-map" + file_format: "text" + record_format: "" + } + } + fixed_feature { + name: "action" + fml: "last-action" + embedding_dim: 32 + vocabulary_size: 100 + size: 1 + } + fixed_feature { + name: "labels" + fml: "stack.child(1).label stack.child(1).sibling(-1).label stack.child(-1).label stack.child(-1).sibling(1).label stack(1).child(1).label stack(1).child(1).sibling(-1).label stack(1).child(-1).label stack(1).child(-1).sibling(1).label stack.child(2).label stack.child(-2).label stack(1).child(2).label stack(1).child(-2).label" + embedding_dim: 16 + vocabulary_size: 57 + size: 12 + } + linked_feature { + name: "encoder" + fml: "input.focus" + embedding_dim: 64 + size: 1 + source_component: "rl_rnn" + source_translator: "reverse-token" + source_layer: "state_h_0" + } + linked_feature { + name: "parser-rnn" + fml: "stack.focus stack(1).focus" + embedding_dim: 64 + size: 2 + source_component: "parser" + source_translator: "shift-reduce-step" + source_layer: "layer_0" + } + linked_feature { + name: 
"tagger" + fml: "input.focus stack.focus stack(1).focus" + embedding_dim: 64 + size: 3 + source_component: "tagger" + source_translator: "identity" + source_layer: "state_h_0" + } + network_unit { + registered_name: 'FeedForwardNetwork' + parameters { + key: "hidden_layer_sizes" + value: "256,256" + } + parameters { + key: "layer_norm_hidden" + value: "True" + } + } + component_builder { + registered_name: 'DynamicComponentBuilder' + } + backend { + registered_name: "SyntaxNetComponent" + } + num_actions: 109 + attention_component: "" +} diff --git a/syntaxnet/dragnn/io/BUILD b/syntaxnet/dragnn/io/BUILD new file mode 100644 index 0000000000000000000000000000000000000000..6a101679045fb366cd48217eaeecaec86b061c91 --- /dev/null +++ b/syntaxnet/dragnn/io/BUILD @@ -0,0 +1,34 @@ +package(default_visibility = ["//visibility:public"]) + +cc_library( + name = "sentence_input_batch", + srcs = ["sentence_input_batch.cc"], + hdrs = ["sentence_input_batch.h"], + deps = [ + ":syntaxnet_sentence", + "//dragnn/core/interfaces:input_batch", + "//syntaxnet:base", + "//syntaxnet:sentence_proto", + ], +) + +cc_library( + name = "syntaxnet_sentence", + hdrs = ["syntaxnet_sentence.h"], + deps = [ + "//syntaxnet:sentence_proto", + "//syntaxnet:workspace", + ], +) + +cc_test( + name = "sentence_input_batch_test", + srcs = ["sentence_input_batch_test.cc"], + deps = [ + ":sentence_input_batch", + "//dragnn/core/test:generic", + "//syntaxnet:sentence_proto", + "//syntaxnet:test_main", + "@org_tensorflow//tensorflow/core:test", + ], +) diff --git a/syntaxnet/dragnn/io/sentence_input_batch.cc b/syntaxnet/dragnn/io/sentence_input_batch.cc new file mode 100644 index 0000000000000000000000000000000000000000..ebd11fa8de532412c1c83e053ee42a5d31358088 --- /dev/null +++ b/syntaxnet/dragnn/io/sentence_input_batch.cc @@ -0,0 +1,31 @@ +#include "dragnn/io/sentence_input_batch.h" + +#include "syntaxnet/sentence.pb.h" + +namespace syntaxnet { +namespace dragnn { + +void SentenceInputBatch::SetData( + 
const std::vector &stringified_sentence_protos) { + for (const auto &stringified_proto : stringified_sentence_protos) { + std::unique_ptr sentence(new Sentence); + std::unique_ptr workspace_set(new WorkspaceSet); + CHECK(sentence->ParseFromString(stringified_proto)) + << "Unable to parse string input as syntaxnet.Sentence."; + SyntaxNetSentence aug_sentence(std::move(sentence), + std::move(workspace_set)); + data_.push_back(std::move(aug_sentence)); + } +} + +const std::vector SentenceInputBatch::GetSerializedData() const { + std::vector output_data; + output_data.resize(data_.size()); + for (int i = 0; i < data_.size(); ++i) { + data_[i].sentence()->SerializeToString(&(output_data[i])); + } + return output_data; +} + +} // namespace dragnn +} // namespace syntaxnet diff --git a/syntaxnet/dragnn/io/sentence_input_batch.h b/syntaxnet/dragnn/io/sentence_input_batch.h new file mode 100644 index 0000000000000000000000000000000000000000..757c5142b20b0d7d025b7d2261ab769a643cdcec --- /dev/null +++ b/syntaxnet/dragnn/io/sentence_input_batch.h @@ -0,0 +1,37 @@ +#ifndef NLP_SAFT_OPENSOURCE_DRAGNN_IO_SENTENCE_INPUT_BATCH_H_ +#define NLP_SAFT_OPENSOURCE_DRAGNN_IO_SENTENCE_INPUT_BATCH_H_ + +#include +#include + +#include "dragnn/core/interfaces/input_batch.h" +#include "dragnn/io/syntaxnet_sentence.h" +#include "syntaxnet/base.h" + +namespace syntaxnet { +namespace dragnn { + +// Data accessor backed by a syntaxnet::Sentence object. +class SentenceInputBatch : public InputBatch { + public: + SentenceInputBatch() {} + + // Translates from a vector of stringified Sentence protos. + void SetData( + const std::vector &stringified_sentence_protos) override; + + // Translates to a vector of stringified Sentence protos. + const std::vector GetSerializedData() const override; + + // Get the underlying Sentences. + std::vector *data() { return &data_; } + + private: + // The backing Sentence protos. 
+ std::vector data_; +}; + +} // namespace dragnn +} // namespace syntaxnet + +#endif // NLP_SAFT_OPENSOURCE_DRAGNN_IO_SENTENCE_INPUT_BATCH_H_ diff --git a/syntaxnet/dragnn/io/sentence_input_batch_test.cc b/syntaxnet/dragnn/io/sentence_input_batch_test.cc new file mode 100644 index 0000000000000000000000000000000000000000..a45b4c3b96043a5c53589c1ec00129309e74ef05 --- /dev/null +++ b/syntaxnet/dragnn/io/sentence_input_batch_test.cc @@ -0,0 +1,54 @@ +#include "dragnn/io/sentence_input_batch.h" + +#include "dragnn/core/test/generic.h" +#include "syntaxnet/sentence.pb.h" +#include "tensorflow/core/platform/test.h" + +namespace syntaxnet { +namespace dragnn { + +using syntaxnet::test::EqualsProto; + +TEST(SentenceInputBatchTest, ConvertsFromStringifiedProtos) { + // Create some distinct Sentence protos. + Sentence sentence_one; + sentence_one.set_docid("foo"); + Sentence sentence_two; + sentence_two.set_docid("bar"); + std::vector protos({sentence_one, sentence_two}); + + // Create stringified versions. + std::vector strings; + for (const auto &sentence : protos) { + string str; + sentence.SerializeToString(&str); + strings.push_back(str); + } + + // Create a SentenceInputBatch. The data inside it should match the protos. + SentenceInputBatch set; + set.SetData(strings); + auto converted_data = set.data(); + for (int i = 0; i < protos.size(); ++i) { + EXPECT_THAT(*(converted_data->at(i).sentence()), EqualsProto(protos.at(i))); + EXPECT_NE(converted_data->at(i).workspace(), nullptr); + } + + // Get the data back out. The strings should be identical. + auto output = set.GetSerializedData(); + EXPECT_EQ(output.size(), strings.size()); + EXPECT_NE(output.size(), 0); + for (int i = 0; i < output.size(); ++i) { + EXPECT_EQ(strings.at(i), output.at(i)); + } +} + +TEST(SentenceInputBatchTest, BadlyFormedProtosDie) { + // Create a input batch with malformed data. This should cause a CHECK fail. + SentenceInputBatch set; + EXPECT_DEATH(set.SetData({"BADLY FORMATTED DATA. 
SHOULD CAUSE A CHECK"}), + "Unable to parse string input"); +} + +} // namespace dragnn +} // namespace syntaxnet diff --git a/syntaxnet/dragnn/io/syntaxnet_sentence.h b/syntaxnet/dragnn/io/syntaxnet_sentence.h new file mode 100644 index 0000000000000000000000000000000000000000..cb967288d5302977dacb650c7f8dc6472c31303f --- /dev/null +++ b/syntaxnet/dragnn/io/syntaxnet_sentence.h @@ -0,0 +1,27 @@ +#ifndef NLP_SAFT_OPENSOURCE_DRAGNN_IO_SYNTAXNET_SENTENCE_H_ +#define NLP_SAFT_OPENSOURCE_DRAGNN_IO_SYNTAXNET_SENTENCE_H_ + +#include "syntaxnet/sentence.pb.h" +#include "syntaxnet/workspace.h" + +namespace syntaxnet { +namespace dragnn { + +class SyntaxNetSentence { + public: + SyntaxNetSentence(std::unique_ptr sentence, + std::unique_ptr workspace) + : sentence_(std::move(sentence)), workspace_(std::move(workspace)) {} + + Sentence *sentence() const { return sentence_.get(); } + WorkspaceSet *workspace() const { return workspace_.get(); } + + private: + std::unique_ptr sentence_; + std::unique_ptr workspace_; +}; + +} // namespace dragnn +} // namespace syntaxnet + +#endif // NLP_SAFT_OPENSOURCE_DRAGNN_IO_SYNTAXNET_SENTENCE_H_ diff --git a/syntaxnet/dragnn/protos/BUILD b/syntaxnet/dragnn/protos/BUILD new file mode 100644 index 0000000000000000000000000000000000000000..1559232760bb8a70651667c514829daf98d827f0 --- /dev/null +++ b/syntaxnet/dragnn/protos/BUILD @@ -0,0 +1,43 @@ +package(default_visibility = ["//visibility:public"]) + +load( + "//syntaxnet:syntaxnet.bzl", + "tf_proto_library", + "tf_proto_library_py", +) + +# Protos. 
+ +tf_proto_library( + name = "data_proto", + srcs = ["data.proto"], +) + +tf_proto_library( + name = "trace_proto", + srcs = ["trace.proto"], + deps = [ + ":data_proto", + ], +) + +tf_proto_library( + name = "spec_proto", + srcs = ["spec.proto"], +) + +tf_proto_library_py( + name = "data_py_pb2", + srcs = ["data.proto"], +) + +tf_proto_library_py( + name = "trace_py_pb2", + srcs = ["trace.proto"], + deps = [":data_py_pb2"], +) + +tf_proto_library_py( + name = "spec_py_pb2", + srcs = ["spec.proto"], +) diff --git a/syntaxnet/dragnn/protos/data.proto b/syntaxnet/dragnn/protos/data.proto new file mode 100644 index 0000000000000000000000000000000000000000..222bfea5726ec29e5fc16852e4d78b774f37eae3 --- /dev/null +++ b/syntaxnet/dragnn/protos/data.proto @@ -0,0 +1,39 @@ +// DRAGNN data proto. See go/dragnn-design for more information. + +syntax = "proto2"; + +package syntaxnet.dragnn; + +// A fixed sparse bag of features in DRAGNN. The id, weight, and description +// fields are all aligned if present (ie, any of these that are non-empty should +// have the same # items). If weight is omitted, 1.0 is used. +// +// These features as interepreted as multiple firings of a single feature +// template: e.g., for a single focus word, a bag of ngrams. +message FixedFeatures { + repeated uint64 id = 1; + repeated float weight = 2; + + // string-valued description of each *feature value*. (Only used for + // debugging.) + repeated string value_name = 3; + + // string-valued name of feature. (Only used for debugging.) + optional string feature_name = 4; +} + +// A feature in DRAGNN thats link a component to another or a component to +// itself recurrently. If batch_idx or beam_idx are omitted, 0 is used. +message LinkFeatures { + // Index into the {step x batch x beam} activations workspace generated by + // the previous computation. + optional int64 batch_idx = 1; + optional int64 beam_idx = 2; + optional int64 step_idx = 3; + + // Values in the original feature space. 
This is ignored in TensorFlow. + optional int64 feature_value = 4; + + // string-valued name of feature. (Only used for debugging.) + optional string feature_name = 5; +} diff --git a/syntaxnet/dragnn/protos/spec.proto b/syntaxnet/dragnn/protos/spec.proto new file mode 100644 index 0000000000000000000000000000000000000000..5faac142c65f962dd28dcb8f2deb792fc3f74766 --- /dev/null +++ b/syntaxnet/dragnn/protos/spec.proto @@ -0,0 +1,278 @@ +// DRAGNN Configuration proto. See go/dragnn-design for more information. + +syntax = "proto2"; + +package syntaxnet.dragnn; + +// Proto to specify a set of DRAGNN components (transition systems) that are +// trained and evaluated jointly. Each component gets one ComponentSpec. +// +// The order of component is important: a component can only link to components +// that come before (for now.) +// NEXT ID: 6 +message MasterSpec { + repeated ComponentSpec component = 1; + + // DEPRECATED: Use the "batch_size" param of DragnnTensorFlowTrainer instead. + optional int32 deprecated_batch_size = 2 [default = 1, deprecated = true]; + + // DEPRECATED: Use ComponentSpec.*_beam_size instead. + optional int32 deprecated_beam_size = 3 [default = 1, deprecated = true]; + + // Whether to extract debug traces. + optional bool debug_tracing = 4 [default = false]; +} + +// Complete specification for a single task. +message ComponentSpec { + // Name for this component: this is used in linked features via the + // "source_component" field. + optional string name = 1; + + // TransitionSystem to use. + optional RegisteredModuleSpec transition_system = 2; + + // Resources that this component depends on. These are copied to TaskInputs + // when calling SAFT code. + repeated Resource resource = 3; + + // Feature space configurations. + repeated FixedFeatureChannel fixed_feature = 4; + repeated LinkedFeatureChannel linked_feature = 5; + + // Neural Network builder specification. 
+ optional RegisteredModuleSpec network_unit = 6; + + // The registered C++ implementation of the dragnn::Component class; e.g. + // "SyntaxNetComponent". + optional RegisteredModuleSpec backend = 7; + + // Number of possible actions from every state. + optional int32 num_actions = 8; + + // Specify the name of the lower level component on which it has attention. + optional string attention_component = 9 [default = ""]; + + // Options for the ComponentBuilder. If this is empty, the regular + // tf.while_loop based builder is assumed. + optional RegisteredModuleSpec component_builder = 10; + + // Default max number of active states for beam training. + optional int32 training_beam_size = 11 [default = 1]; + + // Default max number of active states for beam inference. + optional int32 inference_beam_size = 12 [default = 1]; +} + +// Super generic container for any registered sub-piece of DRAGNN. +message RegisteredModuleSpec { + // Name of the registered class. + optional string registered_name = 1; + + // Parameters to set while initializing this system; these are copied to + // Parameters in a TaskSpec when calling SAFT code, or via kwargs in TF Python + // code. + map parameters = 2; +} + +// Fixed resources that will be converted into TaskInput's when calling SAFT +// code. +message Resource { + optional string name = 1; + repeated Part part = 2; +} + +// The Parts here should be more or less compatible with TaskInput. +message Part { + optional string file_pattern = 1; + optional string file_format = 2; + optional string record_format = 3; +} + +// ------------------------------------------------------------------------ +// Feature specifications. +// +// A *feature channel* is a named collection of feature templates that share an +// embedding matrix. Thus all features in the channel are assumed to use the +// same vocabulary: e.g., words, POS tags, hidden layer activations, etc. These +// are extracted, embedded, and then concatenated together as a group. 
+ +// Specification for a feature channel that is a *fixed* function of the input. +// NEXT_ID: 10 +message FixedFeatureChannel { + // Interpretable name for this feature channel. NN builders might depend on + // this to determine how to hook different channels up internally. + optional string name = 1; + + // String describing the FML for this feature channel. + optional string fml = 2; + + // Size of parameters for this space: + + // Dimensions of embedding space, or -1 if the feature should not be embedded. + optional int32 embedding_dim = 3; + + // No. of possible values returned. + optional int32 vocabulary_size = 4; + + // No. of different feature templates in the channel, i.e. the # of features + // that will be concatenated but share the embedding for this channel. + optional int32 size = 5; + + // Whether the embeddings for this channel should be held constant at their + // pretrained values, instead of being trained. Pretrained embeddings are + // required when true. + optional bool is_constant = 9; + + // Resources for this space: + + // Predicate map for compacting feature values. + optional string predicate_map = 6; + + // Pointer to a pretrained embedding matrix for this feature set. + optional Resource pretrained_embedding_matrix = 7; + + // Vocab file, containing all vocabulary words one per line. + optional Resource vocab = 8; +} + +// Specification for a feature channel that *links* to component +// activations. Note that the "vocabulary" of these features is the activations +// that they are linked to, so it is determined by the other components in the +// spec. +message LinkedFeatureChannel { + // Interpretable name for this feature channel. NN builders might depend on + // this to determine how to hook different channels up internally. + optional string name = 1; + + // Feature function specification. Note: these should all be of type + // LinkedFeatureType. 
+ optional string fml = 2; + + // Embedding dimension, or -1 if the link should not be embedded. + optional int32 embedding_dim = 3; + + // No. of different feature templates in the channel, i.e. the # of features + // that will be concatenated but share the embedding for this channel. + optional int32 size = 4; + + // Component to use for translation, e.g. "tagger" + optional string source_component = 5; + + // Translator target, e.g. "token" or "last_action", to translate raw feature + // values into indices. This must be interpretable by the Component referenced + // by source_component. + optional string source_translator = 6; + + // Layer that these features should connect to. + optional string source_layer = 7; +} + +// A vector of hyperparameter configurations to search over. +message TrainingGridSpec { + // Grid points to search over. + repeated GridPoint grid_point = 1; + + // Training targets to create in the graph builder stage. + repeated TrainTarget target = 2; +} + +// A hyperparameter configuration for a training run. +// NEXT ID: 22 +message GridPoint { + // Global learning rate initialization point. + optional double learning_rate = 1 [default = 0.1]; + + // Momentum coefficient when using MomentumOptimizer. + optional double momentum = 2 [default = 0.9]; + + // Decay rate and base for global learning rate decay. The learning rate is + // reduced by a factor of |decay_base| every |decay_steps|. + optional double decay_base = 16 [default = 0.96]; + optional int32 decay_steps = 3 [default = 1000]; + + // Whether to decay the learning rate in a "staircase" manner. If true, the + // rate is adjusted exactly once every |decay_steps|. Otherwise, the rate is + // adjusted in smaller increments on every step, such that the overall rate of + // decay is still |decay_base| every |decay_steps|. + optional bool decay_staircase = 17 [default = true]; + + // Random seed to initialize parameters. 
+ optional int32 seed = 4 [default = 0]; + + // Specify the optimizer used in training, the default is MomentumOptimizer. + optional string learning_method = 7 [default = 'momentum']; + + // Whether or not to use a moving average of the weights in inference time. + optional bool use_moving_average = 8 [default = false]; + + // Rolling average update co-efficient. + optional double average_weight = 9 [default = 0.9999]; + + // The dropout *keep* probability rate used in the model. 1.0 = no dropout. + optional double dropout_rate = 10 [default = 1.0]; + + // The dropout *keep* probability rate for recurrent connections. If < 0.0, + // recurrent connections should use |dropout_rate| instead. 1.0 = no dropout. + optional double recurrent_dropout_rate = 20 [default = -1.0]; + + // Gradient clipping threshold, applied if greater than zero. A value in the + // range 1-20 seems to work well to prevent large learning rates from causing + // problems for updates at the start of training. + optional double gradient_clip_norm = 11 [default = 0.0]; + + // DEPRECATED: Use TrainTarget instead. + repeated double component_weights = 5; + repeated bool unroll_using_oracle = 6; + + // A spec for using multiple optimization methods. + message CompositeOptimizerSpec { + // First optimizer. + optional GridPoint method1 = 1; + + // Second optimizer. + optional GridPoint method2 = 2; + + // After this number of steps, switch from first to second. + optional int32 switch_after_steps = 3; + } + optional CompositeOptimizerSpec composite_optimizer_spec = 12; + + // Parameters for Adam training. + optional double adam_beta1 = 13 [default = 0.01]; + optional double adam_beta2 = 14 [default = 0.9999]; + optional double adam_eps = 15 [default = 1e-8]; + + // Coefficient for global L2 regularization. + optional double l2_regularization_coefficient = 18 [default = 1e-4]; + + // Coefficient for global self normalization regularization. + // A value of zero turns it off. 
+ optional double self_norm_alpha = 19 [default = 0.0]; + + // Comma separated list of components to which self_norm_alpha + // should be restricted. If left empty, no filtering will take + // place. Typically a single component. + optional string self_norm_components_filter = 21; +} + +// Training target to be built into the graph. +message TrainTarget { + // Name for this target. This should be unique across all targets. + optional string name = 1; + + // Specify the weights for different components. This should be the same size + // as the number of components in the spec, or empty (defaults to equal + // weights). Weights are normalized across the components being trained to sum + // to one. + repeated double component_weights = 2; + + // Specify whether to train a component using supervised signal or not. This + // should be the same size as the number of components in the spec, or empty + // (defaults to all true). + repeated bool unroll_using_oracle = 3; + + // Maximum length of the pipeline to train. E.g. if max_index is 1, then only + // the first component will be trained via this target. + optional int32 max_index = 4 [default = -1]; +} diff --git a/syntaxnet/dragnn/protos/trace.proto b/syntaxnet/dragnn/protos/trace.proto new file mode 100644 index 0000000000000000000000000000000000000000..2da051fe93baca72dd8e0e17a80fa5e76986407e --- /dev/null +++ b/syntaxnet/dragnn/protos/trace.proto @@ -0,0 +1,78 @@ +syntax = "proto2"; + +import "dragnn/protos/data.proto"; + + +package syntaxnet.dragnn; + +// Describes single embedding "group", e.g., 'words', 'tags'. Each group shares +// an embedding space. +message FixedFeatureChannelTrace { + // string-valued name of the group, e.g., 'words'. + optional string name = 1; + + // The feature functions active in this embedding group. + repeated FixedFeatures value_trace = 2; +} + +// Trace for an entire linked feature channel. +message LinkedFeatureChannelTrace { + // Name of the embedding space. 
+ optional string name = 1; + + // The component that this feature links to. + optional string source_component = 2; + + // The string-valued name of the translator function that maps a feature value + // to a step index. + optional string source_translator = 3; + + // The name of the layer that we are extracting from the identified step. + optional string source_layer = 4; + + // Individual features within this group. + repeated LinkFeatures value_trace = 5; +} + +// The trace for a single step of a single Component. +message ComponentStepTrace { + // A caption/description to describe this step. This should fit in a graphical + // node rendered to the screen. + optional string caption = 1; + + repeated FixedFeatureChannelTrace fixed_feature_trace = 2; + repeated LinkedFeatureChannelTrace linked_feature_trace = 3; + + // An *HTML-language* representation of the current state. + optional string html_representation = 4; + + // The scores for each potential decision. (The mapping from index to name is + // managed by the component.) + repeated double outcome_score = 5; + + // Set to true once the step is finished. (This allows us to open a step after + // each transition, without having to know if it will be used.) + optional bool step_finished = 6 [default = false]; +} + +// The traces for all steps for a single Component. +message ComponentTrace { + // Name of the component; should match the ComponentSpec. + optional string name = 1; + + // The steps that have been taken by this Component. + repeated ComponentStepTrace step_trace = 2; +} + +// The traces for all Components. +message MasterTrace { + repeated ComponentTrace component_trace = 1; +} + +// Main proto being used to trace parsing. +message DragnnTrace { + + // For each sentence, there is a sequence of state sets storing tracing + // information. 
+ repeated MasterTrace master_trace = 1; +} diff --git a/syntaxnet/dragnn/python/BUILD b/syntaxnet/dragnn/python/BUILD new file mode 100644 index 0000000000000000000000000000000000000000..d25aca907996bd0ea48f8e387ddd1536ed077182 --- /dev/null +++ b/syntaxnet/dragnn/python/BUILD @@ -0,0 +1,372 @@ +package(default_visibility = ["//visibility:public"]) + +cc_binary( + name = "dragnn_cc_impl.so", + linkopts = select({ + "//conditions:default": ["-lm"], + "@org_tensorflow//tensorflow:darwin": [], + }), + linkshared = 1, + linkstatic = 1, + deps = [ + "//dragnn/components/syntaxnet:syntaxnet_component", + "//dragnn/core:dragnn_bulk_ops_cc", + "//dragnn/core:dragnn_ops_cc", + ], +) + +py_library( + name = "load_dragnn_cc_impl_py", + srcs = ["load_dragnn_cc_impl.py"], + data = [":dragnn_cc_impl.so"], +) + +py_library( + name = "bulk_component", + srcs = [ + "bulk_component.py", + ], + deps = [ + ":dragnn_ops", + ":network_units", + "//syntaxnet/util:check", + "@org_tensorflow//tensorflow:tensorflow_py", + "@org_tensorflow//tensorflow/core:protos_all_py", + ], +) + +py_library( + name = "components", + srcs = [ + "component.py", + ], + deps = [ + ":bulk_component", + ":dragnn_ops", + ":network_units", + "//syntaxnet/util:check", + "//syntaxnet/util:pyregistry", + "@org_tensorflow//tensorflow:tensorflow_py", + "@org_tensorflow//tensorflow/core:protos_all_py", + ], +) + +py_library( + name = "composite_optimizer", + srcs = ["composite_optimizer.py"], + deps = [ + "@org_tensorflow//tensorflow:tensorflow_py", + "@org_tensorflow//tensorflow/core:protos_all_py", + ], +) + +py_library( + name = "dragnn_ops", + srcs = ["dragnn_ops.py"], + deps = [], +) + +py_library( + name = "graph_builder", + srcs = ["graph_builder.py"], + deps = [ + ":biaffine_units", + ":components", + ":composite_optimizer", + ":dragnn_ops", + ":network_units", + ":wrapped_units", + "//dragnn/protos:spec_py_pb2", + "//syntaxnet/util:check", + "@org_tensorflow//tensorflow:tensorflow_py", + 
"@org_tensorflow//tensorflow/core:protos_all_py", + ], +) + +py_library( + name = "network_units", + srcs = ["network_units.py"], + deps = [ + ":dragnn_ops", + "//syntaxnet/util:check", + "//syntaxnet/util:pyregistry", + "@org_tensorflow//tensorflow:tensorflow_py", + "@org_tensorflow//tensorflow/core:protos_all_py", + ], +) + +py_library( + name = "render_parse_tree_graphviz", + srcs = ["render_parse_tree_graphviz.py"], + deps = [ + ], +) + +py_test( + name = "render_parse_tree_graphviz_test", + srcs = ["render_parse_tree_graphviz_test.py"], + deps = [ + ":render_parse_tree_graphviz", + "//syntaxnet:sentence_py_pb2", + "@org_tensorflow//tensorflow:tensorflow_py", + ], +) + +py_library( + name = "render_spec_with_graphviz", + srcs = ["render_spec_with_graphviz.py"], + deps = [ + "//dragnn/protos:spec_py_pb2", + ], +) + +py_test( + name = "render_spec_with_graphviz_test", + srcs = ["render_spec_with_graphviz_test.py"], + deps = [ + ":render_spec_with_graphviz", + ":spec_builder", + "@org_tensorflow//tensorflow:tensorflow_py", + ], +) + +py_library( + name = "sentence_io", + srcs = ["sentence_io.py"], + deps = [ + "//syntaxnet:parser_ops", + ], +) + +py_binary( + name = "visualization", + srcs = ["visualization.py"], + data = [ + "//dragnn/viz:viz-min-js-gz", + ], + deps = [ + "//dragnn/protos:trace_py_pb2", + ], +) + +py_test( + name = "visualization_test", + srcs = ["visualization_test.py"], + deps = [ + ":visualization", + "//dragnn/protos:trace_py_pb2", + "@org_tensorflow//tensorflow:tensorflow_py", + ], +) + +py_library( + name = "wrapped_units", + srcs = ["wrapped_units.py"], + deps = [ + ":network_units", + "//syntaxnet/util:check", + "@org_tensorflow//tensorflow:tensorflow_py", + "@org_tensorflow//tensorflow/core:protos_all_py", + ], +) + +# Tests + +py_test( + name = "bulk_component_test", + srcs = [ + "bulk_component_test.py", + ], + deps = [ + ":bulk_component", + ":components", + ":dragnn_ops", + ":load_dragnn_cc_impl_py", + ":network_units", + 
"//dragnn/core:dragnn_bulk_ops", + "//dragnn/core:dragnn_ops", + "//dragnn/protos:spec_py_pb2", + "//syntaxnet:load_parser_ops_py", + "//syntaxnet:sentence_py_pb2", + "@org_tensorflow//tensorflow:tensorflow_py", + "@org_tensorflow//tensorflow/core:protos_all_py", + ], +) + +py_test( + name = "composite_optimizer_test", + srcs = ["composite_optimizer_test.py"], + deps = [ + ":composite_optimizer", + ":load_dragnn_cc_impl_py", + "//dragnn/core:dragnn_bulk_ops", + "//dragnn/core:dragnn_ops", + "//syntaxnet:load_parser_ops_py", + "@org_tensorflow//tensorflow:tensorflow_py", + "@org_tensorflow//tensorflow/core:protos_all_py", + ], +) + +py_test( + name = "graph_builder_test", + size = "large", + srcs = ["graph_builder_test.py"], + data = [ + "//dragnn/core:testdata", + ], + tags = [ + "notsan", + ], + deps = [ + ":dragnn_ops", + ":graph_builder", + ":load_dragnn_cc_impl_py", + "//dragnn/core:dragnn_bulk_ops", + "//dragnn/core:dragnn_ops", + "//dragnn/protos:spec_py_pb2", + "//dragnn/protos:trace_py_pb2", + "//syntaxnet:load_parser_ops_py", + "//syntaxnet:sentence_py_pb2", + "@org_tensorflow//tensorflow:tensorflow_py", + "@org_tensorflow//tensorflow/core:protos_all_py", + ], +) + +py_test( + name = "network_units_test", + size = "small", + srcs = ["network_units_test.py"], + deps = [ + ":load_dragnn_cc_impl_py", + ":network_units", + "//dragnn/core:dragnn_bulk_ops", + "//dragnn/core:dragnn_ops", + "//dragnn/protos:spec_py_pb2", + "//syntaxnet:load_parser_ops_py", + "@org_tensorflow//tensorflow:tensorflow_py", + "@org_tensorflow//tensorflow/core:protos_all_py", + ], +) + +py_test( + name = "sentence_io_test", + srcs = ["sentence_io_test.py"], + data = ["//syntaxnet:testdata"], + deps = [ + ":sentence_io", + "//syntaxnet:load_parser_ops_py", + "//syntaxnet:parser_ops", + "//syntaxnet:sentence_py_pb2", + "@org_tensorflow//tensorflow:tensorflow_py", + "@org_tensorflow//tensorflow/core:protos_all_py", + ], +) + +py_library( + name = "trainer_lib", + srcs = ["trainer_lib.py"], 
+ deps = [ + "//dragnn/protos:spec_py_pb2", + "//syntaxnet:parser_ops", + "//syntaxnet:sentence_py_pb2", + "//syntaxnet:task_spec_py_pb2", + "@org_tensorflow//tensorflow:tensorflow_py", + "@org_tensorflow//tensorflow/core:protos_all_py", + ], +) + +py_library( + name = "lexicon", + srcs = ["lexicon.py"], + deps = [ + "//syntaxnet:parser_ops", + "//syntaxnet:task_spec_py_pb2", + "@org_tensorflow//tensorflow:tensorflow_py", + ], +) + +py_test( + name = "lexicon_test", + srcs = ["lexicon_test.py"], + deps = [ + ":lexicon", + "//syntaxnet:load_parser_ops_py", + "//syntaxnet:parser_ops", + "//syntaxnet:parser_trainer", + "@org_tensorflow//tensorflow:tensorflow_py", + ], +) + +py_library( + name = "evaluation", + srcs = ["evaluation.py"], + deps = [ + "//syntaxnet:sentence_py_pb2", + "//syntaxnet/util:check", + "@org_tensorflow//tensorflow:tensorflow_py", + ], +) + +py_test( + name = "evaluation_test", + srcs = ["evaluation_test.py"], + deps = [ + ":evaluation", + "//syntaxnet:sentence_py_pb2", + "@org_tensorflow//tensorflow:tensorflow_py", + ], +) + +py_library( + name = "spec_builder", + srcs = ["spec_builder.py"], + deps = [ + ":lexicon", + "//dragnn/protos:spec_py_pb2", + "//syntaxnet:parser_ops", + "//syntaxnet/util:check", + "@org_tensorflow//tensorflow:tensorflow_py", + ], +) + +py_test( + name = "spec_builder_test", + srcs = ["spec_builder_test.py"], + deps = [ + ":spec_builder", + "//dragnn/protos:spec_py_pb2", + "//syntaxnet:load_parser_ops_py", + "//syntaxnet:parser_ops", + "//syntaxnet:parser_trainer", + "@org_tensorflow//tensorflow:tensorflow_py", + ], +) + +py_library( + name = "digraph_ops", + srcs = ["digraph_ops.py"], + deps = [ + "//syntaxnet/util:check", + "@org_tensorflow//tensorflow:tensorflow_py", + ], +) + +py_test( + name = "digraph_ops_test", + srcs = ["digraph_ops_test.py"], + deps = [ + ":digraph_ops", + "@org_tensorflow//tensorflow:tensorflow_py", + ], +) + +py_library( + name = "biaffine_units", + srcs = ["biaffine_units.py"], + deps = [ + 
":digraph_ops", + ":network_units", + "//syntaxnet/util:check", + "@org_tensorflow//tensorflow:tensorflow_py", + ], +) diff --git a/syntaxnet/dragnn/python/biaffine_units.py b/syntaxnet/dragnn/python/biaffine_units.py new file mode 100644 index 0000000000000000000000000000000000000000..f8d9bad2a14b97b7c033802db7b44f61ffddb333 --- /dev/null +++ b/syntaxnet/dragnn/python/biaffine_units.py @@ -0,0 +1,240 @@ +"""Network units used in the Dozat and Manning (2017) biaffine parser.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow as tf + +from dragnn.python import digraph_ops +from dragnn.python import network_units +from syntaxnet.util import check + + +class BiaffineDigraphNetwork(network_units.NetworkUnitInterface): + """Network unit that computes biaffine digraph scores. + + The D&M parser uses two MLPs to create two activation vectors for each token, + which represent the token when it it used as the source or target of an arc. + Arcs are scored using a "biaffine" function that includes a bilinear and + linear term: + + sources[s] * arc_weights * targets[t] + sources[s] * source_weights + + The digraph is "unlabeled" in that there is at most one arc between any pair + of tokens. If labels are required, the BiaffineLabelNetwork can be used to + label a set of selected arcs. + + Note that in the typical use case where the source and target activations are + the same dimension and are produced by single-layer MLPs, it is arithmetically + equivalent to produce the source and target activations using a single MLP of + twice the size, and then split those activations in half. The |SplitNetwork| + can be used for this purpose. + + Parameters: + None. + + Features: + sources: [B * N, S] matrix of batched activations for source tokens. + targets: [B * N, T] matrix of batched activations for target tokens. 
+ + Layers: + adjacency: [B * N, N] matrix where entry b*N+s,t is the score of the arc + from s to t in batch b, if s != t, or the score for selecting t + as a root, if s == t. + """ + + def __init__(self, component): + """Initializes weights and layers. + + Args: + component: Parent ComponentBuilderBase object. + """ + super(BiaffineDigraphNetwork, self).__init__(component) + + check.Eq(len(self._fixed_feature_dims.items()), 0, + 'Expected no fixed features') + check.Eq(len(self._linked_feature_dims.items()), 2, + 'Expected two linked features') + + check.In('sources', self._linked_feature_dims, + 'Missing required linked feature') + check.In('targets', self._linked_feature_dims, + 'Missing required linked feature') + self._source_dim = self._linked_feature_dims['sources'] + self._target_dim = self._linked_feature_dims['targets'] + + # TODO(googleuser): Make parameter initialization configurable. + self._weights = [] + self._weights.append(tf.get_variable( + 'weights_arc', [self._source_dim, self._target_dim], tf.float32, + tf.random_normal_initializer(stddev=1e-4, seed=self._seed))) + self._weights.append(tf.get_variable( + 'weights_source', [self._source_dim], tf.float32, + tf.random_normal_initializer(stddev=1e-4, seed=self._seed))) + self._weights.append(tf.get_variable( + 'root', [self._source_dim], tf.float32, + tf.random_normal_initializer(stddev=1e-4, seed=self._seed))) + + self._params.extend(self._weights) + self._regularized_weights.extend(self._weights) + + # Negative Layer.dim indicates that the dimension is dynamic. 
+ self._layers.append(network_units.Layer(self, 'adjacency', -1)) + + def create(self, + fixed_embeddings, + linked_embeddings, + context_tensor_arrays, + attention_tensor, + during_training, + stride=None): + """Requires |stride|; otherwise see base class.""" + check.NotNone(stride, + 'BiaffineDigraphNetwork requires "stride" and must be called ' + 'in the bulk feature extractor component.') + + # TODO(googleuser): Add dropout during training. + del during_training + + # Retrieve (possibly averaged) weights. + weights_arc = self._component.get_variable('weights_arc') + weights_source = self._component.get_variable('weights_source') + root = self._component.get_variable('root') + + # Extract the source and target token activations. Use |stride| to collapse + # batch and beam into a single dimension. + sources = network_units.lookup_named_tensor('sources', linked_embeddings) + targets = network_units.lookup_named_tensor('targets', linked_embeddings) + source_tokens_bxnxs = tf.reshape(sources.tensor, + [stride, -1, self._source_dim]) + target_tokens_bxnxt = tf.reshape(targets.tensor, + [stride, -1, self._target_dim]) + num_tokens = tf.shape(source_tokens_bxnxs)[1] + + # Compute the arc, source, and root potentials. + arcs_bxnxn = digraph_ops.ArcPotentialsFromTokens( + source_tokens_bxnxs, target_tokens_bxnxt, weights_arc) + sources_bxnxn = digraph_ops.ArcSourcePotentialsFromTokens( + source_tokens_bxnxs, weights_source) + roots_bxn = digraph_ops.RootPotentialsFromTokens( + root, target_tokens_bxnxt, weights_arc) + + # Combine them into a single matrix with the roots on the diagonal. + adjacency_bxnxn = digraph_ops.CombineArcAndRootPotentials( + arcs_bxnxn + sources_bxnxn, roots_bxn) + + return [tf.reshape(adjacency_bxnxn, [-1, num_tokens])] + + +class BiaffineLabelNetwork(network_units.NetworkUnitInterface): + """Network unit that computes biaffine label scores. + + D&M parser uses a slightly modified version of the arc scoring function to + score labels. 
The differences are: + + 1. Each label has its own source and target MLPs and biaffine weights. + 2. A linear term for the target token is added. + 3. A bias term is added. + + Parameters: + num_labels: The number of dependency labels, L. + + Features: + sources: [B * N, S] matrix of batched activations for source tokens. + targets: [B * N, T] matrix of batched activations for target tokens. + + Layers: + labels: [B * N, L] matrix where entry b*N+t,l is the score of the label of + the inbound arc for token t in batch b. + """ + + def __init__(self, component): + """Initializes weights and layers. + + Args: + component: Parent ComponentBuilderBase object. + """ + super(BiaffineLabelNetwork, self).__init__(component) + + parameters = component.spec.network_unit.parameters + self._num_labels = int(parameters['num_labels']) + + check.Gt(self._num_labels, 0, 'Expected some labels') + check.Eq(len(self._fixed_feature_dims.items()), 0, + 'Expected no fixed features') + check.Eq(len(self._linked_feature_dims.items()), 2, + 'Expected two linked features') + + check.In('sources', self._linked_feature_dims, + 'Missing required linked feature') + check.In('targets', self._linked_feature_dims, + 'Missing required linked feature') + + self._source_dim = self._linked_feature_dims['sources'] + self._target_dim = self._linked_feature_dims['targets'] + + # TODO(googleuser): Make parameter initialization configurable. 
+ self._weights = [] + self._weights.append(tf.get_variable( + 'weights_pair', [self._num_labels, self._source_dim, self._target_dim], + tf.float32, tf.random_normal_initializer(stddev=1e-4, seed=self._seed))) + self._weights.append(tf.get_variable( + 'weights_source', [self._num_labels, self._source_dim], tf.float32, + tf.random_normal_initializer(stddev=1e-4, seed=self._seed))) + self._weights.append(tf.get_variable( + 'weights_target', [self._num_labels, self._target_dim], tf.float32, + tf.random_normal_initializer(stddev=1e-4, seed=self._seed))) + + self._biases = [] + self._biases.append(tf.get_variable( + 'biases', [self._num_labels], tf.float32, + tf.random_normal_initializer(stddev=1e-4, seed=self._seed))) + + self._params.extend(self._weights + self._biases) + self._regularized_weights.extend(self._weights) + + self._layers.append(network_units.Layer(self, 'labels', self._num_labels)) + + def create(self, + fixed_embeddings, + linked_embeddings, + context_tensor_arrays, + attention_tensor, + during_training, + stride=None): + """Requires |stride|; otherwise see base class.""" + check.NotNone(stride, + 'BiaffineLabelNetwork requires "stride" and must be called ' + 'in the bulk feature extractor component.') + + # TODO(googleuser): Add dropout during training. + del during_training + + # Retrieve (possibly averaged) weights. + weights_pair = self._component.get_variable('weights_pair') + weights_source = self._component.get_variable('weights_source') + weights_target = self._component.get_variable('weights_target') + biases = self._component.get_variable('biases') + + # Extract and shape the source and target token activations. Use |stride| + # to collapse batch and beam into a single dimension. 
+ sources = network_units.lookup_named_tensor('sources', linked_embeddings) + targets = network_units.lookup_named_tensor('targets', linked_embeddings) + sources_bxnxs = tf.reshape(sources.tensor, [stride, -1, self._source_dim]) + targets_bxnxt = tf.reshape(targets.tensor, [stride, -1, self._target_dim]) + + # Compute the pair, source, and target potentials. + pairs_bxnxl = digraph_ops.LabelPotentialsFromTokenPairs(sources_bxnxs, + targets_bxnxt, + weights_pair) + sources_bxnxl = digraph_ops.LabelPotentialsFromTokens(sources_bxnxs, + weights_source) + targets_bxnxl = digraph_ops.LabelPotentialsFromTokens(targets_bxnxt, + weights_target) + + # Combine them with the biases. + labels_bxnxl = pairs_bxnxl + sources_bxnxl + targets_bxnxl + biases + + # Flatten out the batch dimension. + return [tf.reshape(labels_bxnxl, [-1, self._num_labels])] diff --git a/syntaxnet/dragnn/python/bulk_component.py b/syntaxnet/dragnn/python/bulk_component.py new file mode 100644 index 0000000000000000000000000000000000000000..6c797857919cdcbd2cb0bb913b1e4e32a3a95cfc --- /dev/null +++ b/syntaxnet/dragnn/python/bulk_component.py @@ -0,0 +1,460 @@ +"""Component builders for non-recurrent networks in DRAGNN.""" + + +import tensorflow as tf +from tensorflow.python.platform import tf_logging as logging + +from dragnn.python import component +from dragnn.python import dragnn_ops +from dragnn.python import network_units +from syntaxnet.util import check + + +def fetch_linked_embedding(comp, network_states, feature_spec): + """Looks up linked embeddings in other components. + + Args: + comp: ComponentBuilder object with respect to which the feature is to be + fetched + network_states: dictionary of NetworkState objects + feature_spec: FeatureSpec proto for the linked feature to be looked up + + Returns: + NamedTensor containing the linked feature tensor + + Raises: + NotImplementedError: if a linked feature with source translator other than + 'identity' is configured. 
+ RuntimeError: if a recurrent linked feature is configured. + """ + if feature_spec.source_translator != 'identity': + raise NotImplementedError(feature_spec.source_translator) + if feature_spec.source_component == comp.name: + raise RuntimeError( + 'Recurrent linked features are not supported in bulk extraction.') + tf.logging.info('[%s] Adding linked feature "%s"', comp.name, + feature_spec.name) + source = comp.master.lookup_component[feature_spec.source_component] + + return network_units.NamedTensor( + network_states[source.name].activations[ + feature_spec.source_layer].bulk_tensor, + feature_spec.name) + + +def _validate_embedded_fixed_features(comp): + """Checks that the embedded fixed features of |comp| are set up properly.""" + for feature in comp.spec.fixed_feature: + check.Gt(feature.embedding_dim, 0, + 'Embeddings requested for non-embedded feature: %s' % feature) + if feature.is_constant: + check.IsTrue(feature.HasField('pretrained_embedding_matrix'), + 'Constant embeddings must be pretrained: %s' % feature) + + +def fetch_differentiable_fixed_embeddings(comp, state, stride): + """Looks up fixed features with separate, differentiable, embedding lookup. + + Args: + comp: Component whose fixed features we wish to look up. + state: live MasterState object for the component. + stride: Tensor containing current batch * beam size. 
+ + Returns: + state handle: updated state handle to be used after this call + fixed_embeddings: list of NamedTensor objects + """ + _validate_embedded_fixed_features(comp) + num_channels = len(comp.spec.fixed_feature) + if not num_channels: + return state.handle, [] + + state.handle, indices, ids, weights, num_steps = ( + dragnn_ops.bulk_fixed_features( + state.handle, component=comp.name, num_channels=num_channels)) + fixed_embeddings = [] + for channel, feature_spec in enumerate(comp.spec.fixed_feature): + differentiable_or_constant = ('constant' if feature_spec.is_constant else + 'differentiable') + tf.logging.info('[%s] Adding %s fixed feature "%s"', comp.name, + differentiable_or_constant, feature_spec.name) + size = stride * num_steps * feature_spec.size + fixed_embedding = network_units.embedding_lookup( + comp.get_variable(network_units.fixed_embeddings_name(channel)), + indices[channel], ids[channel], weights[channel], size) + if feature_spec.is_constant: + fixed_embedding = tf.stop_gradient(fixed_embedding) + fixed_embeddings.append( + network_units.NamedTensor(fixed_embedding, feature_spec.name)) + + return state.handle, fixed_embeddings + + +def fetch_fast_fixed_embeddings(comp, state): + """Looks up fixed features with fast, non-differentiable, op. + + Since BulkFixedEmbeddings is non-differentiable with respect to the + embeddings, the idea is to call this function only when the graph is + not being used for training. + + Args: + comp: Component whose fixed features we wish to look up. + state: live MasterState object for the component. 
+ + Returns: + state handle: updated state handle to be used after this call + fixed_embeddings: list of NamedTensor objects + """ + _validate_embedded_fixed_features(comp) + num_channels = len(comp.spec.fixed_feature) + if not num_channels: + return state.handle, [] + tf.logging.info('[%s] Adding %d fast fixed features', comp.name, num_channels) + + state.handle, bulk_embeddings, _ = dragnn_ops.bulk_fixed_embeddings( + state.handle, [ + comp.get_variable(network_units.fixed_embeddings_name(c)) + for c in range(num_channels) + ], + component=comp.name) + + bulk_embeddings = network_units.NamedTensor(bulk_embeddings, + 'bulk-%s-fixed-features' % + comp.name) + return state.handle, [bulk_embeddings] + + +def extract_fixed_feature_ids(comp, state, stride): + """Extracts fixed feature IDs. + + Args: + comp: Component whose fixed feature IDs we wish to extract. + state: Live MasterState object for the component. + stride: Tensor containing current batch * beam size. + + Returns: + state handle: Updated state handle to be used after this call. + ids: List of [stride * num_steps, 1] feature IDs per channel. Missing IDs + (e.g., due to batch padding) are set to -1. + """ + num_channels = len(comp.spec.fixed_feature) + if not num_channels: + return state.handle, [] + + for feature_spec in comp.spec.fixed_feature: + check.Eq(feature_spec.size, 1, 'All features must have size=1') + check.Lt(feature_spec.embedding_dim, 0, 'All features must be non-embedded') + + state.handle, indices, ids, _, num_steps = dragnn_ops.bulk_fixed_features( + state.handle, component=comp.name, num_channels=num_channels) + size = stride * num_steps + + fixed_ids = [] + for channel, feature_spec in enumerate(comp.spec.fixed_feature): + tf.logging.info('[%s] Adding fixed feature IDs "%s"', comp.name, + feature_spec.name) + + # The +1 and -1 increments ensure that missing IDs default to -1. + # + # TODO(googleuser): This formula breaks if multiple IDs are extracted at some + # step. 
Try using tf.unique() to enforce the unique-IDS precondition. + sums = tf.unsorted_segment_sum(ids[channel] + 1, indices[channel], size) - 1 + sums = tf.expand_dims(sums, axis=1) + fixed_ids.append(network_units.NamedTensor(sums, feature_spec.name, dim=1)) + return state.handle, fixed_ids + + +def update_network_states(comp, tensors, network_states, stride): + """Stores Tensor objects corresponding to layer outputs. + + For use in subsequent tasks. + + Args: + comp: Component for which the tensor handles are being stored. + tensors: list of Tensors to store + network_states: dictionary of component NetworkState objects + stride: stride of the stored tensor. + """ + network_state = network_states[comp.name] + with tf.name_scope(comp.name + '/stored_act'): + for index, network_tensor in enumerate(tensors): + network_state.activations[comp.network.layers[index].name] = ( + network_units.StoredActivations(tensor=network_tensor, stride=stride, + dim=comp.network.layers[index].dim)) + + +def build_cross_entropy_loss(logits, gold): + """Constructs a cross entropy from logits and one-hot encoded gold labels. + + Supports skipping rows where the gold label is the magic -1 value. + + Args: + logits: float Tensor of scores. + gold: int Tensor of one-hot labels. + + Returns: + cost, correct, total: the total cost, the total number of correctly + predicted labels, and the total number of valid labels. + """ + valid = tf.reshape(tf.where(tf.greater(gold, -1)), [-1]) + gold = tf.gather(gold, valid) + logits = tf.gather(logits, valid) + correct = tf.reduce_sum(tf.to_int32(tf.nn.in_top_k(logits, gold, 1))) + total = tf.size(gold) + cost = tf.reduce_sum( + tf.contrib.nn.deprecated_flipped_sparse_softmax_cross_entropy_with_logits( + logits, tf.cast(gold, tf.int64))) / tf.cast(total, tf.float32) + return cost, correct, total + + +class BulkFeatureExtractorComponentBuilder(component.ComponentBuilderBase): + """A component builder to bulk extract features. 
+ + Both fixed and linked features are supported, with some restrictions: + 1. Fixed features may not be recurrent. Fixed features are extracted along the + gold path, which does not work during inference. + 2. Linked features may not be recurrent and are 'untranslated'. For now, + linked features are extracted without passing them through any transition + system or source translator. + """ + + def build_greedy_training(self, state, network_states): + """Extracts features and advances a batch using the oracle path. + + Args: + state: MasterState from the 'AdvanceMaster' op that advances the + underlying master to this component. + network_states: dictionary of component NetworkState objects + + Returns: + state handle: final state after advancing + cost: regularization cost, possibly associated with embedding matrices + correct: since no gold path is available, 0. + total: since no gold path is available, 0. + """ + logging.info('Building component: %s', self.spec.name) + stride = state.current_batch_size * self.training_beam_size + with tf.variable_scope(self.name, reuse=True): + state.handle, fixed_embeddings = fetch_differentiable_fixed_embeddings( + self, state, stride) + + linked_embeddings = [ + fetch_linked_embedding(self, network_states, spec) + for spec in self.spec.linked_feature + ] + + with tf.variable_scope(self.name, reuse=True): + tensors = self.network.create( + fixed_embeddings, linked_embeddings, None, None, True, stride=stride) + update_network_states(self, tensors, network_states, stride) + cost = self.add_regularizer(tf.constant(0.)) + + return state.handle, cost, 0, 0 + + def build_greedy_inference(self, state, network_states, + during_training=False): + """Extracts features and advances a batch using the oracle path. + + NOTE(danielandor) For now this method cannot be called during training. + That is to say, unroll_using_oracle for this component must be set to true. 
+ This will be fixed by separating train_with_oracle and train_with_inference. + + Args: + state: MasterState from the 'AdvanceMaster' op that advances the + underlying master to this component. + network_states: dictionary of component NetworkState objects + during_training: whether the graph is being constructed during training + + Returns: + state handle: final state after advancing + """ + logging.info('Building component: %s', self.spec.name) + if during_training: + stride = state.current_batch_size * self.training_beam_size + else: + stride = state.current_batch_size * self.inference_beam_size + + with tf.variable_scope(self.name, reuse=True): + if during_training: + state.handle, fixed_embeddings = fetch_differentiable_fixed_embeddings( + self, state, stride) + else: + state.handle, fixed_embeddings = fetch_fast_fixed_embeddings(self, + state) + + linked_embeddings = [ + fetch_linked_embedding(self, network_states, spec) + for spec in self.spec.linked_feature + ] + + with tf.variable_scope(self.name, reuse=True): + tensors = self.network.create( + fixed_embeddings, + linked_embeddings, + None, + None, + during_training=during_training, + stride=stride) + + update_network_states(self, tensors, network_states, stride) + return state.handle + + +class BulkFeatureIdExtractorComponentBuilder(component.ComponentBuilderBase): + """A component builder to bulk extract feature IDs. + + This is a variant of BulkFeatureExtractorComponentBuilder that only supports + fixed features, and extracts raw feature IDs instead of feature embeddings. + Since the extracted feature IDs are integers, the results produced by this + component are in general not differentiable. + """ + + def __init__(self, master, component_spec): + """Initializes the feature ID extractor component. + + Args: + master: dragnn.MasterBuilder object. + component_spec: dragnn.ComponentSpec proto to be built. 
+ """ + super(BulkFeatureIdExtractorComponentBuilder, self).__init__( + master, component_spec) + check.Eq(len(self.spec.linked_feature), 0, 'Linked features are forbidden') + for feature_spec in self.spec.fixed_feature: + check.Lt(feature_spec.embedding_dim, 0, + 'Features must be non-embedded: %s' % feature_spec) + + def build_greedy_training(self, state, network_states): + """See base class.""" + state.handle = self._extract_feature_ids(state, network_states, True) + cost = self.add_regularizer(tf.constant(0.)) + return state.handle, cost, 0, 0 + + def build_greedy_inference(self, state, network_states, + during_training=False): + """See base class.""" + return self._extract_feature_ids(state, network_states, during_training) + + def _extract_feature_ids(self, state, network_states, during_training): + """Extracts feature IDs and advances a batch using the oracle path. + + Args: + state: MasterState from the 'AdvanceMaster' op that advances the + underlying master to this component. + network_states: Dictionary of component NetworkState objects. + during_training: Whether the graph is being constructed during training. + + Returns: + state handle: Final state after advancing. + """ + logging.info('Building component: %s', self.spec.name) + + if during_training: + stride = state.current_batch_size * self.training_beam_size + else: + stride = state.current_batch_size * self.inference_beam_size + + with tf.variable_scope(self.name, reuse=True): + state.handle, ids = extract_fixed_feature_ids(self, state, stride) + + with tf.variable_scope(self.name, reuse=True): + tensors = self.network.create( + ids, [], None, None, during_training, stride=stride) + update_network_states(self, tensors, network_states, stride) + return state.handle + + +class BulkAnnotatorComponentBuilder(component.ComponentBuilderBase): + """A component builder to bulk annotate or compute the cost of a gold path. 
+ + This component can be used with features that don't depend on the + transition system state. + + Since no feature extraction is performed, only non-recurrent + 'identity' linked features are supported. + + If a FeedForwardNetwork is configured with no hidden units, this component + acts as a 'bulk softmax' component. + """ + + def build_greedy_training(self, state, network_states): + """Advances a batch using oracle paths, returning the overall CE cost. + + Args: + state: MasterState from the 'AdvanceMaster' op that advances the + underlying master to this component. + network_states: dictionary of component NetworkState objects + + Returns: + (state handle, cost, correct, total): TF ops corresponding to the final + state after unrolling, the total cost, the total number of correctly + predicted actions, and the total number of actions. + + Raises: + RuntimeError: if fixed features are configured. + """ + logging.info('Building component: %s', self.spec.name) + if self.spec.fixed_feature: + raise RuntimeError( + 'Fixed features are not compatible with bulk annotation. ' + 'Use the "bulk-features" component instead.') + linked_embeddings = [ + fetch_linked_embedding(self, network_states, spec) + for spec in self.spec.linked_feature + ] + + stride = state.current_batch_size * self.training_beam_size + with tf.variable_scope(self.name, reuse=True): + network_tensors = self.network.create([], linked_embeddings, None, None, + True, stride) + + update_network_states(self, network_tensors, network_states, stride) + + logits = self.network.get_logits(network_tensors) + state.handle, gold = dragnn_ops.bulk_advance_from_oracle( + state.handle, component=self.name) + + cost, correct, total = build_cross_entropy_loss(logits, gold) + cost = self.add_regularizer(cost) + + return state.handle, cost, correct, total + + def build_greedy_inference(self, state, network_states, + during_training=False): + """Annotates a batch of documents using network scores. 
+ + Args: + state: MasterState from the 'AdvanceMaster' op that advances the + underlying master to this component. + network_states: dictionary of component NetworkState objects + during_training: whether the graph is being constructed during training + + Returns: + Handle to the state once inference is complete for this Component. + + Raises: + RuntimeError: if fixed features are configured + """ + logging.info('Building component: %s', self.spec.name) + if self.spec.fixed_feature: + raise RuntimeError( + 'Fixed features are not compatible with bulk annotation. ' + 'Use the "bulk-features" component instead.') + linked_embeddings = [ + fetch_linked_embedding(self, network_states, spec) + for spec in self.spec.linked_feature + ] + + if during_training: + stride = state.current_batch_size * self.training_beam_size + else: + stride = state.current_batch_size * self.inference_beam_size + + with tf.variable_scope(self.name, reuse=True): + network_tensors = self.network.create( + [], linked_embeddings, None, None, during_training, stride) + + update_network_states(self, network_tensors, network_states, stride) + + logits = self.network.get_logits(network_tensors) + return dragnn_ops.bulk_advance_from_prediction( + state.handle, logits, component=self.name) diff --git a/syntaxnet/dragnn/python/bulk_component_test.py b/syntaxnet/dragnn/python/bulk_component_test.py new file mode 100644 index 0000000000000000000000000000000000000000..a50e712ddb91967568571295af7e0b721f960122 --- /dev/null +++ b/syntaxnet/dragnn/python/bulk_component_test.py @@ -0,0 +1,463 @@ +"""Tests for bulk_component. + +Verifies that: +1. BulkFeatureExtractor and BulkAnnotator both raise NotImplementedError when + non-identity translator configured. +2. BulkFeatureExtractor and BulkAnnotator both raise RuntimeError when + recurrent linked features are configured. +3. BulkAnnotator raises RuntimeError when fixed features are configured. +4. 
BulkFeatureIdExtractor raises ValueError when linked features are configured, + or when the fixed features are invalid. +""" + +import os.path + + +import tensorflow as tf + +from tensorflow.python.framework import test_util +from tensorflow.python.platform import googletest +from google.protobuf import text_format + +from dragnn.protos import spec_pb2 +from dragnn.python import bulk_component +from dragnn.python import component +from dragnn.python import dragnn_ops +from dragnn.python import network_units +from syntaxnet import sentence_pb2 + +import dragnn.python.load_dragnn_cc_impl +import syntaxnet.load_parser_ops + +FLAGS = tf.app.flags.FLAGS + + +class MockNetworkUnit(object): + + def get_layer_size(self, unused_layer_name): + return 64 + + +class MockComponent(object): + + def __init__(self): + self.name = 'mock' + self.network = MockNetworkUnit() + + +class MockMaster(object): + + def __init__(self): + self.spec = spec_pb2.MasterSpec() + self.hyperparams = spec_pb2.GridPoint() + self.lookup_component = {'mock': MockComponent()} + + +def _create_fake_corpus(): + """Returns a list of fake serialized sentences for tests.""" + num_docs = 4 + corpus = [] + for num_tokens in range(1, num_docs + 1): + sentence = sentence_pb2.Sentence() + sentence.text = 'x' * num_tokens + for i in range(num_tokens): + token = sentence.token.add() + token.word = 'x' + token.start = i + token.end = i + corpus.append(sentence.SerializeToString()) + return corpus + + +class BulkComponentTest(test_util.TensorFlowTestCase): + + def setUp(self): + self.master = MockMaster() + self.master_state = component.MasterState( + handle='handle', current_batch_size=2) + self.network_states = { + 'mock': component.NetworkState(), + 'test': component.NetworkState(), + } + + def testFailsOnNonIdentityTranslator(self): + component_spec = spec_pb2.ComponentSpec() + text_format.Parse(""" + name: "test" + network_unit { + registered_name: "IdentityNetwork" + } + linked_feature { + name: "features" 
embedding_dim: -1 size: 1
+          source_translator: "history"
+          source_component: "mock"
+        }
+        """, component_spec)
+
+    # For feature extraction:
+    with tf.Graph().as_default():
+      comp = bulk_component.BulkFeatureExtractorComponentBuilder(
+          self.master, component_spec)
+
+      # Expect feature extraction to generate an error due to the "history"
+      # translator.
+      with self.assertRaises(NotImplementedError):
+        comp.build_greedy_training(self.master_state, self.network_states)
+
+    # As well as annotation:
+    with tf.Graph().as_default():
+      comp = bulk_component.BulkAnnotatorComponentBuilder(
+          self.master, component_spec)
+
+      with self.assertRaises(NotImplementedError):
+        comp.build_greedy_training(self.master_state, self.network_states)
+
+  def testFailsOnRecurrentLinkedFeature(self):
+    component_spec = spec_pb2.ComponentSpec()
+    text_format.Parse("""
+        name: "test"
+        network_unit {
+          registered_name: "FeedForwardNetwork"
+          parameters {
+            key: 'hidden_layer_sizes' value: '64'
+          }
+        }
+        linked_feature {
+          name: "features" embedding_dim: -1 size: 1
+          source_translator: "identity"
+          source_component: "test"
+          source_layer: "layer_0"
+        }
+        """, component_spec)
+
+    # For feature extraction:
+    with tf.Graph().as_default():
+      comp = bulk_component.BulkFeatureExtractorComponentBuilder(
+          self.master, component_spec)
+
+      # Expect feature extraction to generate an error due to the recurrent
+      # linked feature.
+ with self.assertRaises(RuntimeError): + comp.build_greedy_training(self.master_state, self.network_states) + + # As well as annotation: + with tf.Graph().as_default(): + comp = bulk_component.BulkAnnotatorComponentBuilder( + self.master, component_spec) + + with self.assertRaises(RuntimeError): + comp.build_greedy_training(self.master_state, self.network_states) + + def testConstantFixedFeatureFailsIfNotPretrained(self): + component_spec = spec_pb2.ComponentSpec() + text_format.Parse(""" + name: "test" + network_unit { + registered_name: "IdentityNetwork" + } + fixed_feature { + name: "fixed" embedding_dim: 32 size: 1 + is_constant: true + } + component_builder { + registered_name: "bulk_component.BulkFeatureExtractorComponentBuilder" + } + """, component_spec) + with tf.Graph().as_default(): + comp = bulk_component.BulkFeatureExtractorComponentBuilder( + self.master, component_spec) + + with self.assertRaisesRegexp(ValueError, + 'Constant embeddings must be pretrained'): + comp.build_greedy_training(self.master_state, self.network_states) + with self.assertRaisesRegexp(ValueError, + 'Constant embeddings must be pretrained'): + comp.build_greedy_inference( + self.master_state, self.network_states, during_training=True) + with self.assertRaisesRegexp(ValueError, + 'Constant embeddings must be pretrained'): + comp.build_greedy_inference( + self.master_state, self.network_states, during_training=False) + + def testNormalFixedFeaturesAreDifferentiable(self): + component_spec = spec_pb2.ComponentSpec() + text_format.Parse(""" + name: "test" + network_unit { + registered_name: "IdentityNetwork" + } + fixed_feature { + name: "fixed" embedding_dim: 32 size: 1 + pretrained_embedding_matrix { part {} } + vocab { part {} } + } + component_builder { + registered_name: "bulk_component.BulkFeatureExtractorComponentBuilder" + } + """, component_spec) + with tf.Graph().as_default(): + comp = bulk_component.BulkFeatureExtractorComponentBuilder( + self.master, component_spec) + + 
# Get embedding matrix variables.
+      with tf.variable_scope(comp.name, reuse=True):
+        fixed_embedding_matrix = tf.get_variable(
+            network_units.fixed_embeddings_name(0))
+
+      # Get output layer.
+      comp.build_greedy_training(self.master_state, self.network_states)
+      activations = self.network_states[comp.name].activations
+      outputs = activations[comp.network.layers[0].name].bulk_tensor
+
+      # Compute the gradient of the output layer w.r.t. the embedding matrix.
+      # This should be well-defined in the normal case.
+      gradients = tf.gradients(outputs, fixed_embedding_matrix)
+      self.assertEqual(len(gradients), 1)
+      self.assertFalse(gradients[0] is None)
+
+  def testConstantFixedFeaturesAreNotDifferentiableButOthersAre(self):
+    component_spec = spec_pb2.ComponentSpec()
+    text_format.Parse("""
+        name: "test"
+        network_unit {
+          registered_name: "IdentityNetwork"
+        }
+        fixed_feature {
+          name: "constant" embedding_dim: 32 size: 1
+          is_constant: true
+          pretrained_embedding_matrix { part {} }
+          vocab { part {} }
+        }
+        fixed_feature {
+          name: "trainable" embedding_dim: 32 size: 1
+          pretrained_embedding_matrix { part {} }
+          vocab { part {} }
+        }
+        component_builder {
+          registered_name: "bulk_component.BulkFeatureExtractorComponentBuilder"
+        }
+        """, component_spec)
+    with tf.Graph().as_default():
+      comp = bulk_component.BulkFeatureExtractorComponentBuilder(
+          self.master, component_spec)
+
+      # Get embedding matrix variables.
+      with tf.variable_scope(comp.name, reuse=True):
+        constant_embedding_matrix = tf.get_variable(
+            network_units.fixed_embeddings_name(0))
+        trainable_embedding_matrix = tf.get_variable(
+            network_units.fixed_embeddings_name(1))
+
+      # Get output layer.
+      comp.build_greedy_training(self.master_state, self.network_states)
+      activations = self.network_states[comp.name].activations
+      outputs = activations[comp.network.layers[0].name].bulk_tensor
+
+      # The constant embeddings are non-differentiable.
+ constant_gradients = tf.gradients(outputs, constant_embedding_matrix) + self.assertEqual(len(constant_gradients), 1) + self.assertTrue(constant_gradients[0] is None) + + # The trainable embeddings are differentiable. + trainable_gradients = tf.gradients(outputs, trainable_embedding_matrix) + self.assertEqual(len(trainable_gradients), 1) + self.assertFalse(trainable_gradients[0] is None) + + def testFailsOnFixedFeature(self): + component_spec = spec_pb2.ComponentSpec() + text_format.Parse(""" + name: "annotate" + network_unit { + registered_name: "IdentityNetwork" + } + fixed_feature { + name: "fixed" embedding_dim: 32 size: 1 + } + """, component_spec) + with tf.Graph().as_default(): + comp = bulk_component.BulkAnnotatorComponentBuilder( + self.master, component_spec) + + # Expect feature extraction to generate a runtime error due to the + # fixed feature. + with self.assertRaises(RuntimeError): + comp.build_greedy_training(self.master_state, self.network_states) + + def testBulkFeatureIdExtractorOkWithOneFixedFeature(self): + component_spec = spec_pb2.ComponentSpec() + text_format.Parse(""" + name: "test" + network_unit { + registered_name: "IdentityNetwork" + } + fixed_feature { + name: "fixed" embedding_dim: -1 size: 1 + } + """, component_spec) + with tf.Graph().as_default(): + comp = bulk_component.BulkFeatureIdExtractorComponentBuilder( + self.master, component_spec) + + # Should not raise errors. 
+ self.network_states[component_spec.name] = component.NetworkState() + comp.build_greedy_training(self.master_state, self.network_states) + self.network_states[component_spec.name] = component.NetworkState() + comp.build_greedy_inference(self.master_state, self.network_states) + + def testBulkFeatureIdExtractorFailsOnLinkedFeature(self): + component_spec = spec_pb2.ComponentSpec() + text_format.Parse(""" + name: "test" + network_unit { + registered_name: "IdentityNetwork" + } + fixed_feature { + name: "fixed" embedding_dim: -1 size: 1 + } + linked_feature { + name: "linked" embedding_dim: -1 size: 1 + source_translator: "identity" + source_component: "mock" + } + """, component_spec) + with tf.Graph().as_default(): + with self.assertRaises(ValueError): + unused_comp = bulk_component.BulkFeatureIdExtractorComponentBuilder( + self.master, component_spec) + + def testBulkFeatureIdExtractorOkWithMultipleFixedFeatures(self): + component_spec = spec_pb2.ComponentSpec() + text_format.Parse(""" + name: "test" + network_unit { + registered_name: "IdentityNetwork" + } + fixed_feature { + name: "fixed1" embedding_dim: -1 size: 1 + } + fixed_feature { + name: "fixed2" embedding_dim: -1 size: 1 + } + fixed_feature { + name: "fixed3" embedding_dim: -1 size: 1 + } + """, component_spec) + with tf.Graph().as_default(): + comp = bulk_component.BulkFeatureIdExtractorComponentBuilder( + self.master, component_spec) + + # Should not raise errors. 
+ self.network_states[component_spec.name] = component.NetworkState() + comp.build_greedy_training(self.master_state, self.network_states) + self.network_states[component_spec.name] = component.NetworkState() + comp.build_greedy_inference(self.master_state, self.network_states) + + def testBulkFeatureIdExtractorFailsOnEmbeddedFixedFeature(self): + component_spec = spec_pb2.ComponentSpec() + text_format.Parse(""" + name: "test" + network_unit { + registered_name: "IdentityNetwork" + } + fixed_feature { + name: "fixed" embedding_dim: 2 size: 1 + } + """, component_spec) + with tf.Graph().as_default(): + with self.assertRaises(ValueError): + unused_comp = bulk_component.BulkFeatureIdExtractorComponentBuilder( + self.master, component_spec) + + def testBulkFeatureIdExtractorExtractFocusWithOffset(self): + path = os.path.join(tf.test.get_temp_dir(), 'label-map') + with open(path, 'w') as label_map_file: + label_map_file.write('0\n') + + master_spec = spec_pb2.MasterSpec() + text_format.Parse(""" + component { + name: "test" + transition_system { + registered_name: "shift-only" + } + resource { + name: "label-map" + part { + file_pattern: "%s" + file_format: "text" + } + } + network_unit { + registered_name: "ExportFixedFeaturesNetwork" + } + backend { + registered_name: "SyntaxNetComponent" + } + fixed_feature { + name: "focus1" embedding_dim: -1 size: 1 fml: "input.focus" + predicate_map: "none" + } + fixed_feature { + name: "focus2" embedding_dim: -1 size: 1 fml: "input(1).focus" + predicate_map: "none" + } + fixed_feature { + name: "focus3" embedding_dim: -1 size: 1 fml: "input(2).focus" + predicate_map: "none" + } + } + """ % path, master_spec) + + with tf.Graph().as_default(): + corpus = _create_fake_corpus() + corpus = tf.constant(corpus, shape=[len(corpus)]) + handle = dragnn_ops.get_session( + container='test', + master_spec=master_spec.SerializeToString(), + grid_point='') + handle = dragnn_ops.attach_data_reader(handle, corpus) + handle = 
dragnn_ops.init_component_data( + handle, beam_size=1, component='test') + batch_size = dragnn_ops.batch_size(handle, component='test') + master_state = component.MasterState(handle, batch_size) + + extractor = bulk_component.BulkFeatureIdExtractorComponentBuilder( + self.master, master_spec.component[0]) + network_state = component.NetworkState() + self.network_states['test'] = network_state + handle = extractor.build_greedy_inference(master_state, + self.network_states) + focus1 = network_state.activations['focus1'].bulk_tensor + focus2 = network_state.activations['focus2'].bulk_tensor + focus3 = network_state.activations['focus3'].bulk_tensor + + with self.test_session() as sess: + focus1, focus2, focus3 = sess.run([focus1, focus2, focus3]) + tf.logging.info('focus1=\n%s', focus1) + tf.logging.info('focus2=\n%s', focus2) + tf.logging.info('focus3=\n%s', focus3) + + self.assertAllEqual( + focus1, + [[0], [-1], [-1], [-1], + [0], [1], [-1], [-1], + [0], [1], [2], [-1], + [0], [1], [2], [3]]) + + self.assertAllEqual( + focus2, + [[-1], [-1], [-1], [-1], + [1], [-1], [-1], [-1], + [1], [2], [-1], [-1], + [1], [2], [3], [-1]]) + + self.assertAllEqual( + focus3, + [[-1], [-1], [-1], [-1], + [-1], [-1], [-1], [-1], + [2], [-1], [-1], [-1], + [2], [3], [-1], [-1]]) + + +if __name__ == '__main__': + googletest.main() diff --git a/syntaxnet/dragnn/python/component.py b/syntaxnet/dragnn/python/component.py new file mode 100644 index 0000000000000000000000000000000000000000..db45549f9666cde4e6412079c933a24ecc8fad47 --- /dev/null +++ b/syntaxnet/dragnn/python/component.py @@ -0,0 +1,589 @@ +"""Builds a DRAGNN graph for local training.""" + +from abc import ABCMeta +from abc import abstractmethod + +import tensorflow as tf +from tensorflow.python.platform import tf_logging as logging + +from dragnn.python import dragnn_ops +from dragnn.python import network_units +from syntaxnet.util import check +from syntaxnet.util import registry + + +class NetworkState(object): + 
"""Simple utility to manage the state of a DRAGNN network. + + This class encapsulates the variables that are a specific to any + particular instance of a DRAGNN stack, as constructed by the + MasterBuilder below. + + Attributes: + activations: Dictionary mapping layer names to StoredActivation objects. + """ + + def __init__(self): + self.activations = {} + + +class MasterState(object): + """Simple utility to encapsulate tensors associated with the master state. + + Attributes: + handle: string tensor handle to the underlying nlp_saft::dragnn::MasterState + current_batch_size: int tensor containing the batch size following the most + recent MasterState::Reset(). + """ + + def __init__(self, handle, current_batch_size): + self.handle = handle + self.current_batch_size = current_batch_size + + +@registry.RegisteredClass +class ComponentBuilderBase(object): + """Utility to build a single Component in a DRAGNN stack of models. + + This class handles converting a ComponentSpec proto into various TF + sub-graphs. It will stitch together various neural units with dynamic + unrolling inside a tf.while loop. + + All variables for parameters are created during the constructor within the + scope of the component's name, e.g. 'tagger/embedding_matrix_0' for a + component named 'tagger'. + + As part of the specification, ComponentBuilder will wrap an underlying + NetworkUnit which generates the actual network layout. + """ + + __metaclass__ = ABCMeta # required for @abstractmethod + + def __init__(self, master, component_spec, attr_defaults=None): + """Initializes the ComponentBuilder from specifications. + + Args: + master: dragnn.MasterBuilder object. + component_spec: dragnn.ComponentSpec proto to be built. + attr_defaults: Optional dict of component attribute defaults. If not + provided or if empty, attributes are not extracted. 
+ """ + self.master = master + self.num_actions = component_spec.num_actions + self.name = component_spec.name + self.spec = component_spec + self.moving_average = None + + # Determine if this component should apply self-normalization. + self.eligible_for_self_norm = ( + not self.master.hyperparams.self_norm_components_filter or self.name in + self.master.hyperparams.self_norm_components_filter.split(',')) + + # Extract component attributes before make_network(), so the network unit + # can access them. + self._attrs = {} + if attr_defaults: + self._attrs = network_units.get_attrs_with_defaults( + self.spec.component_builder.parameters, attr_defaults) + + with tf.variable_scope(self.name): + self.training_beam_size = tf.constant( + self.spec.training_beam_size, name='TrainingBeamSize') + self.inference_beam_size = tf.constant( + self.spec.inference_beam_size, name='InferenceBeamSize') + self.locally_normalize = tf.constant(False, name='LocallyNormalize') + self._step = tf.get_variable( + 'step', [], initializer=tf.zeros_initializer(), dtype=tf.int32) + self._total = tf.get_variable( + 'total', [], initializer=tf.zeros_initializer(), dtype=tf.int32) + + # Construct network variables. + self.network = self.make_network(self.spec.network_unit) + + # Construct moving average. + if self.master.hyperparams.use_moving_average: + self.moving_average = tf.train.ExponentialMovingAverage( + decay=self.master.hyperparams.average_weight, num_updates=self._step) + self.avg_ops = [self.moving_average.apply(self.network.params)] + + def make_network(self, network_unit): + """Makes a NetworkUnitInterface object based on the network_unit spec. + + Components may override this method to exert control over the + network unit construction, such as which network units are supported. + + Args: + network_unit: RegisteredModuleSpec proto defining the network unit. + + Returns: + An implementation of NetworkUnitInterface. 
+ + Raises: + ValueError: if the requested network unit is not found in the registry. + """ + network_type = network_unit.registered_name + + with tf.variable_scope(self.name): + # Raises ValueError if not found. + return network_units.NetworkUnitInterface.Create(network_type, self) + + @abstractmethod + def build_greedy_training(self, state, network_states): + """Builds a training graph for this component. + + Two assumptions are made about the resulting graph: + 1. An oracle will be used to unroll the state and compute the cost. + 2. The graph will be differentiable when the cost is being minimized. + + Args: + state: MasterState from the 'AdvanceMaster' op that advances the + underlying master to this component. + network_states: dictionary of component NetworkState objects. + + Returns: + (state, cost, correct, total) -- These are TF ops corresponding to + the final state after unrolling, the total cost, the total number of + correctly predicted actions, and the total number of actions. + """ + pass + + @abstractmethod + def build_greedy_inference(self, state, network_states, + during_training=False): + """Builds an inference graph for this component. + + If this graph is being constructed 'during_training', then it needs to be + differentiable even though it doesn't return an explicit cost. + + There may be other cases where the distinction between training and eval is + important. The handling of dropout is an example of this. + + Args: + state: MasterState from the 'AdvanceMaster' op that advances the + underlying master to this component. + network_states: dictionary of component NetworkState objects. + during_training: whether the graph is being constructed during training + + Returns: + Handle to the state once inference is complete for this Component. + """ + pass + + def get_summaries(self): + """Constructs a set of summaries for this component. 
+ + Returns: + List of Summary ops to get parameter norms, progress reports, and + so forth for this component. + """ + + def combine_norm(matrices): + # Handles None in cases where the optimizer or moving average slot is + # not present. + squares = [tf.reduce_sum(tf.square(m)) for m in matrices if m is not None] + + # Some components may not have any parameters, in which case we simply + # return zero. + if squares: + return tf.sqrt(tf.add_n(squares)) + else: + return tf.constant(0, tf.float32) + + summaries = [] + summaries.append(tf.summary.scalar('%s step' % self.name, self._step)) + summaries.append(tf.summary.scalar('%s total' % self.name, self._total)) + if self.network.params: + summaries.append( + tf.summary.scalar('%s parameter Norm' % self.name, + combine_norm(self.network.params))) + slot_names = self.master.optimizer.get_slot_names() + for name in slot_names: + slot_params = [ + self.master.optimizer.get_slot(p, name) for p in self.network.params + ] + summaries.append( + tf.summary.scalar('%s %s Norm' % (self.name, name), + combine_norm(slot_params))) + + # Construct moving average. + if self.master.hyperparams.use_moving_average: + summaries.append( + tf.summary.scalar('%s avg Norm' % self.name, + combine_norm([ + self.moving_average.average(p) + for p in self.network.params + ]))) + + return summaries + + def get_variable(self, var_name=None, var_params=None): + """Returns either the original or averaged version of a given variable. + + If the master.read_from_avg flag is set to True, and the + ExponentialMovingAverage (EMA) object has been attached, then this will ask + the EMA object for the given variable. + + This is to allow executing inference from the averaged version of + parameters. + + Arguments: + var_name: Name of the variable. + var_params: tf.Variable for which to retrieve an average. + + Only one of |var_name| or |var_params| needs to be provided. If both are + provided, |var_params| takes precedence. 
+ + Returns: + tf.Variable object corresponding to original or averaged version. + """ + if var_params: + var_name = var_params.name + else: + check.NotNone(var_name, 'specify at least one of var_name or var_params') + var_params = tf.get_variable(var_name) + + if self.moving_average and self.master.read_from_avg: + logging.info('Retrieving average for: %s', var_name) + var_params = self.moving_average.average(var_params) + assert var_params + logging.info('Returning: %s', var_params.name) + return var_params + + def advance_counters(self, total): + """Returns ops to advance the per-component step and total counters. + + Args: + total: Total number of actions to increment counters by. + + Returns: + tf.Group op incrementing 'step' by 1 and 'total' by total. + """ + update_total = tf.assign_add(self._total, total, use_locking=True) + update_step = tf.assign_add(self._step, 1, use_locking=True) + return tf.group(update_total, update_step) + + def add_regularizer(self, cost): + """Adds L2 regularization for parameters which have it turned on. + + Args: + cost: float cost before regularization. + + Returns: + Updated cost optionally including regularization. + """ + if self.network is None: + return cost + regularized_weights = self.network.get_l2_regularized_weights() + if not regularized_weights: + return cost + l2_coeff = self.master.hyperparams.l2_regularization_coefficient + if l2_coeff == 0.0: + return cost + tf.logging.info('[%s] Regularizing parameters: %s', self.name, + [w.name for w in regularized_weights]) + l2_costs = [tf.nn.l2_loss(p) for p in regularized_weights] + return tf.add(cost, l2_coeff * tf.add_n(l2_costs), name='regularizer') + + def build_post_restore_hook(self): + """Builds a post restore graph for this component. + + This is a run-once graph that prepares any state necessary for the + inference portion of the component. It is generally a no-op. + + Returns: + A no-op state. 
+ """ + logging.info('Building default post restore hook for component: %s', + self.spec.name) + return tf.no_op(name='setup_%s' % self.spec.name) + + def attr(self, name): + """Returns the value of the component attribute with the |name|.""" + return self._attrs[name] + + +def update_tensor_arrays(network_tensors, arrays): + """Updates a list of tensor arrays from the network's output tensors. + + Arguments: + network_tensors: Output tensors from the underlying NN unit. + arrays: TensorArrays to be updated. + + Returns: + New list of TensorArrays after writing activations. + """ + # TODO(googleuser): Only store activations that will be used later in linked + # feature specifications. + next_arrays = [] + for index, network_tensor in enumerate(network_tensors): + array = arrays[index] + size = array.size() + array = array.write(size, network_tensor) + next_arrays.append(array) + return next_arrays + + +class DynamicComponentBuilder(ComponentBuilderBase): + """Component builder for recurrent DRAGNN networks. + + Feature extraction and annotation are done sequentially in a tf.while_loop + so fixed and linked features can be recurrent. + """ + + def build_greedy_training(self, state, network_states): + """Builds a training loop for this component. + + This loop repeatedly evaluates the network and computes the loss, but it + does not advance using the predictions of the network. Instead, it advances + using the oracle defined in the underlying transition system. The final + state will always correspond to the gold annotation. + + Args: + state: MasterState from the 'AdvanceMaster' op that advances the + underlying master to this component. + network_states: NetworkState object containing component TensorArrays. + + Returns: + (state, cost, correct, total) -- These are TF ops corresponding to + the final state after unrolling, the total cost, the total number of + correctly predicted actions, and the total number of actions. 
+ """ + logging.info('Building component: %s', self.spec.name) + stride = state.current_batch_size * self.training_beam_size + + cost = tf.constant(0.) + correct = tf.constant(0) + total = tf.constant(0) + + # Create the TensorArray's to store activations for downstream/recurrent + # connections. + def cond(handle, *_): + all_final = dragnn_ops.emit_all_final(handle, component=self.name) + return tf.logical_not(tf.reduce_all(all_final)) + + def body(handle, cost, correct, total, *arrays): + """Runs the network and advances the state by a step.""" + + with tf.control_dependencies([handle, cost, correct, total] + + [x.flow for x in arrays]): + # Get a copy of the network inside this while loop. + updated_state = MasterState(handle, state.current_batch_size) + network_tensors = self._feedforward_unit( + updated_state, arrays, network_states, stride, during_training=True) + + # Every layer is written to a TensorArray, so that it can be backprop'd. + next_arrays = update_tensor_arrays(network_tensors, arrays) + with tf.control_dependencies([x.flow for x in next_arrays]): + with tf.name_scope('compute_loss'): + # A gold label > -1 determines that the sentence is still + # in a valid state. Otherwise, the sentence has ended. + # + # We add only the valid sentences to the loss, in the following way: + # 1. We compute 'valid_ix', the indices in gold that contain + # valid oracle actions. + # 2. We compute the cost function by comparing logits and gold + # only for the valid indices. 
+ gold = dragnn_ops.emit_oracle_labels(handle, component=self.name) + gold.set_shape([None]) + valid = tf.greater(gold, -1) + valid_ix = tf.reshape(tf.where(valid), [-1]) + gold = tf.gather(gold, valid_ix) + + logits = self.network.get_logits(network_tensors) + logits = tf.gather(logits, valid_ix) + + cost += tf.reduce_sum( + tf.nn.sparse_softmax_cross_entropy_with_logits( + labels=tf.cast(gold, tf.int64), logits=logits)) + + if (self.eligible_for_self_norm and + self.master.hyperparams.self_norm_alpha > 0): + log_z = tf.reduce_logsumexp(logits, [1]) + cost += (self.master.hyperparams.self_norm_alpha * + tf.nn.l2_loss(log_z)) + + correct += tf.reduce_sum( + tf.to_int32(tf.nn.in_top_k(logits, gold, 1))) + total += tf.size(gold) + + with tf.control_dependencies([cost, correct, total, gold]): + handle = dragnn_ops.advance_from_oracle(handle, component=self.name) + return [handle, cost, correct, total] + next_arrays + + with tf.name_scope(self.name + '/train_state'): + init_arrays = [] + for layer in self.network.layers: + init_arrays.append(layer.create_array(state.current_batch_size)) + + output = tf.while_loop( + cond, + body, [state.handle, cost, correct, total] + init_arrays, + name='train_%s' % self.name) + + # Saves completed arrays and return final state and cost. + state.handle = output[0] + correct = output[2] + total = output[3] + arrays = output[4:] + cost = output[1] + + # Store handles to the final output for use in subsequent tasks. + network_state = network_states[self.name] + with tf.name_scope(self.name + '/stored_act'): + for index, layer in enumerate(self.network.layers): + network_state.activations[layer.name] = network_units.StoredActivations( + array=arrays[index]) + + # Normalize the objective by the total # of steps taken. + with tf.control_dependencies([tf.assert_greater(total, 0)]): + cost /= tf.to_float(total) + + # Adds regularization for the hidden weights. 
+ cost = self.add_regularizer(cost) + + with tf.control_dependencies([x.flow for x in arrays]): + return tf.identity(state.handle), cost, correct, total + + def build_greedy_inference(self, state, network_states, + during_training=False): + """Builds an inference loop for this component. + + Repeatedly evaluates the network and advances the underlying state according + to the predicted scores. + + Args: + state: MasterState from the 'AdvanceMaster' op that advances the + underlying master to this component. + network_states: NetworkState object containing component TensorArrays. + during_training: whether the graph is being constructed during training + + Returns: + Handle to the state once inference is complete for this Component. + """ + logging.info('Building component: %s', self.spec.name) + if during_training: + stride = state.current_batch_size * self.training_beam_size + else: + stride = state.current_batch_size * self.inference_beam_size + + def cond(handle, *_): + all_final = dragnn_ops.emit_all_final(handle, component=self.name) + return tf.logical_not(tf.reduce_all(all_final)) + + def body(handle, *arrays): + """Runs the network and advances the state by a step.""" + + with tf.control_dependencies([handle] + [x.flow for x in arrays]): + # Get a copy of the network inside this while loop. 
+ updated_state = MasterState(handle, state.current_batch_size) + network_tensors = self._feedforward_unit( + updated_state, + arrays, + network_states, + stride, + during_training=during_training) + next_arrays = update_tensor_arrays(network_tensors, arrays) + with tf.control_dependencies([x.flow for x in next_arrays]): + logits = self.network.get_logits(network_tensors) + logits = tf.cond(self.locally_normalize, + lambda: tf.nn.log_softmax(logits), lambda: logits) + handle = dragnn_ops.advance_from_prediction( + handle, logits, component=self.name) + return [handle] + next_arrays + + # Create the TensorArrays to store activations for downstream/recurrent + # connections. + with tf.name_scope(self.name + '/inference_state'): + init_arrays = [] + for layer in self.network.layers: + init_arrays.append(layer.create_array(stride)) + output = tf.while_loop( + cond, + body, [state.handle] + init_arrays, + name='inference_%s' % self.name) + + # Saves completed arrays and returns final state. + state.handle = output[0] + arrays = output[1:] + network_state = network_states[self.name] + with tf.name_scope(self.name + '/stored_act'): + for index, layer in enumerate(self.network.layers): + network_state.activations[layer.name] = network_units.StoredActivations( + array=arrays[index]) + with tf.control_dependencies([x.flow for x in arrays]): + return tf.identity(state.handle) + + def _feedforward_unit(self, state, arrays, network_states, stride, + during_training): + """Constructs a single instance of a feed-forward cell. + + Given an input state and access to the arrays storing activations, this + function encapsulates creation of a single network unit. This will *not* + create new variables. + + Args: + state: MasterState for the state that will be used to extract features. + arrays: List of TensorArrays corresponding to network outputs from this + component.
These are used for recurrent link features; the arrays from + other components are used for stack-prop style connections. + network_states: NetworkState object containing the TensorArrays from + *all* components. + stride: int Tensor with the current beam * batch size. + during_training: Whether to build a unit for training (vs inference). + + Returns: + List of tensors generated by the underlying network implementation. + """ + with tf.variable_scope(self.name, reuse=True): + fixed_embeddings = [] + for channel_id, feature_spec in enumerate(self.spec.fixed_feature): + fixed_embedding = network_units.fixed_feature_lookup( + self, state, channel_id, stride) + if feature_spec.is_constant: + fixed_embedding.tensor = tf.stop_gradient(fixed_embedding.tensor) + fixed_embeddings.append(fixed_embedding) + + linked_embeddings = [] + for channel_id, feature_spec in enumerate(self.spec.linked_feature): + if feature_spec.source_component == self.name: + # Recurrent feature: pull from the local arrays. + index = self.network.get_layer_index(feature_spec.source_layer) + source_array = arrays[index] + source_layer_size = self.network.layers[index].dim + linked_embeddings.append( + network_units.activation_lookup_recurrent( + self, state, channel_id, source_array, source_layer_size, + stride)) + else: + # Stackprop style feature: pull from another component's arrays. 
+ source = self.master.lookup_component[feature_spec.source_component] + source_tensor = network_states[source.name].activations[ + feature_spec.source_layer] + source_layer_size = source.network.get_layer_size( + feature_spec.source_layer) + linked_embeddings.append( + network_units.activation_lookup_other( + self, state, channel_id, source_tensor.dynamic_tensor, + source_layer_size)) + + context_tensor_arrays = [] + for context_layer in self.network.context_layers: + index = self.network.get_layer_index(context_layer.name) + context_tensor_arrays.append(arrays[index]) + + if self.spec.attention_component: + logging.info('%s component has attention over %s', self.name, + self.spec.attention_component) + source = self.master.lookup_component[self.spec.attention_component] + network_state = network_states[self.spec.attention_component] + with tf.control_dependencies( + [tf.assert_equal(state.current_batch_size, 1)]): + attention_tensor = tf.identity( + network_state.activations['layer_0'].bulk_tensor) + + else: + attention_tensor = None + + return self.network.create(fixed_embeddings, linked_embeddings, + context_tensor_arrays, attention_tensor, + during_training) diff --git a/syntaxnet/dragnn/python/composite_optimizer.py b/syntaxnet/dragnn/python/composite_optimizer.py new file mode 100644 index 0000000000000000000000000000000000000000..9f1a8cf85ec9fe6def8199081343a4fc5d5875b9 --- /dev/null +++ b/syntaxnet/dragnn/python/composite_optimizer.py @@ -0,0 +1,55 @@ +"""An optimizer that switches between several methods.""" + +import tensorflow as tf +from tensorflow.python.training import optimizer + + +class CompositeOptimizer(optimizer.Optimizer): + """Optimizer that switches between several methods. + """ + + def __init__(self, + optimizer1, + optimizer2, + switch, + use_locking=False, + name='Composite'): + """Construct a new Composite optimizer. + + Args: + optimizer1: A tf.python.training.optimizer.Optimizer object. 
+ optimizer2: A tf.python.training.optimizer.Optimizer object. + switch: A tf.bool Tensor, selecting whether to use the first or the second + optimizer. + use_locking: Bool. If True apply use locks to prevent concurrent updates + to variables. + name: Optional name prefix for the operations created when applying + gradients. Defaults to "Composite". + """ + super(CompositeOptimizer, self).__init__(use_locking, name) + self._optimizer1 = optimizer1 + self._optimizer2 = optimizer2 + self._switch = switch + + def apply_gradients(self, grads_and_vars, global_step=None, name=None): + + return tf.cond( + self._switch, + lambda: self._optimizer1.apply_gradients(grads_and_vars, + global_step, name), + lambda: self._optimizer2.apply_gradients(grads_and_vars, + global_step, name) + ) + + + def get_slot(self, var, name): + slot1 = self._optimizer1.get_slot(var, name) + slot2 = self._optimizer2.get_slot(var, name) + if slot1 and slot2: + raise LookupError('Slot named %s for variable %s populated for both ' + 'optimizers' % (name, var.name)) + return slot1 or slot2 + + def get_slot_names(self): + return sorted(self._optimizer1.get_slot_names() + + self._optimizer2.get_slot_names()) diff --git a/syntaxnet/dragnn/python/composite_optimizer_test.py b/syntaxnet/dragnn/python/composite_optimizer_test.py new file mode 100644 index 0000000000000000000000000000000000000000..fe18b84f44d067ce4d4208d5ddf78c7f6c209fce --- /dev/null +++ b/syntaxnet/dragnn/python/composite_optimizer_test.py @@ -0,0 +1,112 @@ +"""Tests for CompositeOptimizer. 
+""" + + +import numpy as np +import tensorflow as tf + +from tensorflow.python.framework import test_util +from tensorflow.python.platform import googletest +from tensorflow.python.platform import tf_logging as logging + +from dragnn.python import composite_optimizer + + +class MockAdamOptimizer(tf.train.AdamOptimizer): + + def __init__(self, + learning_rate=0.001, + beta1=0.9, + beta2=0.999, + epsilon=1e-8, + use_locking=False, + name="Adam"): + super(MockAdamOptimizer, self).__init__(learning_rate, beta1, beta2, + epsilon, use_locking, name) + + def _create_slots(self, var_list): + super(MockAdamOptimizer, self)._create_slots(var_list) + for v in var_list: + self._zeros_slot(v, "adam_counter", self._name) + + def _apply_dense(self, grad, var): + train_op = super(MockAdamOptimizer, self)._apply_dense(grad, var) + counter = self.get_slot(var, "adam_counter") + return tf.group(train_op, tf.assign_add(counter, [1.0])) + + +class MockMomentumOptimizer(tf.train.MomentumOptimizer): + + def __init__(self, + learning_rate, + momentum, + use_locking=False, + name="Momentum", + use_nesterov=False): + super(MockMomentumOptimizer, self).__init__(learning_rate, momentum, + use_locking, name, use_nesterov) + + def _create_slots(self, var_list): + super(MockMomentumOptimizer, self)._create_slots(var_list) + for v in var_list: + self._zeros_slot(v, "momentum_counter", self._name) + + def _apply_dense(self, grad, var): + train_op = super(MockMomentumOptimizer, self)._apply_dense(grad, var) + counter = self.get_slot(var, "momentum_counter") + return tf.group(train_op, tf.assign_add(counter, [1.0])) + + +class CompositeOptimizerTest(test_util.TensorFlowTestCase): + + def test_switching(self): + with self.test_session() as sess: + # Create 100 phony x, y data points in NumPy, y = x * 0.1 + 0.3 + x_data = np.random.rand(100).astype(np.float32) + y_data = x_data * 0.1 + 0.3 + + # Try to find values for w and b that compute y_data = w * x_data + b + # (We know that w should be 0.1 and 
b 0.3, but TensorFlow will + # figure that out for us.) + w = tf.Variable(tf.random_uniform([1], -1.0, 1.0)) + b = tf.Variable(tf.zeros([1])) + y = w * x_data + b + + # Minimize the mean squared errors. + loss = tf.reduce_mean(tf.square(y - y_data)) + + # Set up optimizers. + step = tf.get_variable( + "step", + shape=[], + initializer=tf.zeros_initializer(), + trainable=False, + dtype=tf.int32) + optimizer1 = MockAdamOptimizer(0.05) + optimizer2 = MockMomentumOptimizer(0.05, 0.5) + switch = tf.less(step, 100) + optimizer = composite_optimizer.CompositeOptimizer(optimizer1, optimizer2, + switch) + train_op = optimizer.minimize(loss) + + sess.run(tf.global_variables_initializer()) + + # Fit the line. + for iteration in range(201): + self.assertEqual(sess.run(switch), iteration < 100) + sess.run(train_op) + sess.run(tf.assign_add(step, 1)) + slot_names = optimizer.get_slot_names() + self.assertItemsEqual( + slot_names, + ["m", "v", "momentum", "adam_counter", "momentum_counter"]) + adam_counter = sess.run(optimizer.get_slot(w, "adam_counter")) + momentum_counter = sess.run(optimizer.get_slot(w, "momentum_counter")) + self.assertEqual(adam_counter, min(iteration + 1, 100)) + self.assertEqual(momentum_counter, max(iteration - 99, 0)) + if iteration % 20 == 0: + logging.info("%d %s %d %d", iteration, sess.run([switch, step, w, b]), + adam_counter, momentum_counter) + +if __name__ == "__main__": + googletest.main() diff --git a/syntaxnet/dragnn/python/digraph_ops.py b/syntaxnet/dragnn/python/digraph_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..e49bcba6c10d77a579dff1fc420f01c348a843d9 --- /dev/null +++ b/syntaxnet/dragnn/python/digraph_ops.py @@ -0,0 +1,341 @@ +"""TensorFlow ops for directed graphs.""" + +import tensorflow as tf + +from syntaxnet.util import check + + +def ArcPotentialsFromTokens(source_tokens, target_tokens, weights): + r"""Returns arc potentials computed from token activations and weights.
+ + For each batch of source and target token activations, computes a scalar + potential for each arc as the 3-way product between the activation vectors of + the source and target of the arc and the |weights|. Specifically, + + arc[b,s,t] = + \sum_{i,j} source_tokens[b,s,i] * weights[i,j] * target_tokens[b,t,j] + + Note that the token activations can be extended with bias terms to implement a + "biaffine" model (Dozat and Manning, 2017). + + Args: + source_tokens: [B,N,S] tensor of batched activations for the source token in + each arc. + target_tokens: [B,N,T] tensor of batched activations for the target token in + each arc. + weights: [S,T] matrix of weights. + + B,N may be statically-unknown, but S,T must be statically-known. The dtype + of all arguments must be compatible. + + Returns: + [B,N,N] tensor A of arc potentials where A_{b,s,t} is the potential of the + arc from s to t in batch element b. The dtype of A is the same as that of + the arguments. Note that the diagonal entries (i.e., where s==t) represent + self-loops and may not be meaningful. + """ + # All arguments must have statically-known rank. + check.Eq(source_tokens.get_shape().ndims, 3, 'source_tokens must be rank 3') + check.Eq(target_tokens.get_shape().ndims, 3, 'target_tokens must be rank 3') + check.Eq(weights.get_shape().ndims, 2, 'weights must be a matrix') + + # All activation dimensions must be statically-known. 
+ num_source_activations = weights.get_shape().as_list()[0] + num_target_activations = weights.get_shape().as_list()[1] + check.NotNone(num_source_activations, 'unknown source activation dimension') + check.NotNone(num_target_activations, 'unknown target activation dimension') + check.Eq(source_tokens.get_shape().as_list()[2], num_source_activations, + 'dimension mismatch between weights and source_tokens') + check.Eq(target_tokens.get_shape().as_list()[2], num_target_activations, + 'dimension mismatch between weights and target_tokens') + + # All arguments must share the same type. + check.Same([weights.dtype.base_dtype, + source_tokens.dtype.base_dtype, + target_tokens.dtype.base_dtype], + 'dtype mismatch') + + source_tokens_shape = tf.shape(source_tokens) + target_tokens_shape = tf.shape(target_tokens) + batch_size = source_tokens_shape[0] + num_tokens = source_tokens_shape[1] + with tf.control_dependencies([ + tf.assert_equal(batch_size, target_tokens_shape[0]), + tf.assert_equal(num_tokens, target_tokens_shape[1])]): + # Flatten out the batch dimension so we can use one big multiplication. + targets_bnxt = tf.reshape(target_tokens, [-1, num_target_activations]) + + # Matrices are row-major, so we arrange for the RHS argument of each matmul + # to have its transpose flag set. That way no copying is required to align + # the rows of the LHS with the columns of the RHS. + weights_targets_bnxs = tf.matmul(targets_bnxt, weights, transpose_b=True) + + # The next computation is over pairs of tokens within each batch element, so + # restore the batch dimension. + weights_targets_bxnxs = tf.reshape( + weights_targets_bnxs, [batch_size, num_tokens, num_source_activations]) + + # Note that this multiplication is repeated across the batch dimension, + # instead of being one big multiplication as in the first matmul. There + # doesn't seem to be a way to arrange this as a single multiplication given + # the pairwise nature of this computation. 
+ arcs_bxnxn = tf.matmul(source_tokens, weights_targets_bxnxs, + transpose_b=True) + return arcs_bxnxn + + +def ArcSourcePotentialsFromTokens(tokens, weights): + r"""Returns arc source potentials computed from tokens and weights. + + For each batch of token activations, computes a scalar potential for each arc + as the product between the activations of the source token and the |weights|. + Specifically, + + arc[b,s,:] = \sum_{i} weights[i] * tokens[b,s,i] + + Args: + tokens: [B,N,S] tensor of batched activations for source tokens. + weights: [S] vector of weights. + + B,N may be statically-unknown, but S must be statically-known. The dtype of + all arguments must be compatible. + + Returns: + [B,N,N] tensor A of arc potentials as defined above. The dtype of A is the + same as that of the arguments. Note that the diagonal entries (i.e., where + s==t) represent self-loops and may not be meaningful. + """ + # All arguments must have statically-known rank. + check.Eq(tokens.get_shape().ndims, 3, 'tokens must be rank 3') + check.Eq(weights.get_shape().ndims, 1, 'weights must be a vector') + + # All activation dimensions must be statically-known. + num_source_activations = weights.get_shape().as_list()[0] + check.NotNone(num_source_activations, 'unknown source activation dimension') + check.Eq(tokens.get_shape().as_list()[2], num_source_activations, + 'dimension mismatch between weights and tokens') + + # All arguments must share the same type. + check.Same([weights.dtype.base_dtype, + tokens.dtype.base_dtype], + 'dtype mismatch') + + tokens_shape = tf.shape(tokens) + batch_size = tokens_shape[0] + num_tokens = tokens_shape[1] + + # Flatten out the batch dimension so we can use a couple big matmuls. + tokens_bnxs = tf.reshape(tokens, [-1, num_source_activations]) + weights_sx1 = tf.expand_dims(weights, 1) + sources_bnx1 = tf.matmul(tokens_bnxs, weights_sx1) + sources_bnxn = tf.tile(sources_bnx1, [1, num_tokens]) + + # Restore the batch dimension in the output. 
+ sources_bxnxn = tf.reshape(sources_bnxn, [batch_size, num_tokens, num_tokens]) + return sources_bxnxn + + +def RootPotentialsFromTokens(root, tokens, weights): + r"""Returns root selection potentials computed from tokens and weights. + + For each batch of token activations, computes a scalar potential for each root + selection as the 3-way product between the activations of the artificial root + token, the token activations, and the |weights|. Specifically, + + roots[b,r] = \sum_{i,j} root[i] * weights[i,j] * tokens[b,r,j] + + Args: + root: [S] vector of activations for the artificial root token. + tokens: [B,N,T] tensor of batched activations for root tokens. + weights: [S,T] matrix of weights. + + B,N may be statically-unknown, but S,T must be statically-known. The dtype + of all arguments must be compatible. + + Returns: + [B,N] matrix R of root-selection potentials as defined above. The dtype of + R is the same as that of the arguments. + """ + # All arguments must have statically-known rank. + check.Eq(root.get_shape().ndims, 1, 'root must be a vector') + check.Eq(tokens.get_shape().ndims, 3, 'tokens must be rank 3') + check.Eq(weights.get_shape().ndims, 2, 'weights must be a matrix') + + # All activation dimensions must be statically-known. + num_source_activations = weights.get_shape().as_list()[0] + num_target_activations = weights.get_shape().as_list()[1] + check.NotNone(num_source_activations, 'unknown source activation dimension') + check.NotNone(num_target_activations, 'unknown target activation dimension') + check.Eq(root.get_shape().as_list()[0], num_source_activations, + 'dimension mismatch between weights and root') + check.Eq(tokens.get_shape().as_list()[2], num_target_activations, + 'dimension mismatch between weights and tokens') + + # All arguments must share the same type. 
+ check.Same([weights.dtype.base_dtype, + root.dtype.base_dtype, + tokens.dtype.base_dtype], + 'dtype mismatch') + + root_1xs = tf.expand_dims(root, 0) + + tokens_shape = tf.shape(tokens) + batch_size = tokens_shape[0] + num_tokens = tokens_shape[1] + + # Flatten out the batch dimension so we can use a couple big matmuls. + tokens_bnxt = tf.reshape(tokens, [-1, num_target_activations]) + weights_targets_bnxs = tf.matmul(tokens_bnxt, weights, transpose_b=True) + roots_1xbn = tf.matmul(root_1xs, weights_targets_bnxs, transpose_b=True) + + # Restore the batch dimension in the output. + roots_bxn = tf.reshape(roots_1xbn, [batch_size, num_tokens]) + return roots_bxn + + +def CombineArcAndRootPotentials(arcs, roots): + """Combines arc and root potentials into a single set of potentials. + + Args: + arcs: [B,N,N] tensor of batched arc potentials. + roots: [B,N] matrix of batched root potentials. + + Returns: + [B,N,N] tensor P of combined potentials where + P_{b,s,t} = s == t ? roots[b,t] : arcs[b,s,t] + """ + # All arguments must have statically-known rank. + check.Eq(arcs.get_shape().ndims, 3, 'arcs must be rank 3') + check.Eq(roots.get_shape().ndims, 2, 'roots must be a matrix') + + # All arguments must share the same type. + dtype = arcs.dtype.base_dtype + check.Same([dtype, roots.dtype.base_dtype], 'dtype mismatch') + + roots_shape = tf.shape(roots) + arcs_shape = tf.shape(arcs) + batch_size = roots_shape[0] + num_tokens = roots_shape[1] + with tf.control_dependencies([ + tf.assert_equal(batch_size, arcs_shape[0]), + tf.assert_equal(num_tokens, arcs_shape[1]), + tf.assert_equal(num_tokens, arcs_shape[2])]): + return tf.matrix_set_diag(arcs, roots) + + +def LabelPotentialsFromTokens(tokens, weights): + r"""Computes label potentials from tokens and weights. + + For each batch of token activations, computes a scalar potential for each + label as the product between the activations of the source token and the + |weights|. 
Specifically, + + labels[b,t,l] = \sum_{i} weights[l,i] * tokens[b,t,i] + + Args: + tokens: [B,N,T] tensor of batched token activations. + weights: [L,T] matrix of weights. + + B,N may be dynamic, but L,T must be static. The dtype of all arguments must + be compatible. + + Returns: + [B,N,L] tensor of label potentials as defined above, with the same dtype as + the arguments. + """ + check.Eq(tokens.get_shape().ndims, 3, 'tokens must be rank 3') + check.Eq(weights.get_shape().ndims, 2, 'weights must be a matrix') + + num_labels = weights.get_shape().as_list()[0] + num_activations = weights.get_shape().as_list()[1] + check.NotNone(num_labels, 'unknown number of labels') + check.NotNone(num_activations, 'unknown activation dimension') + check.Eq(tokens.get_shape().as_list()[2], num_activations, + 'activation mismatch between weights and tokens') + tokens_shape = tf.shape(tokens) + batch_size = tokens_shape[0] + num_tokens = tokens_shape[1] + + check.Same([tokens.dtype.base_dtype, + weights.dtype.base_dtype], + 'dtype mismatch') + + # Flatten out the batch dimension so we can use one big matmul(). + tokens_bnxt = tf.reshape(tokens, [-1, num_activations]) + labels_bnxl = tf.matmul(tokens_bnxt, weights, transpose_b=True) + + # Restore the batch dimension in the output. + labels_bxnxl = tf.reshape(labels_bnxl, [batch_size, num_tokens, num_labels]) + return labels_bxnxl + + +def LabelPotentialsFromTokenPairs(sources, targets, weights): + r"""Computes label potentials from source and target tokens and weights. + + For each aligned pair of source and target token activations, computes a + scalar potential for each label on the arc from the source to the target. + Specifically, + + labels[b,t,l] = \sum_{i,j} sources[b,t,i] * weights[l,i,j] * targets[b,t,j] + + Args: + sources: [B,N,S] tensor of batched source token activations. + targets: [B,N,T] tensor of batched target token activations. + weights: [L,S,T] tensor of weights. + + B,N may be dynamic, but L,S,T must be static. 
The dtype of all arguments + must be compatible. + + Returns: + [B,N,L] tensor of label potentials as defined above, with the same dtype as + the arguments. + """ + check.Eq(sources.get_shape().ndims, 3, 'sources must be rank 3') + check.Eq(targets.get_shape().ndims, 3, 'targets must be rank 3') + check.Eq(weights.get_shape().ndims, 3, 'weights must be rank 3') + + num_labels = weights.get_shape().as_list()[0] + num_source_activations = weights.get_shape().as_list()[1] + num_target_activations = weights.get_shape().as_list()[2] + check.NotNone(num_labels, 'unknown number of labels') + check.NotNone(num_source_activations, 'unknown source activation dimension') + check.NotNone(num_target_activations, 'unknown target activation dimension') + check.Eq(sources.get_shape().as_list()[2], num_source_activations, + 'activation mismatch between weights and source tokens') + check.Eq(targets.get_shape().as_list()[2], num_target_activations, + 'activation mismatch between weights and target tokens') + + check.Same([sources.dtype.base_dtype, + targets.dtype.base_dtype, + weights.dtype.base_dtype], + 'dtype mismatch') + + sources_shape = tf.shape(sources) + targets_shape = tf.shape(targets) + batch_size = sources_shape[0] + num_tokens = sources_shape[1] + with tf.control_dependencies([tf.assert_equal(batch_size, targets_shape[0]), + tf.assert_equal(num_tokens, targets_shape[1])]): + # For each token, we must compute a vector-3tensor-vector product. There is + # no op for this, but we can use reshape() and matmul() to compute it. + + # Reshape |weights| and |targets| so we can use a single matmul(). + weights_lsxt = tf.reshape(weights, [num_labels * num_source_activations, + num_target_activations]) + targets_bnxt = tf.reshape(targets, [-1, num_target_activations]) + weights_targets_bnxls = tf.matmul(targets_bnxt, weights_lsxt, + transpose_b=True) + + # Restore all dimensions. 
+ weights_targets_bxnxlxs = tf.reshape( + weights_targets_bnxls, + [batch_size, num_tokens, num_labels, num_source_activations]) + + # Incorporate the source activations. In this case, we perform a batched + # matmul() between the trailing [L,S] matrices of the current result and the + # trailing [S] vectors of the tokens. + sources_bxnx1xs = tf.expand_dims(sources, 2) + labels_bxnxlx1 = tf.matmul(weights_targets_bxnxlxs, sources_bxnx1xs, + transpose_b=True) + labels_bxnxl = tf.squeeze(labels_bxnxlx1, [3]) + return labels_bxnxl diff --git a/syntaxnet/dragnn/python/digraph_ops_test.py b/syntaxnet/dragnn/python/digraph_ops_test.py new file mode 100644 index 0000000000000000000000000000000000000000..b37684bc56ad81e23be9cfac43933064eb2c599f --- /dev/null +++ b/syntaxnet/dragnn/python/digraph_ops_test.py @@ -0,0 +1,163 @@ +"""Tests for digraph ops.""" + +import tensorflow as tf + +from dragnn.python import digraph_ops + + +class DigraphOpsTest(tf.test.TestCase): + """Testing rig.""" + + def testArcPotentialsFromTokens(self): + with self.test_session(): + # Batch of two, where the second batch item is the reverse of the first. 
+ source_tokens = tf.constant([[[1, 2], + [2, 3], + [3, 4]], + [[3, 4], + [2, 3], + [1, 2]]], tf.float32) + target_tokens = tf.constant([[[4, 5, 6], + [5, 6, 7], + [6, 7, 8]], + [[6, 7, 8], + [5, 6, 7], + [4, 5, 6]]], tf.float32) + weights = tf.constant([[2, 3, 5], + [7, 11, 13]], + tf.float32) + + arcs = digraph_ops.ArcPotentialsFromTokens(source_tokens, target_tokens, + weights) + + # For example, + # ((1 * 2 * 4 + 1 * 3 * 5 + 1 * 5 * 6) + + # (2 * 7 * 4 + 2 * 11 * 5 + 2 * 13 * 6)) = 375 + self.assertAllEqual(arcs.eval(), + [[[375, 447, 519], + [589, 702, 815], + [803, 957, 1111]], + [[1111, 957, 803], # reflected through the center + [815, 702, 589], + [519, 447, 375]]]) + + def testArcSourcePotentialsFromTokens(self): + with self.test_session(): + tokens = tf.constant([[[4, 5, 6], + [5, 6, 7], + [6, 7, 8]], + [[6, 7, 8], + [5, 6, 7], + [4, 5, 6]]], tf.float32) + weights = tf.constant([2, 3, 5], tf.float32) + + arcs = digraph_ops.ArcSourcePotentialsFromTokens(tokens, weights) + + self.assertAllEqual(arcs.eval(), [[[53, 53, 53], + [63, 63, 63], + [73, 73, 73]], + [[73, 73, 73], + [63, 63, 63], + [53, 53, 53]]]) + + def testRootPotentialsFromTokens(self): + with self.test_session(): + root = tf.constant([1, 2], tf.float32) + tokens = tf.constant([[[4, 5, 6], + [5, 6, 7], + [6, 7, 8]], + [[6, 7, 8], + [5, 6, 7], + [4, 5, 6]]], tf.float32) + weights = tf.constant([[2, 3, 5], + [7, 11, 13]], + tf.float32) + + roots = digraph_ops.RootPotentialsFromTokens(root, tokens, weights) + + self.assertAllEqual(roots.eval(), [[375, 447, 519], + [519, 447, 375]]) + + def testCombineArcAndRootPotentials(self): + with self.test_session(): + arcs = tf.constant([[[1, 2, 3], + [2, 3, 4], + [3, 4, 5]], + [[3, 4, 5], + [2, 3, 4], + [1, 2, 3]]], tf.float32) + roots = tf.constant([[6, 7, 8], + [8, 7, 6]], tf.float32) + + potentials = digraph_ops.CombineArcAndRootPotentials(arcs, roots) + + self.assertAllEqual(potentials.eval(), [[[6, 2, 3], + [2, 7, 4], + [3, 4, 8]], + [[8, 4, 5], + [2, 
7, 4], + [1, 2, 6]]]) + + def testLabelPotentialsFromTokens(self): + with self.test_session(): + tokens = tf.constant([[[1, 2], + [3, 4], + [5, 6]], + [[6, 5], + [4, 3], + [2, 1]]], tf.float32) + + + weights = tf.constant([[ 2, 3], + [ 5, 7], + [11, 13]], tf.float32) + + labels = digraph_ops.LabelPotentialsFromTokens(tokens, weights) + + self.assertAllEqual(labels.eval(), + + [[[ 8, 19, 37], + [ 18, 43, 85], + [ 28, 67, 133]], + [[ 27, 65, 131], + [ 17, 41, 83], + [ 7, 17, 35]]]) + + def testLabelPotentialsFromTokenPairs(self): + with self.test_session(): + sources = tf.constant([[[1, 2], + [3, 4], + [5, 6]], + [[6, 5], + [4, 3], + [2, 1]]], tf.float32) + targets = tf.constant([[[3, 4], + [5, 6], + [7, 8]], + [[8, 7], + [6, 5], + [4, 3]]], tf.float32) + + + weights = tf.constant([[[ 2, 3], + [ 5, 7]], + [[11, 13], + [17, 19]], + [[23, 29], + [31, 37]]], tf.float32) + + labels = digraph_ops.LabelPotentialsFromTokenPairs(sources, targets, + weights) + + self.assertAllEqual(labels.eval(), + + [[[ 104, 339, 667], + [ 352, 1195, 2375], + [ 736, 2531, 5043]], + [[ 667, 2419, 4857], + [ 303, 1115, 2245], + [ 75, 291, 593]]]) + + +if __name__ == "__main__": + tf.test.main() diff --git a/syntaxnet/dragnn/python/dragnn_ops.py b/syntaxnet/dragnn/python/dragnn_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..b26e5fc0cc2e77c6d49de2960f126061bdab698c --- /dev/null +++ b/syntaxnet/dragnn/python/dragnn_ops.py @@ -0,0 +1,9 @@ +"""Groups the DRAGNN TensorFlow ops in one module.""" + + +try: + from dragnn.core.ops.gen_dragnn_bulk_ops import * + from dragnn.core.ops.gen_dragnn_ops import * +except ImportError as e: + raise e + diff --git a/syntaxnet/dragnn/python/evaluation.py b/syntaxnet/dragnn/python/evaluation.py new file mode 100644 index 0000000000000000000000000000000000000000..a028502252d419882c90d2e9cde6e44a3ec4b04b --- /dev/null +++ b/syntaxnet/dragnn/python/evaluation.py @@ -0,0 +1,117 @@ +# Copyright 2017 Google Inc. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Parser evaluation utils.""" + +from __future__ import division + +import tensorflow as tf + +from syntaxnet import sentence_pb2 +from syntaxnet.util import check + + +def calculate_parse_metrics(gold_corpus, annotated_corpus): + """Calculate POS/UAS/LAS accuracy based on gold and annotated sentences.""" + check.Eq(len(gold_corpus), len(annotated_corpus), 'Corpora are not aligned') + num_tokens = 0 + num_correct_pos = 0 + num_correct_uas = 0 + num_correct_las = 0 + for gold_str, annotated_str in zip(gold_corpus, annotated_corpus): + gold = sentence_pb2.Sentence() + annotated = sentence_pb2.Sentence() + gold.ParseFromString(gold_str) + annotated.ParseFromString(annotated_str) + check.Eq(gold.text, annotated.text, 'Text is not aligned') + check.Eq(len(gold.token), len(annotated.token), 'Tokens are not aligned') + tokens = zip(gold.token, annotated.token) + num_tokens += len(tokens) + num_correct_pos += sum(1 for x, y in tokens if x.tag == y.tag) + num_correct_uas += sum(1 for x, y in tokens if x.head == y.head) + num_correct_las += sum(1 for x, y in tokens + if x.head == y.head and x.label == y.label) + + tf.logging.info('Total num documents: %d', len(annotated_corpus)) + tf.logging.info('Total num tokens: %d', num_tokens) + pos = num_correct_pos * 100.0 / num_tokens + uas = num_correct_uas * 100.0 / num_tokens + las = num_correct_las * 
100.0 / num_tokens + tf.logging.info('POS: %.2f%%', pos) + tf.logging.info('UAS: %.2f%%', uas) + tf.logging.info('LAS: %.2f%%', las) + return pos, uas, las + + +def parser_summaries(gold_corpus, annotated_corpus): + """Computes parser evaluation summaries for gold and annotated sentences.""" + pos, uas, las = calculate_parse_metrics(gold_corpus, annotated_corpus) + return {'POS': pos, 'LAS': las, 'UAS': uas, 'eval_metric': las} + + +def calculate_segmentation_metrics(gold_corpus, annotated_corpus): + """Calculate precision/recall/f1 based on gold and annotated sentences.""" + check.Eq(len(gold_corpus), len(annotated_corpus), 'Corpora are not aligned') + num_gold_tokens = 0 + num_test_tokens = 0 + num_correct_tokens = 0 + def token_span(token): + check.Ge(token.end, token.start) + return (token.start, token.end) + + def ratio(numerator, denominator): + check.Ge(numerator, 0) + check.Ge(denominator, 0) + if denominator > 0: + return numerator / denominator + elif numerator == 0: + return 0.0 # map 0/0 to 0 + else: + return float('inf') # map x/0 to inf + + for gold_str, annotated_str in zip(gold_corpus, annotated_corpus): + gold = sentence_pb2.Sentence() + annotated = sentence_pb2.Sentence() + gold.ParseFromString(gold_str) + annotated.ParseFromString(annotated_str) + check.Eq(gold.text, annotated.text, 'Text is not aligned') + gold_spans = set() + test_spans = set() + for token in gold.token: + check.NotIn(token_span(token), gold_spans, 'Duplicate token') + gold_spans.add(token_span(token)) + for token in annotated.token: + check.NotIn(token_span(token), test_spans, 'Duplicate token') + test_spans.add(token_span(token)) + num_gold_tokens += len(gold_spans) + num_test_tokens += len(test_spans) + num_correct_tokens += len(gold_spans.intersection(test_spans)) + + tf.logging.info('Total num documents: %d', len(annotated_corpus)) + tf.logging.info('Total gold tokens: %d', num_gold_tokens) + tf.logging.info('Total test tokens: %d', num_test_tokens) + precision = 100 * 
ratio(num_correct_tokens, num_test_tokens) + recall = 100 * ratio(num_correct_tokens, num_gold_tokens) + f1 = ratio(2 * precision * recall, precision + recall) + tf.logging.info('Precision: %.2f%%', precision) + tf.logging.info('Recall: %.2f%%', recall) + tf.logging.info('F1: %.2f%%', f1) + + return round(precision, 2), round(recall, 2), round(f1, 2) + + +def segmentation_summaries(gold_corpus, annotated_corpus): + """Computes segmentation eval summaries for gold and annotated sentences.""" + prec, rec, f1 = calculate_segmentation_metrics(gold_corpus, annotated_corpus) + return {'precision': prec, 'recall': rec, 'f1': f1, 'eval_metric': f1} diff --git a/syntaxnet/dragnn/python/evaluation_test.py b/syntaxnet/dragnn/python/evaluation_test.py new file mode 100644 index 0000000000000000000000000000000000000000..7be0fc4be1716a56a72529b56d91ce3ad732338c --- /dev/null +++ b/syntaxnet/dragnn/python/evaluation_test.py @@ -0,0 +1,108 @@ +# Copyright 2017 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Tests for parser evaluation.""" + +import tensorflow as tf + +from dragnn.python import evaluation +from syntaxnet import sentence_pb2 + + +class EvaluationTest(tf.test.TestCase): + + def _add_sentence(self, tags, heads, labels, corpus): + """Adds a sentence to the corpus.""" + sentence = sentence_pb2.Sentence() + for tag, head, label in zip(tags, heads, labels): + sentence.token.add(word='x', start=0, end=0, + tag=tag, head=head, label=label) + corpus.append(sentence.SerializeToString()) + + def setUp(self): + self._gold_corpus = [] + self._test_corpus = [] + + # A correct sentence. + self._add_sentence(['DT'], [-1], ['ROOT'], self._gold_corpus) + self._add_sentence(['DT'], [-1], ['ROOT'], self._test_corpus) + + # An incorrect sentence. There is one POS mistake, two head mistakes, and + # one label mistake. NB: Since the label mistake occurs on the one token + # with a correct head, this sentence has three mistakes w.r.t. LAS. + self._add_sentence(['DT', 'JJ', 'NN'], [2, 2, -1], ['det', 'amod', 'ROOT'], + self._gold_corpus) + self._add_sentence(['xx', 'JJ', 'NN'], [1, 0, -1], ['det', 'amod', 'xxxx'], + self._test_corpus) + + def testCalculateParseMetrics(self): + pos, uas, las = evaluation.calculate_parse_metrics(self._gold_corpus, + self._test_corpus) + self.assertEqual(75, pos) + self.assertEqual(50, uas) + self.assertEqual(25, las) + + def testCalculateSegmentationMetrics(self): + self._gold_corpus = [] + self._test_corpus = [] + + def add_sentence_for_segment_eval(starts, ends, corpus): + """Adds a sentence to the corpus.""" + sentence = sentence_pb2.Sentence() + for start, end in zip(starts, ends): + sentence.token.add(word='x', start=start, end=end) + corpus.append(sentence.SerializeToString()) + + # A test case with 5 gold words, 4 test words and 3 are correct. 
+ # -gold tokens: 'This is a gold sentence' + # -test tokens: 'Thisis a gold sentence' + add_sentence_for_segment_eval( + [0, 5, 8, 10, 15], [3, 6, 8, 13, 22], self._gold_corpus) + add_sentence_for_segment_eval( + [0, 8, 10, 15], [6, 8, 13, 22], self._test_corpus) + + # Another test case with 3 gold words, 5 test words and 2 correct words. + # -gold tokens: 'another gold sentence' + # -test tokens: 'another gold sen tence' + add_sentence_for_segment_eval([0, 8, 13], [6, 11, 20], self._gold_corpus) + add_sentence_for_segment_eval([0, 8, 13, 17, 21], [6, 11, 15, 19, 22], + self._test_corpus) + prec, rec, f1 = evaluation.calculate_segmentation_metrics(self._gold_corpus, + self._test_corpus) + self.assertEqual(55.56, prec) + self.assertEqual(62.50, rec) + self.assertEqual(58.82, f1) + + summaries = evaluation.segmentation_summaries(self._gold_corpus, + self._test_corpus) + self.assertEqual({ + 'precision': 55.56, + 'recall': 62.50, + 'f1': 58.82, + 'eval_metric': 58.82 + }, summaries) + + def testParserSummaries(self): + summaries = evaluation.parser_summaries(self._gold_corpus, + self._test_corpus) + self.assertEqual({ + 'POS': 75, + 'UAS': 50, + 'LAS': 25, + 'eval_metric': 25 # equals LAS + }, summaries) + + +if __name__ == '__main__': + tf.test.main() diff --git a/syntaxnet/dragnn/python/graph_builder.py b/syntaxnet/dragnn/python/graph_builder.py new file mode 100644 index 0000000000000000000000000000000000000000..25a0e2bd8318d8392489f31f30f67d9a8901d6f2 --- /dev/null +++ b/syntaxnet/dragnn/python/graph_builder.py @@ -0,0 +1,584 @@ +"""Builds a DRAGNN graph for local training.""" + + +import tensorflow as tf +from tensorflow.core.protobuf import saver_pb2 +from tensorflow.python.platform import tf_logging as logging + +from dragnn.protos import spec_pb2 +from dragnn.python import component +from dragnn.python import composite_optimizer +from dragnn.python import dragnn_ops +from syntaxnet.util import check + +try: + tf.NotDifferentiable('ExtractFixedFeatures') 
+except KeyError, e: + logging.info(str(e)) + + +def _create_learning_rate(hyperparams, step_var): + """Creates learning rate var, with decay and switching for CompositeOptimizer. + + Args: + hyperparams: a GridPoint proto containing optimizer spec, particularly + learning_method to determine optimizer class to use. + step_var: tf.Variable, global training step. + + Returns: + a scalar `Tensor`, the learning rate based on current step and hyperparams. + """ + if hyperparams.learning_method != 'composite': + base_rate = hyperparams.learning_rate + else: + spec = hyperparams.composite_optimizer_spec + switch = tf.less(step_var, spec.switch_after_steps) + base_rate = tf.cond(switch, lambda: tf.constant(spec.method1.learning_rate), + lambda: tf.constant(spec.method2.learning_rate)) + return tf.train.exponential_decay( + base_rate, + step_var, + hyperparams.decay_steps, + hyperparams.decay_base, + staircase=hyperparams.decay_staircase) + + +def _create_optimizer(hyperparams, learning_rate_var, step_var=None): + """Creates an optimizer object for a given spec, learning rate and step var. + + Args: + hyperparams: a GridPoint proto containing optimizer spec, particularly + learning_method to determine optimizer class to use. + learning_rate_var: a `tf.Tensor`, the learning rate. + step_var: a `tf.Variable`, global training step. + + Returns: + a `tf.train.Optimizer` object that was built. 
+ """ + if hyperparams.learning_method == 'gradient_descent': + return tf.train.GradientDescentOptimizer( + learning_rate_var, use_locking=True) + elif hyperparams.learning_method == 'adam': + return tf.train.AdamOptimizer( + learning_rate_var, + beta1=hyperparams.adam_beta1, + beta2=hyperparams.adam_beta2, + epsilon=hyperparams.adam_eps, + use_locking=True) + elif hyperparams.learning_method == 'momentum': + return tf.train.MomentumOptimizer( + learning_rate_var, hyperparams.momentum, use_locking=True) + elif hyperparams.learning_method == 'composite': + spec = hyperparams.composite_optimizer_spec + optimizer1 = _create_optimizer(spec.method1, learning_rate_var, step_var) + optimizer2 = _create_optimizer(spec.method2, learning_rate_var, step_var) + if step_var is None: + logging.fatal('step_var is required for CompositeOptimizer') + switch = tf.less(step_var, spec.switch_after_steps) + return composite_optimizer.CompositeOptimizer( + optimizer1, optimizer2, switch, use_locking=True) + else: + logging.fatal('Unknown learning method (optimizer)') + + +class MasterBuilder(object): + """A builder for a DRAGNN stack of models. + + This class is the major factory for all DRAGNN models. It provides + common hooks to build training and evaluation targets from a single + MasterSpec and hyperparameter configuration. + + The key concept is as follows: to execute a DRAGNN graph, one needs + two stateful pieces: + + 1. A handle to a C++ dragnn state, managed outside of TensorFlow and + accesssed via the custom dragnn ops. + 2. A set of StoredActivations, one for each component, that contain network + activations that can be used across components. + + TODO(googleuser): Update these comments to be accurate. + Both of these can be handled automatically "under-the-hood" by the + MasterBuilder API. For #1, the key consideration is that each C++ + ComputeSession is allocated statically, meaning memory is shared + across different tensorflow::Session invocations. 
ComputeSessions are + allocated from pools. The `pool_scope` identifies the pool, unique to this + MasterBuilder, from which the ComputeSession is allocated. From there, + GetSession takes care of handing out ComputeSessions with unique handles. + Each ComputeSession can then be run concurrently. + + Attributes: + spec: the MasterSpec proto. + hyperparams: the GridPoint proto containing hyperparameters. + pool_scope: string identifier for the ComputeSession pool to use. + components: a list of ComponentBuilders in the order they are defined + in the MasterSpec. + lookup_component: a dictionary to lookup ComponentBuilders by name. + optimizer: handle to the tf.train Optimizer object used to train this model. + master_vars: dictionary of globally shared tf.Variable objects (e.g. + the global training step and learning rate.) + """ + + def __init__(self, master_spec, hyperparam_config=None, pool_scope='shared'): + """Initializes the MasterBuilder from specifications. + + During construction, all components are initialized along with their + parameter tf.Variables. + + Args: + master_spec: dragnn.MasterSpec proto. + hyperparam_config: dragnn.GridPoint proto specifying hyperparameters. + Defaults to empty specification. + pool_scope: string identifier for the compute session pool to use. + + Raises: + ValueError: if a component is not found in the registry. + """ + self.spec = master_spec + self.hyperparams = (spec_pb2.GridPoint() + if hyperparam_config is None else hyperparam_config) + self.pool_scope = pool_scope + + # Construct all utility class and variables for each Component. + self.components = [] + self.lookup_component = {} + for component_spec in master_spec.component: + component_type = component_spec.component_builder.registered_name + + # Raises ValueError if not found. 
+ comp = component.ComponentBuilderBase.Create(component_type, self, + component_spec) + + self.lookup_component[comp.name] = comp + self.components.append(comp) + + # Add global step variable. + self.master_vars = {} + with tf.variable_scope('master', reuse=False): + self.master_vars['step'] = tf.get_variable( + 'step', [], initializer=tf.zeros_initializer(), dtype=tf.int32) + self.master_vars['learning_rate'] = _create_learning_rate( + self.hyperparams, self.master_vars['step']) + + # Construct optimizer. + self.optimizer = _create_optimizer(self.hyperparams, + self.master_vars['learning_rate'], + self.master_vars['step']) + + @property + def component_names(self): + return tuple(c.name for c in self.components) + + def _get_compute_session(self): + """Returns a new ComputeSession handle.""" + return dragnn_ops.get_session( + self.pool_scope, + master_spec=self.spec.SerializeToString(), + grid_point=self.hyperparams.SerializeToString(), + name='GetSession') + + def _get_session_with_reader(self, enable_tracing): + """Utility to create ComputeSession management ops. + + Creates a new ComputeSession handle and provides the following + named nodes: + + ComputeSession/InputBatch -- a placeholder for attaching a string + specification for AttachReader. + ComputeSession/AttachReader -- the AttachReader op. + + Args: + enable_tracing: bool, whether to enable tracing before attaching the data. + + Returns: + handle: handle to a new ComputeSession returned by the AttachReader op. + input_batch: InputBatch placeholder. + """ + with tf.name_scope('ComputeSession'): + input_batch = tf.placeholder( + dtype=tf.string, shape=[None], name='InputBatch') + + # Get the ComputeSession and chain some essential ops. 
+ handle = self._get_compute_session() + if enable_tracing: + handle = dragnn_ops.set_tracing(handle, True) + handle = dragnn_ops.attach_data_reader( + handle, input_batch, name='AttachReader') + + return handle, input_batch + + def _outputs_with_release(self, handle, inputs, outputs): + """Ensures ComputeSession is released before outputs are returned. + + Args: + handle: Handle to ComputeSession on which all computation until now has + depended. It will be released and assigned to the output 'run'. + inputs: list of nodes we want to pass through without any dependencies. + outputs: list of nodes whose access should ensure the ComputeSession is + safely released. + + Returns: + A dictionary of both input and output nodes. + """ + with tf.control_dependencies(outputs.values()): + with tf.name_scope('ComputeSession'): + release_op = dragnn_ops.release_session(handle) + run_op = tf.group(release_op, name='run') + for output in outputs: + with tf.control_dependencies([release_op]): + outputs[output] = tf.identity(outputs[output], name=output) + all_nodes = inputs.copy() + all_nodes.update(outputs) + + # Add an alias for simply running without collecting outputs. + # Common, for instance, with training. + all_nodes['run'] = run_op + return all_nodes + + def build_training(self, + handle, + compute_gradients=True, + use_moving_average=False, + advance_counters=True, + component_weights=None, + unroll_using_oracle=None, + max_index=-1): + """Builds a training pipeline. + + Args: + handle: Handle tensor for the ComputeSession. + compute_gradients: Whether to generate gradients and an optimizer op. + When False, build_training will return a 'dry run' training op, + used normally only for oracle tracing. + use_moving_average: Whether or not to read from the moving + average variables instead of the true parameters. Note: it is not + possible to make gradient updates when this is True. 
+ advance_counters: Whether or not this loop should increment the + per-component step counters. + component_weights: If set, this is a list of relative weights + each component's cost should get in the pipeline. Defaults to 1.0 for + each component. + unroll_using_oracle: If set, this is a list of booleans indicating + whether or not to use the gold decodings for each component. Defaults + to True for each component. + max_index: Training will use only the first max_index components, + or -1 for all components. + + Returns: + handle: to the ComputeSession, conditioned on completing training step. + outputs: a dictionary of useful training tensors. + + Raises: + IndexError: if max_index is positive but out of bounds. + """ + check.IsFalse(compute_gradients and use_moving_average, + 'It is not possible to make gradient updates when reading ' + 'from the moving average variables.') + + self.read_from_avg = use_moving_average + if max_index < 0: + max_index = len(self.components) + else: + if not 0 < max_index <= len(self.components): + raise IndexError('Invalid max_index {} for components {}; handle {}'. + format(max_index, self.component_names, handle.name)) + + # By default, we train every component supervised. + if not component_weights: + component_weights = [1] * max_index + if not unroll_using_oracle: + unroll_using_oracle = [True] * max_index + + component_weights = component_weights[:max_index] + total_weight = (float)(sum(component_weights)) + component_weights = [w / total_weight for w in component_weights] + + unroll_using_oracle = unroll_using_oracle[:max_index] + + logging.info('Creating training target:') + logging.info('\tWeights: %s', component_weights) + logging.info('\tOracle: %s', unroll_using_oracle) + + metrics_list = [] + cost = tf.constant(0.) 
+ effective_batch = tf.constant(0) + + avg_ops = [] + params_to_train = [] + + network_states = {} + for component_index in range(0, max_index): + comp = self.components[component_index] + network_states[comp.name] = component.NetworkState() + + logging.info('Initializing data for component "%s"', comp.name) + handle = dragnn_ops.init_component_data( + handle, beam_size=comp.training_beam_size, component=comp.name) + # TODO(googleuser): Phase out component.MasterState. + master_state = component.MasterState(handle, + dragnn_ops.batch_size( + handle, component=comp.name)) + with tf.control_dependencies([handle, cost]): + component_cost = tf.constant(0.) + component_correct = tf.constant(0) + component_total = tf.constant(0) + if unroll_using_oracle[component_index]: + handle, component_cost, component_correct, component_total = ( + comp.build_greedy_training(master_state, network_states)) + else: + handle = comp.build_greedy_inference( + master_state, network_states, during_training=True) + + weighted_component_cost = tf.multiply( + component_cost, + tf.constant((float)(component_weights[component_index])), + name='weighted_component_cost') + + cost += weighted_component_cost + effective_batch += component_total + metrics_list += [[component_total], [component_correct]] + + if advance_counters: + with tf.control_dependencies( + [comp.advance_counters(component_total)]): + cost = tf.identity(cost) + + # Keep track of which parameters will be trained, and any moving + # average updates to apply for these parameters. + params_to_train += comp.network.params + if self.hyperparams.use_moving_average: + avg_ops += comp.avg_ops + + # Concatenate evaluation results + metrics = tf.concat(metrics_list, 0) + + # If gradient computation is requested, then: + # 1. compute the gradients, + # 2. add an optimizer to update the parameters using the gradients, + # 3. make the ComputeSession handle depend on the optimizer. 
+ if compute_gradients: + logging.info('Creating train op with %d variables:\n\t%s', + len(params_to_train), + '\n\t'.join([x.name for x in params_to_train])) + + grads_and_vars = self.optimizer.compute_gradients( + cost, var_list=params_to_train) + clipped_gradients = [(self._clip_gradients(g), v) + for g, v in grads_and_vars] + minimize_op = self.optimizer.apply_gradients( + clipped_gradients, global_step=self.master_vars['step']) + + if self.hyperparams.use_moving_average: + with tf.control_dependencies([minimize_op]): + minimize_op = tf.group(*avg_ops) + + # Make sure all the side-effectful minimizations ops finish before + # proceeding. + with tf.control_dependencies([minimize_op]): + handle = tf.identity(handle) + + # Restore that subsequent builds don't use average by default. + self.read_from_avg = False + + # Returns named access to common outputs. + outputs = { + 'cost': cost, + 'batch': effective_batch, + 'metrics': metrics, + } + return handle, outputs + + def _clip_gradients(self, grad): + """Clips gradients if the hyperparameter `gradient_clip_norm` requires it. + + Sparse tensors, in the form of IndexedSlices returned for the + gradients of embeddings, require special handling. + + Args: + grad: Gradient Tensor, IndexedSlices, or None. + + Returns: + Optionally clipped gradient. + """ + if grad is not None and self.hyperparams.gradient_clip_norm > 0: + logging.info('Clipping gradient %s', grad) + if isinstance(grad, tf.IndexedSlices): + tmp = tf.clip_by_norm(grad.values, self.hyperparams.gradient_clip_norm) + return tf.IndexedSlices(tmp, grad.indices, grad.dense_shape) + else: + return tf.clip_by_norm(grad, self.hyperparams.gradient_clip_norm) + else: + return grad + + def build_post_restore_hook(self): + """Builds a graph that should be executed after the restore op. + + This graph is intended to be run once, before the inference pipeline is + run. + + Returns: + setup_op - An op that, when run, guarantees all setup ops will run. 
+ """ + with tf.control_dependencies( + [comp.build_post_restore_hook() for comp in self.components]): + return tf.no_op(name='post_restore_hook_master') + + def build_inference(self, handle, use_moving_average=False): + """Builds an inference pipeline. + + This always uses the whole pipeline. + + Args: + handle: Handle tensor for the ComputeSession. + use_moving_average: Whether or not to read from the moving + average variables instead of the true parameters. Note: it is not + possible to make gradient updates when this is True. + + Returns: + handle: Handle after annotation. + """ + self.read_from_avg = use_moving_average + network_states = {} + + for comp in self.components: + network_states[comp.name] = component.NetworkState() + handle = dragnn_ops.init_component_data( + handle, beam_size=comp.inference_beam_size, component=comp.name) + master_state = component.MasterState(handle, + dragnn_ops.batch_size( + handle, component=comp.name)) + with tf.control_dependencies([handle]): + handle = comp.build_greedy_inference(master_state, network_states) + handle = dragnn_ops.write_annotations(handle, component=comp.name) + + self.read_from_avg = False + return handle + + def add_training_from_config(self, + target_config, + prefix='train-', + trace_only=False, + **kwargs): + """Constructs a training pipeline from a TrainTarget proto. + + This constructs a separately managed pipeline for a given target: + it has its own ComputeSession, InputSpec placeholder, etc. The ops + are given standardized names to allow access from the C++ API. It + passes the values in target_config to build_training() above. 
+ + For the default prefix ('train-'), and a target named 'target', this will + construct the following targets in the graph: + + train-target/ComputeSession/* (the standard ComputeSession controls) + train-target/run (handle to a completed training step) + train-target/metrics (per-decision metrics from gold oracles) + train-target/cost (total cost across all components) + + Enabling `trace_only` effectively creates a graph that is a 'dry run'. + There will be no side affects. In addition, the gradients won't be computed + and the model parameters will not be updated. + + Args: + target_config: the TrainTarget proto. + prefix: Preprends target_config.name with this to construct + a unique identifier. + trace_only: Enabling this will result in: + 1. Tracing will be enabled for the ComputeSession.. + 2. A 'traces' node will be added to the outputs. + 3. Gradients will not be computed. + **kwargs: Passed on to build_training() above. + + Returns: + Dictionary of training targets. + """ + logging.info('Creating new training target ' + '%s' + ' from config: %s', target_config.name, str(target_config)) + scope_id = prefix + target_config.name + with tf.name_scope(scope_id): + # Construct training targets. Disable tracing during training. + handle, input_batch = self._get_session_with_reader(trace_only) + if trace_only: + # Build a training graph that doesn't have any side effects. + handle, outputs = self.build_training( + handle, + compute_gradients=False, + advance_counters=False, + component_weights=target_config.component_weights, + unroll_using_oracle=target_config.unroll_using_oracle, + max_index=target_config.max_index, + **kwargs) + outputs['traces'] = dragnn_ops.get_component_trace( + handle, component=self.spec.component[-1].name) + else: + # The standard training scenario has gradients and updates counters. 
+ handle, outputs = self.build_training( + handle, + compute_gradients=True, + advance_counters=True, + component_weights=target_config.component_weights, + unroll_using_oracle=target_config.unroll_using_oracle, + max_index=target_config.max_index, + **kwargs) + + # In addition, it keeps track of the number of training steps. + outputs['target_step'] = tf.get_variable( + scope_id + '/TargetStep', [], + initializer=tf.zeros_initializer(), + dtype=tf.int32) + increment_target_step = tf.assign_add( + outputs['target_step'], 1, use_locking=True) + + with tf.control_dependencies([increment_target_step]): + handle = tf.identity(handle) + + return self._outputs_with_release(handle, {'input_batch': input_batch}, + outputs) + + def add_annotation(self, name_scope='annotation', enable_tracing=False): + """Adds an annotation pipeline to the graph. + + This will create the following additional named targets by default, for use + in C++ annotation code (as well as regular ComputeSession targets): + annotation/ComputeSession/session_id (placeholder for giving unique id) + annotation/EmitAnnotations (get annotated data) + annotation/GetComponentTrace (get trace data) + annotation/SetTracing (sets tracing based on annotation/tracing_on) + + Args: + name_scope: Scope for the annotation pipeline. + enable_tracing: Enabling this will result in two things: + 1. Tracing will be enabled during inference. + 2. A 'traces' node will be added to the outputs. + + Returns: + A dictionary of input and output nodes. 
+ """ + with tf.name_scope(name_scope): + handle, input_batch = self._get_session_with_reader(enable_tracing) + handle = self.build_inference(handle, use_moving_average=True) + + annotations = dragnn_ops.emit_annotations( + handle, component=self.spec.component[-1].name) + outputs = {'annotations': annotations} + + if enable_tracing: + outputs['traces'] = dragnn_ops.get_component_trace( + handle, component=self.spec.component[-1].name) + + return self._outputs_with_release(handle, {'input_batch': input_batch}, + outputs) + + def add_post_restore_hook(self, name_scope): + """Adds the post restore ops.""" + with tf.name_scope(name_scope): + return self.build_post_restore_hook() + + def add_saver(self): + """Adds a Saver for all variables in the graph.""" + logging.info('Saving non-quantized variables:\n\t%s', '\n\t'.join( + [x.name for x in tf.global_variables() if 'quantized' not in x.name])) + self.saver = tf.train.Saver( + var_list=[ + x for x in tf.global_variables() if 'quantized' not in x.name + ], + write_version=saver_pb2.SaverDef.V1) diff --git a/syntaxnet/dragnn/python/graph_builder_test.py b/syntaxnet/dragnn/python/graph_builder_test.py new file mode 100644 index 0000000000000000000000000000000000000000..0a19d7a54914cbc34c180a44952a5008f1ebc5fb --- /dev/null +++ b/syntaxnet/dragnn/python/graph_builder_test.py @@ -0,0 +1,655 @@ +"""Tests for graph_builder.""" + + +import collections +import os.path + + +import numpy as np +import tensorflow as tf + +from google.protobuf import text_format + +from dragnn.protos import spec_pb2 +from dragnn.protos import trace_pb2 +from dragnn.python import dragnn_ops +from dragnn.python import graph_builder +from syntaxnet import sentence_pb2 + +from tensorflow.python.framework import test_util +from tensorflow.python.platform import googletest +from tensorflow.python.platform import tf_logging as logging + +import dragnn.python.load_dragnn_cc_impl +import syntaxnet.load_parser_ops + +FLAGS = tf.app.flags.FLAGS +if not 
hasattr(FLAGS, 'test_srcdir'): + FLAGS.test_srcdir = '' +if not hasattr(FLAGS, 'test_tmpdir'): + FLAGS.test_tmpdir = tf.test.get_temp_dir() + +_DUMMY_GOLD_SENTENCE = """ +token { + word: "sentence" start: 0 end: 7 tag: "NN" category: "NOUN" label: "ROOT" +} +token { + word: "0" start: 9 end: 9 head: 0 tag: "CD" category: "NUM" label: "num" +} +token { + word: "." start: 10 end: 10 head: 0 tag: "." category: "." label: "punct" +} +""" + +# The second sentence has different length, to test the effect of +# mixed-length batches. +_DUMMY_GOLD_SENTENCE_2 = """ +token { + word: "sentence" start: 0 end: 7 tag: "NN" category: "NOUN" label: "ROOT" +} +""" + +# The test sentence is the gold sentence with the tags and parse information +# removed. +_DUMMY_TEST_SENTENCE = """ +token { + word: "sentence" start: 0 end: 7 +} +token { + word: "0" start: 9 end: 9 +} +token { + word: "." start: 10 end: 10 +} +""" + +_DUMMY_TEST_SENTENCE_2 = """ +token { + word: "sentence" start: 0 end: 7 +} +""" + +_TAGGER_EXPECTED_SENTENCES = [ + """ +token { + word: "sentence" start: 0 end: 7 tag: "NN" +} +token { + word: "0" start: 9 end: 9 tag: "CD" +} +token { + word: "." start: 10 end: 10 tag: "." +} +""", """ +token { + word: "sentence" start: 0 end: 7 tag: "NN" +} +""" +] + +_TAGGER_PARSER_EXPECTED_SENTENCES = [ + """ +token { + word: "sentence" start: 0 end: 7 tag: "NN" label: "ROOT" +} +token { + word: "0" start: 9 end: 9 head: 0 tag: "CD" label: "num" +} +token { + word: "." start: 10 end: 10 head: 0 tag: "." label: "punct" +} +""", """ +token { + word: "sentence" start: 0 end: 7 tag: "NN" label: "ROOT" +} +""" +] + +_UNLABELED_PARSER_EXPECTED_SENTENCES = [ + """ +token { + word: "sentence" start: 0 end: 7 label: "punct" +} +token { + word: "0" start: 9 end: 9 head: 0 label: "punct" +} +token { + word: "." 
start: 10 end: 10 head: 0 label: "punct" +} +""", """ +token { + word: "sentence" start: 0 end: 7 label: "punct" +} +""" +] + +_LABELED_PARSER_EXPECTED_SENTENCES = [ + """ +token { + word: "sentence" start: 0 end: 7 label: "ROOT" +} +token { + word: "0" start: 9 end: 9 head: 0 label: "num" +} +token { + word: "." start: 10 end: 10 head: 0 label: "punct" +} +""", """ +token { + word: "sentence" start: 0 end: 7 label: "ROOT" +} +""" +] + + +def _as_op(x): + """Always returns the tf.Operation associated with a node.""" + return x.op if isinstance(x, tf.Tensor) else x + + +def _find_input_path(src, dst_predicate): + """Finds an input path from `src` to a node that satisfies `dst_predicate`. + + TensorFlow graphs are directed. We generate paths from outputs to inputs, + recursively searching both direct (i.e. data) and control inputs. Graphs with + while_loop control flow may contain cycles. Therefore we eliminate loops + during the DFS. + + Args: + src: tf.Tensor or tf.Operation root node. + dst_predicate: function taking one argument (a node), returning true iff a + a target node has been found. + + Returns: + a path from `src` to the first node that satisfies dest_predicate, or the + empty list otherwise. + """ + path_to = {src: None} + + def dfs(x): + if dst_predicate(x): + return x + x_op = _as_op(x) + for y in x_op.control_inputs + list(x_op.inputs): + # Check if we've already visited node `y`. + if y not in path_to: + path_to[y] = x + res = dfs(y) + if res is not None: + return res + return None + + dst = dfs(src) + path = [] + while dst in path_to: + path.append(dst) + dst = path_to[dst] + return list(reversed(path)) + + +def _find_input_path_to_type(src, dst_type): + """Finds a path from `src` to a node with type (i.e. kernel) `dst_type`.""" + return _find_input_path(src, lambda x: _as_op(x).type == dst_type) + + +class GraphBuilderTest(test_util.TensorFlowTestCase): + + def assertEmpty(self, container, msg=None): + """Assert that an object has zero length. 
+ + Args: + container: Anything that implements the collections.Sized interface. + msg: Optional message to report on failure. + """ + if not isinstance(container, collections.Sized): + self.fail('Expected a Sized object, got: ' + '{!r}'.format(type(container).__name__), msg) + + # explicitly check the length since some Sized objects (e.g. numpy.ndarray) + # have strange __nonzero__/__bool__ behavior. + if len(container): + self.fail('{!r} has length of {}.'.format(container, len(container)), msg) + + def assertNotEmpty(self, container, msg=None): + """Assert that an object has non-zero length. + + Args: + container: Anything that implements the collections.Sized interface. + msg: Optional message to report on failure. + """ + if not isinstance(container, collections.Sized): + self.fail('Expected a Sized object, got: ' + '{!r}'.format(type(container).__name__), msg) + + # explicitly check the length since some Sized objects (e.g. numpy.ndarray) + # have strange __nonzero__/__bool__ behavior. 
+ if not len(container): + self.fail('{!r} has length of 0.'.format(container), msg) + + def LoadSpec(self, spec_path): + master_spec = spec_pb2.MasterSpec() + testdata = os.path.join(FLAGS.test_srcdir, + 'dragnn/core/testdata') + with file(os.path.join(testdata, spec_path), 'r') as fin: + text_format.Parse(fin.read().replace('TESTDATA', testdata), master_spec) + return master_spec + + def MakeHyperparams(self, **kwargs): + hyperparam_config = spec_pb2.GridPoint() + for key in kwargs: + setattr(hyperparam_config, key, kwargs[key]) + return hyperparam_config + + def RunTraining(self, hyperparam_config): + master_spec = self.LoadSpec('master_spec_link.textproto') + + self.assertTrue(isinstance(hyperparam_config, spec_pb2.GridPoint)) + gold_doc = sentence_pb2.Sentence() + text_format.Parse(_DUMMY_GOLD_SENTENCE, gold_doc) + gold_doc_2 = sentence_pb2.Sentence() + text_format.Parse(_DUMMY_GOLD_SENTENCE_2, gold_doc_2) + reader_strings = [ + gold_doc.SerializeToString(), gold_doc_2.SerializeToString() + ] + tf.logging.info('Generating graph with config: %s', hyperparam_config) + with tf.Graph().as_default(): + builder = graph_builder.MasterBuilder(master_spec, hyperparam_config) + + target = spec_pb2.TrainTarget() + target.name = 'testTraining-all' + train = builder.add_training_from_config(target) + with self.test_session() as sess: + logging.info('Initializing') + sess.run(tf.global_variables_initializer()) + + # Run one iteration of training and verify nothing crashes. 
+ logging.info('Training') + sess.run(train['run'], feed_dict={train['input_batch']: reader_strings}) + + def testTraining(self): + """Tests the default hyperparameter settings.""" + self.RunTraining(self.MakeHyperparams()) + + def testTrainingWithGradientClipping(self): + """Adds code coverage for gradient clipping.""" + self.RunTraining(self.MakeHyperparams(gradient_clip_norm=1.25)) + + def testTrainingWithAdamAndAveraging(self): + """Adds code coverage for ADAM and the use of moving averaging.""" + self.RunTraining( + self.MakeHyperparams(learning_method='adam', use_moving_average=True)) + + def testTrainingWithCompositeOptimizer(self): + """Adds code coverage for CompositeOptimizer.""" + grid_point = self.MakeHyperparams(learning_method='composite') + grid_point.composite_optimizer_spec.method1.learning_method = 'adam' + grid_point.composite_optimizer_spec.method2.learning_method = 'momentum' + grid_point.composite_optimizer_spec.method2.momentum = 0.9 + self.RunTraining(grid_point) + + def RunFullTrainingAndInference(self, + test_name, + master_spec_path=None, + master_spec=None, + component_weights=None, + unroll_using_oracle=None, + num_evaluated_components=1, + expected_num_actions=None, + expected=None, + batch_size_limit=None): + if not master_spec: + master_spec = self.LoadSpec(master_spec_path) + + gold_doc = sentence_pb2.Sentence() + text_format.Parse(_DUMMY_GOLD_SENTENCE, gold_doc) + gold_doc_2 = sentence_pb2.Sentence() + text_format.Parse(_DUMMY_GOLD_SENTENCE_2, gold_doc_2) + gold_reader_strings = [ + gold_doc.SerializeToString(), gold_doc_2.SerializeToString() + ] + + test_doc = sentence_pb2.Sentence() + text_format.Parse(_DUMMY_TEST_SENTENCE, test_doc) + test_doc_2 = sentence_pb2.Sentence() + text_format.Parse(_DUMMY_TEST_SENTENCE_2, test_doc_2) + test_reader_strings = [ + test_doc.SerializeToString(), test_doc.SerializeToString(), + test_doc_2.SerializeToString(), test_doc.SerializeToString() + ] + + if batch_size_limit is not None: + 
gold_reader_strings = gold_reader_strings[:batch_size_limit] + test_reader_strings = test_reader_strings[:batch_size_limit] + + with tf.Graph().as_default(): + tf.set_random_seed(1) + hyperparam_config = spec_pb2.GridPoint() + builder = graph_builder.MasterBuilder( + master_spec, hyperparam_config, pool_scope=test_name) + target = spec_pb2.TrainTarget() + target.name = 'testFullInference-train-%s' % test_name + if component_weights: + target.component_weights.extend(component_weights) + else: + target.component_weights.extend([0] * len(master_spec.component)) + target.component_weights[-1] = 1.0 + if unroll_using_oracle: + target.unroll_using_oracle.extend(unroll_using_oracle) + else: + target.unroll_using_oracle.extend([False] * len(master_spec.component)) + target.unroll_using_oracle[-1] = True + train = builder.add_training_from_config(target) + oracle_trace = builder.add_training_from_config( + target, prefix='train_traced-', trace_only=True) + builder.add_saver() + + anno = builder.add_annotation(test_name) + trace = builder.add_annotation(test_name + '-traced', enable_tracing=True) + + # Verifies that the summaries can be built. + for component in builder.components: + component.get_summaries() + + config = tf.ConfigProto( + intra_op_parallelism_threads=0, inter_op_parallelism_threads=0) + with self.test_session(config=config) as sess: + logging.info('Initializing') + sess.run(tf.global_variables_initializer()) + + logging.info('Dry run oracle trace...') + traces = sess.run( + oracle_trace['traces'], + feed_dict={oracle_trace['input_batch']: gold_reader_strings}) + + # Check that the oracle traces are not empty. 
+ for serialized_trace in traces: + master_trace = trace_pb2.MasterTrace() + master_trace.ParseFromString(serialized_trace) + self.assertTrue(master_trace.component_trace) + self.assertTrue(master_trace.component_trace[0].step_trace) + + logging.info('Simulating training...') + break_iter = 400 + is_resolved = False + for i in range(0, + 400): # needs ~100 iterations, but is not deterministic + cost, eval_res_val = sess.run( + [train['cost'], train['metrics']], + feed_dict={train['input_batch']: gold_reader_strings}) + logging.info('cost = %s', cost) + self.assertFalse(np.isnan(cost)) + total_val = eval_res_val.reshape((-1, 2))[:, 0].sum() + correct_val = eval_res_val.reshape((-1, 2))[:, 1].sum() + if correct_val == total_val and not is_resolved: + logging.info('... converged on iteration %d with (correct, total) ' + '= (%d, %d)', i, correct_val, total_val) + is_resolved = True + # Run for slightly longer than convergence to help with quantized + # weight tiebreakers. + break_iter = i + 50 + + if i == break_iter: + break + + # If training failed, report total/correct actions for each component. + if not expected_num_actions: + expected_num_actions = 4 * num_evaluated_components + if (correct_val != total_val or correct_val != expected_num_actions or + total_val != expected_num_actions): + for c in xrange(len(master_spec.component)): + logging.error('component %s:\nname=%s\ntotal=%s\ncorrect=%s', c, + master_spec.component[c].name, eval_res_val[2 * c], + eval_res_val[2 * c + 1]) + + assert correct_val == total_val, 'Did not converge! %d vs %d.' 
% ( + correct_val, total_val) + + self.assertEqual(expected_num_actions, correct_val) + self.assertEqual(expected_num_actions, total_val) + + builder.saver.save(sess, os.path.join(FLAGS.test_tmpdir, 'model')) + + logging.info('Running test.') + logging.info('Printing annotations') + annotations = sess.run( + anno['annotations'], + feed_dict={anno['input_batch']: test_reader_strings}) + logging.info('Put %d inputs in, got %d annotations out.', + len(test_reader_strings), len(annotations)) + + # Also run the annotation graph with tracing enabled. + annotations_with_trace, traces = sess.run( + [trace['annotations'], trace['traces']], + feed_dict={trace['input_batch']: test_reader_strings}) + + # The result of the two annotation graphs should be identical. + self.assertItemsEqual(annotations, annotations_with_trace) + + # Check that the inference traces are not empty. + for serialized_trace in traces: + master_trace = trace_pb2.MasterTrace() + master_trace.ParseFromString(serialized_trace) + self.assertTrue(master_trace.component_trace) + self.assertTrue(master_trace.component_trace[0].step_trace) + + self.assertEqual(len(test_reader_strings), len(annotations)) + pred_sentences = [] + for annotation in annotations: + pred_sentences.append(sentence_pb2.Sentence()) + pred_sentences[-1].ParseFromString(annotation) + + if expected is None: + expected = _TAGGER_EXPECTED_SENTENCES + + expected_sentences = [expected[i] for i in [0, 0, 1, 0]] + + for i, pred_sentence in enumerate(pred_sentences): + self.assertProtoEquals(expected_sentences[i], pred_sentence) + + def testSimpleTagger(self): + self.RunFullTrainingAndInference('simple-tagger', + 'simple_tagger_master_spec.textproto') + + def testSimpleTaggerLayerNorm(self): + spec = self.LoadSpec('simple_tagger_master_spec.textproto') + spec.component[0].network_unit.parameters['layer_norm_hidden'] = 'True' + spec.component[0].network_unit.parameters['layer_norm_input'] = 'True' + self.RunFullTrainingAndInference('simple-tagger', 
master_spec=spec) + + def testSimpleTaggerLSTM(self): + self.RunFullTrainingAndInference('simple-tagger-lstm', + 'simple_tagger_lstm_master_spec.textproto') + + def testSimpleTaggerWrappedLSTM(self): + self.RunFullTrainingAndInference( + 'simple-tagger-wrapped-lstm', + 'simple_tagger_wrapped_lstm_master_spec.textproto') + + def testSplitTagger(self): + self.RunFullTrainingAndInference('split-tagger', + 'split_tagger_master_spec.textproto') + + def testTaggerParser(self): + self.RunFullTrainingAndInference( + 'tagger-parser', + 'tagger_parser_master_spec.textproto', + component_weights=[0., 1., 1.], + unroll_using_oracle=[False, True, True], + expected_num_actions=12, + expected=_TAGGER_PARSER_EXPECTED_SENTENCES) + + def testTaggerParserWithAttention(self): + spec = self.LoadSpec('tagger_parser_master_spec.textproto') + + # Make the 'parser' component attend to the 'tagger' component. + self.assertEqual('tagger', spec.component[1].name) + self.assertEqual('parser', spec.component[2].name) + spec.component[2].attention_component = 'tagger' + + # Attention + beam decoding is not yet supported. + spec.component[2].inference_beam_size = 1 + + # Running with batch size equal to 1 should be fine. + self.RunFullTrainingAndInference( + 'tagger-parser', + master_spec=spec, + batch_size_limit=1, + component_weights=[0., 1., 1.], + unroll_using_oracle=[False, True, True], + expected_num_actions=9, + expected=_TAGGER_PARSER_EXPECTED_SENTENCES) + + def testTaggerParserWithAttentionBatchDeath(self): + spec = self.LoadSpec('tagger_parser_master_spec.textproto') + + # Make the 'parser' component attend to the 'tagger' component. 
+
+    self.assertEqual('tagger', spec.component[1].name)
+    self.assertEqual('parser', spec.component[2].name)
+    spec.component[2].attention_component = 'tagger'
+
+    # Trying to run with a batch size greater than 1 should fail:
+    with self.assertRaises(tf.errors.InvalidArgumentError):
+      self.RunFullTrainingAndInference(
+          'tagger-parser',
+          master_spec=spec,
+          component_weights=[0., 1., 1.],
+          unroll_using_oracle=[False, True, True],
+          expected_num_actions=9,
+          expected=_TAGGER_PARSER_EXPECTED_SENTENCES)
+
+  def testSimpleParser(self):
+    self.RunFullTrainingAndInference(
+        'simple-parser',
+        'simple_parser_master_spec.textproto',
+        expected_num_actions=8,
+        component_weights=[1],
+        expected=_LABELED_PARSER_EXPECTED_SENTENCES)
+
+  def checkOpOrder(self, name, endpoint, expected_op_order):
+    """Checks that ops ending up at root are called in the expected order.
+
+    To check the order, we find a path along the directed graph formed by
+    the inputs of each op. If op X has a chain of inputs to op Y, then X
+    cannot be executed before Y. There may be multiple paths between any two
+    ops, but the ops along any path are executed in that order. Therefore, we
+    look up the expected ops in reverse order.
+
+    Args:
+      name: string name of the endpoint, for logging.
+      endpoint: node whose execution we want to check.
+      expected_op_order: string list of op types, in the order we expect them
+          to be executed leading up to `endpoint`.
+ """ + for target in reversed(expected_op_order): + path = _find_input_path_to_type(endpoint, target) + self.assertNotEmpty(path) + logging.info('path[%d] from %s to %s: %s', + len(path), name, target, [_as_op(x).type for x in path]) + endpoint = path[-1] + + def getBuilderAndTarget( + self, test_name, master_spec_path='simple_parser_master_spec.textproto'): + """Generates a MasterBuilder and TrainTarget based on a simple spec.""" + master_spec = self.LoadSpec(master_spec_path) + hyperparam_config = spec_pb2.GridPoint() + target = spec_pb2.TrainTarget() + target.name = 'test-%s-train' % test_name + target.component_weights.extend([0] * len(master_spec.component)) + target.component_weights[-1] = 1.0 + target.unroll_using_oracle.extend([False] * len(master_spec.component)) + target.unroll_using_oracle[-1] = True + builder = graph_builder.MasterBuilder( + master_spec, hyperparam_config, pool_scope=test_name) + return builder, target + + def testGetSessionReleaseSession(self): + """Checks that GetSession and ReleaseSession are called in order.""" + test_name = 'get-session-release-session' + + with tf.Graph().as_default(): + # Build the actual graphs. The choice of spec is arbitrary, as long as + # training and annotation nodes can be constructed. + builder, target = self.getBuilderAndTarget(test_name) + train = builder.add_training_from_config(target) + anno = builder.add_annotation(test_name) + + # We want to ensure that certain ops are executed in the correct order. + # Specifically, the ops GetSession and ReleaseSession must both be called, + # and in that order. + # + # First of all, the path to a non-existent node type should be empty. + path = _find_input_path_to_type(train['run'], 'foo') + self.assertEmpty(path) + + # The train['run'] is expected to start by calling GetSession, and to end + # by calling ReleaseSession. + self.checkOpOrder('train', train['run'], ['GetSession', 'ReleaseSession']) + + # A similar contract applies to the annotations. 
+ self.checkOpOrder('annotations', anno['annotations'], + ['GetSession', 'ReleaseSession']) + + def testAttachDataReader(self): + """Checks that train['run'] and 'annotations' call AttachDataReader.""" + test_name = 'attach-data-reader' + + with tf.Graph().as_default(): + builder, target = self.getBuilderAndTarget(test_name) + train = builder.add_training_from_config(target) + anno = builder.add_annotation(test_name) + + # AttachDataReader should be called between GetSession and ReleaseSession. + self.checkOpOrder('train', train['run'], + ['GetSession', 'AttachDataReader', 'ReleaseSession']) + + # A similar contract applies to the annotations. + self.checkOpOrder('annotations', anno['annotations'], + ['GetSession', 'AttachDataReader', 'ReleaseSession']) + + def testSetTracingFalse(self): + """Checks that 'annotations' doesn't call SetTracing if disabled.""" + test_name = 'set-tracing-false' + + with tf.Graph().as_default(): + builder, _ = self.getBuilderAndTarget(test_name) + + # Note: "enable_tracing=False" is the default. + anno = builder.add_annotation(test_name, enable_tracing=False) + + # ReleaseSession should still be there. + path = _find_input_path_to_type(anno['annotations'], 'ReleaseSession') + self.assertNotEmpty(path) + + # As should AttachDataReader. + path = _find_input_path_to_type(path[-1], 'AttachDataReader') + self.assertNotEmpty(path) + + # But SetTracing should not be called. + set_tracing_path = _find_input_path_to_type(path[-1], 'SetTracing') + self.assertEmpty(set_tracing_path) + + # Instead, we should go to GetSession. 
+ path = _find_input_path_to_type(path[-1], 'GetSession') + self.assertNotEmpty(path) + + def testSetTracingTrue(self): + """Checks that 'annotations' does call SetTracing if enabled.""" + test_name = 'set-tracing-true' + + with tf.Graph().as_default(): + builder, _ = self.getBuilderAndTarget(test_name) + anno = builder.add_annotation(test_name, enable_tracing=True) + + # Check SetTracing is called after GetSession but before AttachDataReader. + self.checkOpOrder('annotations', anno['annotations'], [ + 'GetSession', 'SetTracing', 'AttachDataReader', 'ReleaseSession' + ]) + + # Same for the 'traces' output, if that's what you were to call. + self.checkOpOrder('traces', anno['traces'], [ + 'GetSession', 'SetTracing', 'AttachDataReader', 'ReleaseSession' + ]) + + +if __name__ == '__main__': + googletest.main() diff --git a/syntaxnet/dragnn/python/lexicon.py b/syntaxnet/dragnn/python/lexicon.py new file mode 100644 index 0000000000000000000000000000000000000000..b56ca0e8235d531c443aeb924c387321e21a9748 --- /dev/null +++ b/syntaxnet/dragnn/python/lexicon.py @@ -0,0 +1,73 @@ +# Copyright 2017 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""SyntaxNet lexicon utils.""" + +import os.path + + +import tensorflow as tf + +from syntaxnet import task_spec_pb2 +from syntaxnet.ops import gen_parser_ops + + +def create_lexicon_context(path): + """Construct a SyntaxNet TaskContext file for standard lexical resources.""" + context = task_spec_pb2.TaskSpec() + for name in [ + 'word-map', 'tag-map', 'tag-to-category', 'lcword-map', 'category-map', + 'char-map', 'char-ngram-map', 'label-map', 'prefix-table', 'suffix-table' + ]: + context.input.add(name=name).part.add(file_pattern=os.path.join(path, name)) + return context + + +def build_lexicon(output_path, + training_corpus_path, + tf_master='', + training_corpus_format='conll-sentence', + morph_to_pos=False, + **kwargs): + """Constructs a SyntaxNet lexicon at the given path. + + Args: + output_path: Location to construct the lexicon. + training_corpus_path: Path to CONLL formatted training data. + tf_master: TensorFlow master executor (string, defaults to '' to use the + local instance). + training_corpus_format: Format of the training corpus (defaults to CONLL; + search for REGISTER_SYNTAXNET_DOCUMENT_FORMAT for other formats). + morph_to_pos: Whether to serialize morph attributes to the tag field, + combined with category and fine POS tag. + **kwargs: Forwarded to the LexiconBuilder op. + """ + context = create_lexicon_context(output_path) + if morph_to_pos: + context.parameter.add(name='join_category_to_pos', value='true') + context.parameter.add(name='add_pos_as_attribute', value='true') + context.parameter.add(name='serialize_morph_to_pos', value='true') + + # Add the training data to the context. + resource = context.input.add() + resource.name = 'corpus' + resource.record_format.extend([training_corpus_format]) + part = resource.part.add() + part.file_pattern = training_corpus_path + + # Run the lexicon builder op. 
+ with tf.Session(tf_master) as sess: + sess.run( + gen_parser_ops.lexicon_builder( + task_context_str=str(context), corpus_name='corpus', **kwargs)) diff --git a/syntaxnet/dragnn/python/lexicon_test.py b/syntaxnet/dragnn/python/lexicon_test.py new file mode 100644 index 0000000000000000000000000000000000000000..d23442bc031cb7fff0be93c21230b1ac73786645 --- /dev/null +++ b/syntaxnet/dragnn/python/lexicon_test.py @@ -0,0 +1,79 @@ +# Copyright 2017 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for SyntaxNet lexicon.""" + +import os +import os.path + +import tensorflow as tf + +from google.protobuf import text_format + +from dragnn.python import lexicon + +# Imported for FLAGS.tf_master, which is used in the lexicon module. 
+ +from syntaxnet import parser_trainer +from syntaxnet import task_spec_pb2 + +import syntaxnet.load_parser_ops + +FLAGS = tf.app.flags.FLAGS +if not hasattr(FLAGS, 'test_srcdir'): + FLAGS.test_srcdir = '' +if not hasattr(FLAGS, 'test_tmpdir'): + FLAGS.test_tmpdir = tf.test.get_temp_dir() + + +_EXPECTED_CONTEXT = r""" +input { name: "word-map" Part { file_pattern: "/tmp/word-map" } } +input { name: "tag-map" Part { file_pattern: "/tmp/tag-map" } } +input { name: "tag-to-category" Part { file_pattern: "/tmp/tag-to-category" } } +input { name: "lcword-map" Part { file_pattern: "/tmp/lcword-map" } } +input { name: "category-map" Part { file_pattern: "/tmp/category-map" } } +input { name: "char-map" Part { file_pattern: "/tmp/char-map" } } +input { name: "char-ngram-map" Part { file_pattern: "/tmp/char-ngram-map" } } +input { name: "label-map" Part { file_pattern: "/tmp/label-map" } } +input { name: "prefix-table" Part { file_pattern: "/tmp/prefix-table" } } +input { name: "suffix-table" Part { file_pattern: "/tmp/suffix-table" } } +""" + + +class LexiconTest(tf.test.TestCase): + + def testCreateLexiconContext(self): + expected_context = task_spec_pb2.TaskSpec() + text_format.Parse(_EXPECTED_CONTEXT, expected_context) + self.assertProtoEquals( + lexicon.create_lexicon_context('/tmp'), expected_context) + + def testBuildLexicon(self): + empty_input_path = os.path.join(FLAGS.test_tmpdir, 'empty-input') + lexicon_output_path = os.path.join(FLAGS.test_tmpdir, 'lexicon-output') + + with open(empty_input_path, 'w'): + pass + + # The directory may already exist when running locally multiple times. + if not os.path.exists(lexicon_output_path): + os.mkdir(lexicon_output_path) + + # Just make sure this doesn't crash; the lexicon builder op is already + # exercised in its own unit test. 
+ lexicon.build_lexicon(lexicon_output_path, empty_input_path) + + +if __name__ == '__main__': + tf.test.main() diff --git a/syntaxnet/dragnn/python/load_dragnn_cc_impl.py b/syntaxnet/dragnn/python/load_dragnn_cc_impl.py new file mode 100644 index 0000000000000000000000000000000000000000..913d4eda3b4f9156aeb6001eaa838095f09bb005 --- /dev/null +++ b/syntaxnet/dragnn/python/load_dragnn_cc_impl.py @@ -0,0 +1,22 @@ +# Copyright 2016 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Loads dragnn_ops shared library.""" + +import os.path +import tensorflow as tf + +tf.load_op_library( + os.path.join(tf.resource_loader.get_data_files_path(), 'dragnn_cc_impl.so')) diff --git a/syntaxnet/dragnn/python/network_units.py b/syntaxnet/dragnn/python/network_units.py new file mode 100644 index 0000000000000000000000000000000000000000..2dabdcf301628315d5d3856fd12f3e678f5f7179 --- /dev/null +++ b/syntaxnet/dragnn/python/network_units.py @@ -0,0 +1,1602 @@ +"""Basic network units used in assembling DRAGNN graphs.""" + +from abc import ABCMeta +from abc import abstractmethod + + +import tensorflow as tf +from tensorflow.python.ops import nn +from tensorflow.python.ops import tensor_array_ops as ta +from tensorflow.python.platform import tf_logging as logging + +from dragnn.python import dragnn_ops +from syntaxnet.util import check +from syntaxnet.util import registry + + +def linked_embeddings_name(channel_id): + """Returns the name of the linked embedding matrix for some channel ID.""" + return 'linked_embedding_matrix_%d' % channel_id + + +def fixed_embeddings_name(channel_id): + """Returns the name of the fixed embedding matrix for some channel ID.""" + return 'fixed_embedding_matrix_%d' % channel_id + + +class StoredActivations(object): + """Wrapper around stored activation vectors. + + Because activations are produced and consumed in different layouts by bulk + vs. dynamic components, this class provides a simple common + interface/conversion API. It can be constructed from either a TensorArray + (dynamic) or a Tensor (bulk), and the resulting object to use for lookups is + either bulk_tensor (for bulk components) or dynamic_tensor (for dynamic + components). + """ + + def __init__(self, tensor=None, array=None, stride=None, dim=None): + """Creates ops for converting the input to either format. 
+ + If 'tensor' is used, then a conversion from [stride * steps, dim] to + [steps + 1, stride, dim] is performed for dynamic_tensor reads. + + If 'array' is used, then a conversion from [steps + 1, stride, dim] to + [stride * steps, dim] is performed for bulk_tensor reads. + + Args: + tensor: Bulk tensor input. + array: TensorArray dynamic input. + stride: stride of bulk tensor. Not used for dynamic. + dim: dim of bulk tensor. Not used for dynamic. + """ + if tensor is not None: + check.IsNone(array, 'Cannot initialize from tensor and array') + check.NotNone(stride, 'Stride is required for bulk tensor') + check.NotNone(dim, 'Dim is required for bulk tensor') + + self._bulk_tensor = tensor + with tf.name_scope('convert_to_dyn'): + tensor = tf.reshape(tensor, [stride, -1, dim]) + tensor = tf.transpose(tensor, perm=[1, 0, 2]) + pad = tf.zeros([1, stride, dim], dtype=tensor.dtype) + self._array_tensor = tf.concat([pad, tensor], 0) + + if array is not None: + check.IsNone(tensor, 'Cannot initialize from both tensor and array') + with tf.name_scope('convert_to_bulk'): + self._bulk_tensor = convert_network_state_tensorarray(array) + with tf.name_scope('convert_to_dyn'): + self._array_tensor = array.stack() + + @property + def bulk_tensor(self): + return self._bulk_tensor + + @property + def dynamic_tensor(self): + return self._array_tensor + + +class NamedTensor(object): + """Container for a tensor with associated name and dimension attributes.""" + + def __init__(self, tensor, name, dim=None): + """Inits NamedTensor with tensor, name and optional dim.""" + self.tensor = tensor + self.name = name + self.dim = dim + + +def add_embeddings(channel_id, feature_spec, seed): + """Adds a variable for the embedding of a given fixed feature. + + Supports pre-trained or randomly initialized embeddings In both cases, extra + vector is reserved for out-of-vocabulary words, so the embedding matrix has + the size of [feature_spec.vocabulary_size + 1, feature_spec.embedding_dim]. 
+
+  Args:
+    channel_id: Numeric id of the fixed feature channel
+    feature_spec: Feature spec protobuf of type FixedFeatureChannel
+    seed: used for random initializer
+
+  Returns:
+    tf.Variable object corresponding to the embedding for that feature.
+
+  Raises:
+    RuntimeError: if the pretrained embeddings are specified in resources
+        containing more than one part.
+  """
+  check.Gt(feature_spec.embedding_dim, 0,
+           'Embeddings requested for non-embedded feature: %s' % feature_spec)
+  name = fixed_embeddings_name(channel_id)
+  shape = [feature_spec.vocabulary_size + 1, feature_spec.embedding_dim]
+  if feature_spec.HasField('pretrained_embedding_matrix'):
+    if len(feature_spec.pretrained_embedding_matrix.part) > 1:
+      raise RuntimeError('pretrained_embedding_matrix resource contains '
+                         'more than one part:\n%s',
+                         str(feature_spec.pretrained_embedding_matrix))
+    if len(feature_spec.vocab.part) > 1:
+      raise RuntimeError('vocab resource contains more than one part:\n%s',
+                         str(feature_spec.vocab))
+    embeddings = dragnn_ops.dragnn_embedding_initializer(
+        embedding_input=feature_spec.pretrained_embedding_matrix.part[0]
+        .file_pattern,
+        vocab=feature_spec.vocab.part[0].file_pattern,
+        scaling_coefficient=1.0)
+    return tf.get_variable(name, initializer=tf.reshape(embeddings, shape))
+  else:
+    return tf.get_variable(
+        name,
+        shape,
+        initializer=tf.random_normal_initializer(
+            stddev=1.0 / feature_spec.embedding_dim**.5, seed=seed))
+
+
+def embedding_lookup(embedding_matrix, indices, ids, weights, size):
+  """Performs a weighted embedding lookup.
+
+  Args:
+    embedding_matrix: float Tensor from which to do the lookup.
+    indices: int Tensor for the output rows of the looked up vectors.
+    ids: int Tensor vectors to look up in the embedding_matrix.
+    weights: float Tensor weights to apply to the looked up vectors.
+    size: int number of output rows. Needed since some output rows may be
+      empty.
+
+  Returns:
+    Weighted embedding vectors.
+ """ + embeddings = tf.nn.embedding_lookup([embedding_matrix], ids) + # TODO(googleuser): allow skipping weights. + broadcast_weights_shape = tf.concat([tf.shape(weights), [1]], 0) + embeddings *= tf.reshape(weights, broadcast_weights_shape) + embeddings = tf.unsorted_segment_sum(embeddings, indices, size) + return embeddings + + +def fixed_feature_lookup(component, state, channel_id, stride): + """Looks up fixed features and passes them through embeddings. + + Embedding vectors may be scaled by weights if the features specify it. + + Args: + component: Component object in which to look up the fixed features. + state: MasterState object for the live nlp_saft::dragnn::MasterState. + channel_id: int id of the fixed feature to look up. + stride: int Tensor of current batch * beam size. + + Returns: + NamedTensor object containing the embedding vectors. + """ + feature_spec = component.spec.fixed_feature[channel_id] + check.Gt(feature_spec.embedding_dim, 0, + 'Embeddings requested for non-embedded feature: %s' % feature_spec) + embedding_matrix = component.get_variable(fixed_embeddings_name(channel_id)) + + with tf.op_scope([embedding_matrix], 'fixed_embedding_' + feature_spec.name): + indices, ids, weights = dragnn_ops.extract_fixed_features( + state.handle, component=component.name, channel_id=channel_id) + size = stride * feature_spec.size + embeddings = embedding_lookup(embedding_matrix, indices, ids, weights, size) + dim = feature_spec.size * feature_spec.embedding_dim + return NamedTensor( + tf.reshape(embeddings, [-1, dim]), feature_spec.name, dim=dim) + + +def get_input_tensor(fixed_embeddings, linked_embeddings): + """Helper function for constructing an input tensor from all the features. 
+ + Args: + fixed_embeddings: list of NamedTensor objects for fixed feature channels + linked_embeddings: list of NamedTensor objects for linked feature channels + + Returns: + a tensor of shape [N, D], where D is the total input dimension of the + concatenated feature channels + + Raises: + RuntimeError: if no features, fixed or linked, are configured. + """ + embeddings = fixed_embeddings + linked_embeddings + if not embeddings: + raise RuntimeError('There needs to be at least one feature set defined.') + + # Concat_v2 takes care of optimizing away the concatenation + # operation in the case when there is exactly one embedding input. + return tf.concat([e.tensor for e in embeddings], 1) + + +def get_input_tensor_with_stride(fixed_embeddings, linked_embeddings, stride): + """Constructs an input tensor with a separate dimension for steps. + + Args: + fixed_embeddings: list of NamedTensor objects for fixed feature channels + linked_embeddings: list of NamedTensor objects for linked feature channels + stride: int stride (i.e. beam * batch) to use to reshape the input + + Returns: + a tensor of shape [stride, num_steps, D], where D is the total input + dimension of the concatenated feature channels + """ + input_tensor = get_input_tensor(fixed_embeddings, linked_embeddings) + shape = tf.shape(input_tensor) + return tf.reshape(input_tensor, [stride, -1, shape[1]]) + + +def convert_network_state_tensorarray(tensorarray): + """Converts a source TensorArray to a source Tensor. + + Performs a permutation between the steps * [stride, D] shape of a + source TensorArray and the (flattened) [stride * steps, D] shape of + a source Tensor. + + The TensorArrays used during recurrence have an additional zeroth step that + needs to be removed. + + Args: + tensorarray: TensorArray object to be converted. + + Returns: + Tensor object after conversion. + """ + tensor = tensorarray.stack() # Results in a [steps, stride, D] tensor. 
+ tensor = tf.slice(tensor, [1, 0, 0], [-1, -1, -1]) # Lop off the 0th step. + tensor = tf.transpose(tensor, [1, 0, 2]) # Switch steps and stride. + return tf.reshape(tensor, [-1, tf.shape(tensor)[2]]) + + +def pass_through_embedding_matrix(act_block, embedding_matrix, step_idx): + """Passes the activations through the embedding_matrix. + + Takes care to handle out of bounds lookups. + + Args: + act_block: matrix of activations. + embedding_matrix: matrix of weights. + step_idx: vector containing step indices, with -1 indicating out of bounds. + + Returns: + the embedded activations. + """ + # Indicator vector for out of bounds lookups. + step_idx_mask = tf.expand_dims(tf.equal(step_idx, -1), -1) + + # Pad the last column of the activation vectors with the indicator. + act_block = tf.concat([act_block, tf.to_float(step_idx_mask)], 1) + return tf.matmul(act_block, embedding_matrix) + + +def lookup_named_tensor(name, named_tensors): + """Retrieves a NamedTensor by name. + + Args: + name: Name of the tensor to retrieve. + named_tensors: List of NamedTensor objects to search. + + Returns: + The NamedTensor in |named_tensors| with the |name|. + + Raises: + KeyError: If the |name| is not found among the |named_tensors|. + """ + for named_tensor in named_tensors: + if named_tensor.name == name: + return named_tensor + raise KeyError('Name "%s" not found in named tensors: %s' % + (name, named_tensors)) + + +def activation_lookup_recurrent(component, state, channel_id, source_array, + source_layer_size, stride): + """Looks up activations from tensor arrays. + + If the linked feature's embedding_dim is set to -1, the feature vectors are + not passed through (i.e. multiplied by) an embedding matrix. + + Args: + component: Component object in which to look up the fixed features. + state: MasterState object for the live nlp_saft::dragnn::MasterState. + channel_id: int id of the fixed feature to look up. 
+ source_array: TensorArray from which to fetch feature vectors, expected to + have size [steps + 1] elements of shape [stride, D] each. + source_layer_size: int length of feature vectors before embedding. + stride: int Tensor of current batch * beam size. + + Returns: + NamedTensor object containing the embedding vectors. + """ + feature_spec = component.spec.linked_feature[channel_id] + + with tf.name_scope('activation_lookup_recurrent_%s' % feature_spec.name): + # Linked features are returned as a pair of tensors, one indexing into + # steps, and one indexing within the activation tensor (beam x batch) + # stored for a step. + step_idx, idx = dragnn_ops.extract_link_features( + state.handle, component=component.name, channel_id=channel_id) + + # We take the [steps, batch*beam, ...] tensor array, gather and concat + # the steps we might need into a [some_steps*batch*beam, ...] tensor, + # and flatten 'idx' to dereference this new tensor. + # + # The first element of each tensor array is reserved for an + # initialization variable, so we offset all step indices by +1. + # + # TODO(googleuser): It would be great to not have to extract + # the steps in their entirety, forcing a copy of much of the + # TensorArray at each step. Better would be to support a + # TensorArray.gather_nd to pick the specific elements directly. + # TODO(googleuser): In the interim, a small optimization would + # be to use tf.unique instead of tf.range. 
+    # Gather only the contiguous band of steps [step_min, step_max] that is
+    # actually referenced. The +1 offsets skip the reserved initialization
+    # element at array index 0; the +2 upper bound is needed because tf.range
+    # excludes its limit, so the band includes array index (step_max + 1).
+    step_min = tf.reduce_min(step_idx)
+    ta_range = tf.range(step_min + 1, tf.reduce_max(step_idx) + 2)
+    act_block = source_array.gather(ta_range)
+    # Flatten [num_gathered_steps, stride, ...] into
+    # [num_gathered_steps * stride, ...] so one flat index can address any
+    # (step, beam*batch) activation row.
+    act_block = tf.reshape(act_block,
+                           tf.concat([[-1], tf.shape(act_block)[2:]], 0))
+    # Dereference a link as: row = (step - step_min) * stride + within-step idx.
+    # NOTE(review): out-of-bounds links (step_idx == -1) still fetch some row
+    # here; they are neutralized by the indicator column added in
+    # pass_through_embedding_matrix. Confirm behavior for the
+    # embedding_dim == -1 branch below, which skips that masking.
+    flat_idx = (step_idx - step_min) * stride + idx
+    act_block = tf.gather(act_block, flat_idx)
+    act_block = tf.reshape(act_block, [-1, source_layer_size])
+
+    if feature_spec.embedding_dim != -1:
+      # Embed the looked-up activations; each step contributes
+      # feature_spec.size vectors of embedding_dim columns.
+      embedding_matrix = component.get_variable(
+          linked_embeddings_name(channel_id))
+      act_block = pass_through_embedding_matrix(act_block, embedding_matrix,
+                                                step_idx)
+      dim = feature_spec.size * feature_spec.embedding_dim
+    else:
+      # If embedding_dim is -1, just output concatenation of activations.
+      dim = feature_spec.size * source_layer_size
+
+    return NamedTensor(
+        tf.reshape(act_block, [-1, dim]), feature_spec.name, dim=dim)
+
+
+def activation_lookup_other(component, state, channel_id, source_tensor,
+                            source_layer_size):
+  """Looks up activations from tensors.
+
+  If the linked feature's embedding_dim is set to -1, the feature vectors are
+  not passed through (i.e. multiplied by) an embedding matrix.
+
+  Args:
+    component: Component object in which to look up the fixed features.
+    state: MasterState object for the live nlp_saft::dragnn::MasterState.
+    channel_id: int id of the fixed feature to look up.
+    source_tensor: Tensor from which to fetch feature vectors. Expected to
+      have shape [steps + 1, stride, D].
+    source_layer_size: int length of feature vectors before embedding (D). It
+      would in principle be possible to get this dimension dynamically from
+      the second dimension of source_tensor. However, having it statically is
+      more convenient.
+
+  Returns:
+    NamedTensor object containing the embedding vectors.
+ """ + feature_spec = component.spec.linked_feature[channel_id] + + with tf.name_scope('activation_lookup_other_%s' % feature_spec.name): + # Linked features are returned as a pair of tensors, one indexing into + # steps, and one indexing within the stride (beam x batch) of each step. + step_idx, idx = dragnn_ops.extract_link_features( + state.handle, component=component.name, channel_id=channel_id) + + # The first element of each tensor array is reserved for an + # initialization variable, so we offset all step indices by +1. + indices = tf.stack([step_idx + 1, idx], axis=1) + act_block = tf.gather_nd(source_tensor, indices) + act_block = tf.reshape(act_block, [-1, source_layer_size]) + + if feature_spec.embedding_dim != -1: + embedding_matrix = component.get_variable( + linked_embeddings_name(channel_id)) + act_block = pass_through_embedding_matrix(act_block, embedding_matrix, + step_idx) + dim = feature_spec.size * feature_spec.embedding_dim + else: + # If embedding_dim is -1, just output concatenation of activations. + dim = feature_spec.size * source_layer_size + + return NamedTensor( + tf.reshape(act_block, [-1, dim]), feature_spec.name, dim=dim) + + +class LayerNorm(object): + """Utility to add layer normalization to any tensor. + + Layer normalization implementation is based on: + + https://arxiv.org/abs/1607.06450. "Layer Normalization" + Jimmy Lei Ba, Jamie Ryan Kiros, Geoffrey E. Hinton + + This object will construct additional variables that need to be optimized, and + these variables can be accessed via params(). + + Attributes: + params: List of additional parameters to be trained. + """ + + def __init__(self, component, name, shape, dtype): + """Construct variables to normalize an input of given shape. + + Arguments: + component: ComponentBuilder handle. + name: Human readable name to organize the variables. + shape: Shape of the layer to be normalized. + dtype: Type of the layer to be normalized. 
+ """ + self._name = name + self._shape = shape + self._component = component + beta = tf.get_variable( + 'beta_%s' % name, + shape=shape, + dtype=dtype, + initializer=tf.zeros_initializer()) + gamma = tf.get_variable( + 'gamma_%s' % name, + shape=shape, + dtype=dtype, + initializer=tf.ones_initializer()) + self._params = [beta, gamma] + + @property + def params(self): + return self._params + + def normalize(self, inputs): + """Apply normalization to input. + + The shape must match the declared shape in the constructor. + [This is copied from tf.contrib.rnn.LayerNormBasicLSTMCell.] + + Args: + inputs: Input tensor + + Returns: + Normalized version of input tensor. + + Raises: + ValueError: if inputs has undefined rank. + """ + inputs_shape = inputs.get_shape() + inputs_rank = inputs_shape.ndims + if inputs_rank is None: + raise ValueError('Inputs %s has undefined rank.' % inputs.name) + axis = range(1, inputs_rank) + + beta = self._component.get_variable('beta_%s' % self._name) + gamma = self._component.get_variable('gamma_%s' % self._name) + + with tf.variable_scope('layer_norm_%s' % self._name): + # Calculate the moments on the last axis (layer activations). + mean, variance = nn.moments(inputs, axis, keep_dims=True) + + # Compute layer normalization using the batch_normalization function. + variance_epsilon = 1E-12 + outputs = nn.batch_normalization( + inputs, mean, variance, beta, gamma, variance_epsilon) + outputs.set_shape(inputs_shape) + return outputs + + +class Layer(object): + """A layer in a feed-forward network. + + Attributes: + component: ComponentBuilderBase that produces this layer. + name: Name of this layer. + dim: Dimension of this layer, or negative if dynamic. 
+ """ + + def __init__(self, component, name, dim): + check.NotNone(dim, 'Dimension is required') + self.component = component + self.name = name + self.dim = dim + + def __str__(self): + return 'Layer: %s/%s[%d]' % (self.component.name, self.name, self.dim) + + def create_array(self, stride): + """Creates a new tensor array to store this layer's activations. + + Arguments: + stride: Possibly dynamic batch * beam size with which to initialize the + tensor array + + Returns: + TensorArray object + """ + check.Gt(self.dim, 0, 'Cannot create array when dimension is dynamic') + tensor_array = ta.TensorArray(dtype=tf.float32, + size=0, + dynamic_size=True, + clear_after_read=False, + infer_shape=False, + name='%s_array' % self.name) + + # Start each array with all zeros. Special values will still be learned via + # the extra embedding dimension stored for each linked feature channel. + initial_value = tf.zeros([stride, self.dim]) + return tensor_array.write(0, initial_value) + + +def get_attrs_with_defaults(parameters, defaults): + """Populates a dictionary with run-time attributes. + + Given defaults, populates any overrides from 'parameters' with their + corresponding converted values. 'defaults' should be typed. This is useful + for specifying NetworkUnit-specific configuration options. + + Args: + parameters: a map. + defaults: a typed set of default values. + + Returns: + dictionary populated with any overrides. + + Raises: + RuntimeError: if a key in parameters is not present in defaults. + """ + attrs = defaults + for key, value in parameters.iteritems(): + check.In(key, defaults, 'Unknown attribute: %s' % key) + if isinstance(defaults[key], bool): + attrs[key] = value.lower() == 'true' + else: + attrs[key] = type(defaults[key])(value) + return attrs + + +def maybe_apply_dropout(inputs, keep_prob, per_sequence, stride=None): + """Applies dropout, if so configured, to an input tensor. 
+ + The input may be rank 2 or 3 depending on whether the stride (i.e., batch + size) has been incorporated into the shape. + + Args: + inputs: [stride * num_steps, dim] or [stride, num_steps, dim] input tensor. + keep_prob: Scalar probability of keeping each input element. If >= 1.0, no + dropout is performed. + per_sequence: If true, sample the dropout mask once per sequence, instead of + once per step. Requires |stride| when true. + stride: Scalar batch size. Optional if |per_sequence| is false. + + Returns: + [stride * num_steps, dim] or [stride, num_steps, dim] tensor, matching the + shape of |inputs|, containing the masked or original inputs, depending on + whether dropout was actually performed. + """ + check.Ge(inputs.get_shape().ndims, 2, 'inputs must be rank 2 or 3') + check.Le(inputs.get_shape().ndims, 3, 'inputs must be rank 2 or 3') + flat = (inputs.get_shape().ndims == 2) + + if keep_prob >= 1.0: + return inputs + + if not per_sequence: + return tf.nn.dropout(inputs, keep_prob) + + check.NotNone(stride, 'per-sequence dropout requires stride') + dim = inputs.get_shape().as_list()[-1] + check.NotNone(dim, 'inputs must have static activation dimension, but have ' + 'static shape %s' % inputs.get_shape().as_list()) + + # If needed, restore the batch dimension to separate the sequences. + inputs_sxnxd = tf.reshape(inputs, [stride, -1, dim]) if flat else inputs + + # Replace |num_steps| with 1 in |noise_shape|, so the dropout mask broadcasts + # to all steps for a particular sequence. + noise_shape = [stride, 1, dim] + masked_sxnxd = tf.nn.dropout(inputs_sxnxd, keep_prob, noise_shape) + + # If needed, flatten out the batch dimension in the return value. + return tf.reshape(masked_sxnxd, [-1, dim]) if flat else masked_sxnxd + + +@registry.RegisteredClass +class NetworkUnitInterface(object): + """Base class to implement NN specifications. 
+ + This class contains the required functionality to build a network inside of a + DRAGNN graph: (1) initializing TF variables during __init__(), and (2) + creating particular instances from extracted features in create(). + + Attributes: + params (list): List of tf.Variable objects representing trainable + parameters. + layers (list): List of Layer objects to track network layers that should + be written to Tensors during training and inference. + """ + __metaclass__ = ABCMeta # required for @abstractmethod + + def __init__(self, component, init_layers=None, init_context_layers=None): + """Initializes parameters for embedding matrices. + + The subclass may provide optional lists of initial layers and context layers + to allow this base class constructor to use accessors like get_layer_size(), + which is required for networks that may be used self-recurrently. + + Args: + component: parent ComponentBuilderBase object. + init_layers: optional initial layers. + init_context_layers: optional initial context layers. + """ + self._seed = component.master.hyperparams.seed + self._component = component + self._params = [] + self._layers = init_layers if init_layers else [] + self._regularized_weights = [] + self._context_layers = init_context_layers if init_context_layers else [] + self._fixed_feature_dims = {} # mapping from name to dimension + self._linked_feature_dims = {} # mapping from name to dimension + + # Allocate parameters for all embedding channels. Note that for both Fixed + # and Linked embedding matrices, we store an additional +1 embedding that's + # used when the index is out of scope. 
+ for channel_id, spec in enumerate(component.spec.fixed_feature): + check.NotIn(spec.name, self._fixed_feature_dims, + 'Duplicate fixed feature') + check.Gt(spec.size, 0, 'Invalid fixed feature size') + if spec.embedding_dim > 0: + fixed_dim = spec.embedding_dim + self._params.append(add_embeddings(channel_id, spec, self._seed)) + else: + fixed_dim = 1 # assume feature ID extraction; only one ID per step + self._fixed_feature_dims[spec.name] = spec.size * fixed_dim + + for channel_id, spec in enumerate(component.spec.linked_feature): + check.NotIn(spec.name, self._linked_feature_dims, + 'Duplicate linked feature') + check.Gt(spec.size, 0, 'Invalid linked feature size') + if spec.source_component == component.name: + source_array_dim = self.get_layer_size(spec.source_layer) + else: + source = component.master.lookup_component[spec.source_component] + source_array_dim = source.network.get_layer_size(spec.source_layer) + + if spec.embedding_dim != -1: + check.Gt(source_array_dim, 0, + 'Cannot embed linked feature with dynamic dimension') + self._params.append( + tf.get_variable( + linked_embeddings_name(channel_id), + [source_array_dim + 1, spec.embedding_dim], + initializer=tf.random_normal_initializer( + stddev=1 / spec.embedding_dim**.5, seed=self._seed))) + + self._linked_feature_dims[spec.name] = spec.size * spec.embedding_dim + else: + # If embedding_dim is -1, linked features are not embedded. + self._linked_feature_dims[spec.name] = spec.size * source_array_dim + + # Compute the cumulative dimension of all inputs. If any input has dynamic + # dimension, then the result is -1. + input_dims = (self._fixed_feature_dims.values() + + self._linked_feature_dims.values()) + if any(x < 0 for x in input_dims): + self._concatenated_input_dim = -1 + else: + self._concatenated_input_dim = sum(input_dims) + tf.logging.info('component %s concat_input_dim %s', component.name, + self._concatenated_input_dim) + + # Allocate attention parameters. 
+ if self._component.spec.attention_component: + attention_source_component = self._component.master.lookup_component[ + self._component.spec.attention_component] + attention_hidden_layer_sizes = map( + int, attention_source_component.spec.network_unit.parameters[ + 'hidden_layer_sizes'].split(',')) + attention_hidden_layer_size = attention_hidden_layer_sizes[-1] + + hidden_layer_sizes = map(int, component.spec.network_unit.parameters[ + 'hidden_layer_sizes'].split(',')) + # The attention function is built on the last layer of hidden embeddings. + hidden_layer_size = hidden_layer_sizes[-1] + self._params.append( + tf.get_variable( + 'attention_weights_pm_0', + [attention_hidden_layer_size, hidden_layer_size], + initializer=tf.random_normal_initializer( + stddev=1e-4, seed=self._seed))) + + self._params.append( + tf.get_variable( + 'attention_weights_hm_0', [hidden_layer_size, hidden_layer_size], + initializer=tf.random_normal_initializer( + stddev=1e-4, seed=self._seed))) + + self._params.append( + tf.get_variable( + 'attention_bias_0', [1, hidden_layer_size], + initializer=tf.zeros_initializer())) + + self._params.append( + tf.get_variable( + 'attention_bias_1', [1, hidden_layer_size], + initializer=tf.zeros_initializer())) + + self._params.append( + tf.get_variable( + 'attention_weights_pu', + [attention_hidden_layer_size, component.num_actions], + initializer=tf.random_normal_initializer( + stddev=1e-4, seed=self._seed))) + + @abstractmethod + def create(self, + fixed_embeddings, + linked_embeddings, + context_tensor_arrays, + attention_tensor, + during_training, + stride=None): + """Constructs a feed-forward unit based on the features and context tensors. + + Args: + fixed_embeddings: list of NamedTensor objects + linked_embeddings: list of NamedTensor objects + context_tensor_arrays: optional list of TensorArray objects used for + implicit recurrence. + attention_tensor: optional Tensor used for attention. 
+ during_training: whether to create a network for training (vs inference). + stride: int scalar tensor containing the stride required for + bulk computation. + + Returns: + A list of tensors corresponding to the list of layers. + """ + pass + + @property + def layers(self): + return self._layers + + @property + def params(self): + return self._params + + @property + def regularized_weights(self): + return self._regularized_weights + + @property + def context_layers(self): + return self._context_layers + + def get_layer_index(self, layer_name): + """Gets the index of the given named layer of the network.""" + return [x.name for x in self.layers].index(layer_name) + + def get_layer_size(self, layer_name): + """Gets the size of the given named layer of the network. + + Args: + layer_name: string name of layer to look update + + Returns: + the size of the layer. + + Raises: + KeyError: if the layer_name to look up doesn't exist. + """ + for layer in self.layers: + if layer.name == layer_name: + return layer.dim + raise KeyError('Layer {} not found in component {}'.format( + layer_name, self._component.name)) + + def get_logits(self, network_tensors): + """Pulls out the logits from the tensors produced by this unit. + + Args: + network_tensors: list of tensors as output by create(). + + Raises: + NotImplementedError: by default a 'logits' tensor need not be implemented. + """ + raise NotImplementedError() + + def get_l2_regularized_weights(self): + """Gets the weights that need to be regularized.""" + return self.regularized_weights + + def attention(self, last_layer, attention_tensor): + """Compute the attention term for the network unit.""" + h_tensor = attention_tensor + + # Compute the attentions. 
+ # Using feed-forward net to map the two inputs into the same dimension + focus_tensor = tf.nn.tanh( + tf.matmul( + h_tensor, + self._component.get_variable('attention_weights_pm_0'), + name='h_x_pm') + self._component.get_variable('attention_bias_0')) + + context_tensor = tf.nn.tanh( + tf.matmul( + last_layer, + self._component.get_variable('attention_weights_hm_0'), + name='l_x_hm') + self._component.get_variable('attention_bias_1')) + # The tf.multiply in the following expression broadcasts along the 0 dim: + z_vec = tf.reduce_sum(tf.multiply(focus_tensor, context_tensor), 1) + p_vec = tf.nn.softmax(tf.reshape(z_vec, [1, -1])) + # The tf.multiply in the following expression broadcasts along the 1 dim: + r_vec = tf.expand_dims( + tf.reduce_sum( + tf.multiply( + h_tensor, tf.reshape(p_vec, [-1, 1]), name='time_together2'), + 0), + 0) + return tf.matmul( + r_vec, + self._component.get_variable('attention_weights_pu'), + name='time_together3') + + +class IdentityNetwork(NetworkUnitInterface): + """A network that returns concatenated input embeddings and activations.""" + + def __init__(self, component): + super(IdentityNetwork, self).__init__(component) + self._layers = [ + Layer( + component, + name='input_embeddings', + dim=self._concatenated_input_dim) + ] + + def create(self, + fixed_embeddings, + linked_embeddings, + context_tensor_arrays, + attention_tensor, + during_training, + stride=None): + return [get_input_tensor(fixed_embeddings, linked_embeddings)] + + def get_layer_size(self, layer_name): + # Note that get_layer_size is called by super.__init__ before any layers are + # constructed if and only if there are recurrent links. 
+ assert hasattr(self, + '_layers'), 'IdentityNetwork cannot have recurrent links' + return super(IdentityNetwork, self).get_layer_size(layer_name) + + def get_logits(self, network_tensors): + return network_tensors[-1] + + def get_context_layers(self): + return [] + + +class FeedForwardNetwork(NetworkUnitInterface): + """Implementation of C&M style feedforward network. + + Supports dropout and optional layer normalization. + + Layers: + layer_: Activations for i'th hidden layer (0-origin). + last_layer: Activations for the last hidden layer. This is a convenience + alias for "layer_", where n is the number of hidden layers. + logits: Logits associated with component actions. + """ + + def __init__(self, component): + """Initializes parameters required to run this network. + + Args: + component: parent ComponentBuilderBase object. + + Parameters used to construct the network: + hidden_layer_sizes: comma-separated list of ints, indicating the + number of hidden units in each hidden layer. + layer_norm_input (False): Whether or not to apply layer normalization + on the concatenated input to the network. + layer_norm_hidden (False): Whether or not to apply layer normalization + to the first set of hidden layer activations. + nonlinearity ('relu'): Name of function from module "tf.nn" to apply to + each hidden layer; e.g., "relu" or "elu". + dropout_keep_prob (-1.0): The probability that an input is not dropped. + If >= 1.0, disables dropout. If < 0.0, uses the global |dropout_rate| + hyperparameter. + dropout_per_sequence (False): If true, sample the dropout mask once per + sequence, instead of once per step. See Gal and Ghahramani + (https://arxiv.org/abs/1512.05287). + dropout_all_layers (False): If true, apply dropout to the input of all + hidden layers, instead of just applying it to the network input. + + Hyperparameters used: + dropout_rate: The probability that an input is not dropped. Only used + when the |dropout_keep_prob| parameter is negative. 
+ """ + self._attrs = get_attrs_with_defaults( + component.spec.network_unit.parameters, defaults={ + 'hidden_layer_sizes': '', + 'layer_norm_input': False, + 'layer_norm_hidden': False, + 'nonlinearity': 'relu', + 'dropout_keep_prob': -1.0, + 'dropout_per_sequence': False, + 'dropout_all_layers': False}) + + # Initialize the hidden layer sizes before running the base initializer, as + # the base initializer may need to know the size of of the hidden layer for + # recurrent connections. + self._hidden_layer_sizes = ( + map(int, self._attrs['hidden_layer_sizes'].split(',')) + if self._attrs['hidden_layer_sizes'] else []) + super(FeedForwardNetwork, self).__init__(component) + + # Infer dropout rate from network parameters and grid hyperparameters. + self._dropout_rate = self._attrs['dropout_keep_prob'] + if self._dropout_rate < 0.0: + self._dropout_rate = component.master.hyperparams.dropout_rate + + # Add layer norm if specified. + self._layer_norm_input = None + self._layer_norm_hidden = None + if self._attrs['layer_norm_input']: + self._layer_norm_input = LayerNorm(self._component, 'concat_input', + self._concatenated_input_dim, + tf.float32) + self._params.extend(self._layer_norm_input.params) + + if self._attrs['layer_norm_hidden']: + self._layer_norm_hidden = LayerNorm(self._component, 'layer_0', + self._hidden_layer_sizes[0], + tf.float32) + self._params.extend(self._layer_norm_hidden.params) + + # Extract nonlinearity from |tf.nn|. + self._nonlinearity = getattr(tf.nn, self._attrs['nonlinearity']) + + # TODO(googleuser): add initializer stddevs as part of the network unit's + # configuration. + self._weights = [] + last_layer_dim = self._concatenated_input_dim + + # Initialize variables for the parameters, and add Layer objects for + # cross-component bookkeeping. 
+ for index, hidden_layer_size in enumerate(self._hidden_layer_sizes): + weights = tf.get_variable( + 'weights_%d' % index, [last_layer_dim, hidden_layer_size], + initializer=tf.random_normal_initializer(stddev=1e-4, + seed=self._seed)) + self._params.append(weights) + if index > 0 or self._layer_norm_hidden is None: + self._params.append( + tf.get_variable( + 'bias_%d' % index, [hidden_layer_size], + initializer=tf.constant_initializer( + 0.2, dtype=tf.float32))) + + self._weights.append(weights) + self._layers.append( + Layer( + component, name='layer_%d' % index, dim=hidden_layer_size)) + last_layer_dim = hidden_layer_size + + # Add a convenience alias for the last hidden layer, if any. + if self._hidden_layer_sizes: + self._layers.append(Layer(component, 'last_layer', last_layer_dim)) + + # By default, regularize only the weights. + self._regularized_weights.extend(self._weights) + + if component.num_actions: + self._params.append( + tf.get_variable( + 'weights_softmax', [last_layer_dim, component.num_actions], + initializer=tf.random_normal_initializer( + stddev=1e-4, seed=self._seed))) + self._params.append( + tf.get_variable( + 'bias_softmax', [component.num_actions], + initializer=tf.zeros_initializer())) + self._layers.append( + Layer( + component, name='logits', dim=component.num_actions)) + + def create(self, + fixed_embeddings, + linked_embeddings, + context_tensor_arrays, + attention_tensor, + during_training, + stride=None): + """See base class.""" + input_tensor = get_input_tensor(fixed_embeddings, linked_embeddings) + + if during_training: + input_tensor.set_shape([None, self._concatenated_input_dim]) + input_tensor = self._maybe_apply_dropout(input_tensor, stride) + + if self._layer_norm_input: + input_tensor = self._layer_norm_input.normalize(input_tensor) + + tensors = [] + last_layer = input_tensor + for index, hidden_layer_size in enumerate(self._hidden_layer_sizes): + acts = tf.matmul(last_layer, + self._component.get_variable('weights_%d' % 
index)) + + # Note that the first layer was already handled before this loop. + # TODO(googleuser): Refactor this loop so dropout and layer normalization + # are applied consistently. + if during_training and self._attrs['dropout_all_layers'] and index > 0: + acts.set_shape([None, hidden_layer_size]) + acts = self._maybe_apply_dropout(acts, stride) + + # Don't add a bias term if we're going to apply layer norm, since layer + # norm includes a bias already. + if index == 0 and self._layer_norm_hidden: + acts = self._layer_norm_hidden.normalize(acts) + else: + acts = tf.nn.bias_add(acts, + self._component.get_variable('bias_%d' % index)) + + last_layer = self._nonlinearity(acts) + tensors.append(last_layer) + + # Add a convenience alias for the last hidden layer, if any. + if self._hidden_layer_sizes: + tensors.append(last_layer) + + if self._layers[-1].name == 'logits': + logits = tf.matmul( + last_layer, self._component.get_variable( + 'weights_softmax')) + self._component.get_variable('bias_softmax') + + if self._component.spec.attention_component: + logits += self.attention(last_layer, attention_tensor) + + logits = tf.identity(logits, name=self._layers[-1].name) + tensors.append(logits) + return tensors + + def get_layer_size(self, layer_name): + if layer_name == 'logits': + return self._component.num_actions + + if layer_name == 'last_layer': + return self._hidden_layer_sizes[-1] + + if not layer_name.startswith('layer_'): + logging.fatal( + 'Invalid layer name: "%s" Can only retrieve from "logits", ' + '"last_layer", and "layer_*".', + layer_name) + + # NOTE(danielandor): Since get_layer_size is called before the + # model has been built, we compute the layer size directly from + # the hyperparameters rather than from self._layers. 
+ layer_index = int(layer_name.split('_')[1]) + return self._hidden_layer_sizes[layer_index] + + def get_logits(self, network_tensors): + return network_tensors[-1] + + def _maybe_apply_dropout(self, inputs, stride): + return maybe_apply_dropout(inputs, self._dropout_rate, + self._attrs['dropout_per_sequence'], stride) + + +class LSTMNetwork(NetworkUnitInterface): + """Implementation of action LSTM style network.""" + + def __init__(self, component): + assert component.num_actions > 0, 'Component num actions must be positive.' + network_unit_spec = component.spec.network_unit + self._hidden_layer_sizes = ( + int)(network_unit_spec.parameters['hidden_layer_sizes']) + + self._input_dropout_rate = component.master.hyperparams.dropout_rate + self._recurrent_dropout_rate = ( + component.master.hyperparams.recurrent_dropout_rate) + if self._recurrent_dropout_rate < 0.0: + self._recurrent_dropout_rate = component.master.hyperparams.dropout_rate + + super(LSTMNetwork, self).__init__(component) + layer_input_dim = self._concatenated_input_dim + + self._context_layers = [] + + # TODO(googleuser): should we choose different initilizer, + # e.g. truncated_normal_initializer? 
+ self._x2i = tf.get_variable( + 'x2i', [layer_input_dim, self._hidden_layer_sizes], + initializer=tf.random_normal_initializer(stddev=1e-4, + seed=self._seed)) + self._h2i = tf.get_variable( + 'h2i', [self._hidden_layer_sizes, self._hidden_layer_sizes], + initializer=tf.random_normal_initializer(stddev=1e-4, + seed=self._seed)) + self._c2i = tf.get_variable( + 'c2i', [self._hidden_layer_sizes, self._hidden_layer_sizes], + initializer=tf.random_normal_initializer(stddev=1e-4, + seed=self._seed)) + self._bi = tf.get_variable( + 'bi', [self._hidden_layer_sizes], + initializer=tf.random_normal_initializer(stddev=1e-4, seed=self._seed)) + + self._x2o = tf.get_variable( + 'x2o', [layer_input_dim, self._hidden_layer_sizes], + initializer=tf.random_normal_initializer(stddev=1e-4, + seed=self._seed)) + self._h2o = tf.get_variable( + 'h2o', [self._hidden_layer_sizes, self._hidden_layer_sizes], + initializer=tf.random_normal_initializer(stddev=1e-4, + seed=self._seed)) + self._c2o = tf.get_variable( + 'c2o', [self._hidden_layer_sizes, self._hidden_layer_sizes], + initializer=tf.random_normal_initializer(stddev=1e-4, + seed=self._seed)) + self._bo = tf.get_variable( + 'bo', [self._hidden_layer_sizes], + initializer=tf.random_normal_initializer(stddev=1e-4, seed=self._seed)) + + self._x2c = tf.get_variable( + 'x2c', [layer_input_dim, self._hidden_layer_sizes], + initializer=tf.random_normal_initializer(stddev=1e-4, + seed=self._seed)) + self._h2c = tf.get_variable( + 'h2c', [self._hidden_layer_sizes, self._hidden_layer_sizes], + initializer=tf.random_normal_initializer(stddev=1e-4, + seed=self._seed)) + self._bc = tf.get_variable( + 'bc', [self._hidden_layer_sizes], + initializer=tf.random_normal_initializer(stddev=1e-4, seed=self._seed)) + + self._params.extend([ + self._x2i, self._h2i, self._c2i, self._bi, self._x2o, self._h2o, + self._c2o, self._bo, self._x2c, self._h2c, self._bc]) + + lstm_h_layer = Layer(component, name='lstm_h', dim=self._hidden_layer_sizes) + 
lstm_c_layer = Layer(component, name='lstm_c', dim=self._hidden_layer_sizes)
+
+    self._context_layers.append(lstm_h_layer)
+    self._context_layers.append(lstm_c_layer)
+
+    self._layers.extend(self._context_layers)
+
+    self._layers.append(
+        Layer(
+            component, name='layer_0', dim=self._hidden_layer_sizes))
+
+    # NOTE(review): |params| is a property returning |self._params|, so these
+    # appends do mutate the backing list; elsewhere in this file parameters
+    # are appended via |self._params| directly -- consider unifying for
+    # consistency.
+    self.params.append(tf.get_variable(
+        'weights_softmax', [self._hidden_layer_sizes, component.num_actions],
+        initializer=tf.random_normal_initializer(stddev=1e-4,
+                                                 seed=self._seed)))
+    self.params.append(
+        tf.get_variable(
+            'bias_softmax', [component.num_actions],
+            initializer=tf.zeros_initializer()))
+
+    self._layers.append(
+        Layer(
+            component, name='logits', dim=component.num_actions))
+
+  def create(self,
+             fixed_embeddings,
+             linked_embeddings,
+             context_tensor_arrays,
+             attention_tensor,
+             during_training,
+             stride=None):
+    """See base class."""
+    input_tensor = get_input_tensor(fixed_embeddings, linked_embeddings)
+
+    # context_tensor_arrays[0] is lstm_h
+    # context_tensor_arrays[1] is lstm_c
+    assert len(context_tensor_arrays) == 2
+    length = context_tensor_arrays[0].size()
+
+    # Get the (possibly averaged) parameters to execute the network.
+ x2i = self._component.get_variable('x2i') + h2i = self._component.get_variable('h2i') + c2i = self._component.get_variable('c2i') + bi = self._component.get_variable('bi') + x2o = self._component.get_variable('x2o') + h2o = self._component.get_variable('h2o') + c2o = self._component.get_variable('c2o') + bo = self._component.get_variable('bo') + x2c = self._component.get_variable('x2c') + h2c = self._component.get_variable('h2c') + bc = self._component.get_variable('bc') + + # i_h_tm1, i_c_tm1 = h_{t-1}, c_{t-1} + i_h_tm1 = context_tensor_arrays[0].read(length - 1) + i_c_tm1 = context_tensor_arrays[1].read(length - 1) + + # apply dropout according to http://arxiv.org/pdf/1409.2329v5.pdf + if during_training and self._input_dropout_rate < 1: + input_tensor = tf.nn.dropout(input_tensor, self._input_dropout_rate) + + # input -- i_t = sigmoid(affine(x_t, h_{t-1}, c_{t-1})) + i_ait = tf.matmul(input_tensor, x2i) + tf.matmul(i_h_tm1, h2i) + tf.matmul( + i_c_tm1, c2i) + bi + i_it = tf.sigmoid(i_ait) + + # forget -- f_t = 1 - i_t + i_ft = tf.ones([1, 1]) - i_it + + # write memory cell -- tanh(affine(x_t, h_{t-1})) + i_awt = tf.matmul(input_tensor, x2c) + tf.matmul(i_h_tm1, h2c) + bc + i_wt = tf.tanh(i_awt) + + # c_t = f_t \odot c_{t-1} + i_t \odot tanh(affine(x_t, h_{t-1})) + ct = tf.add( + tf.multiply(i_it, i_wt), tf.multiply(i_ft, i_c_tm1), name='lstm_c') + + # output -- o_t = sigmoid(affine(x_t, h_{t-1}, c_t)) + i_aot = tf.matmul(input_tensor, x2o) + tf.matmul(ct, c2o) + tf.matmul( + i_h_tm1, h2o) + bo + + i_ot = tf.sigmoid(i_aot) + + # ht = o_t \odot tanh(ct) + ph_t = tf.tanh(ct) + ht = tf.multiply(i_ot, ph_t, name='lstm_h') + + if during_training and self._recurrent_dropout_rate < 1: + ht = tf.nn.dropout( + ht, self._recurrent_dropout_rate, name='lstm_h_dropout') + + h = tf.identity(ht, name='layer_0') + + logits = tf.nn.xw_plus_b(ht, tf.get_variable('weights_softmax'), + tf.get_variable('bias_softmax')) + + if self._component.spec.attention_component: + logits += 
self.attention(ht, attention_tensor) + + logits = tf.identity(logits, name='logits') + # tensors will be consistent with the layers: + # [lstm_h, lstm_c, layer_0, logits] + tensors = [ht, ct, h, logits] + return tensors + + def get_layer_size(self, layer_name): + assert layer_name == 'layer_0', 'Can only retrieve from first hidden layer.' + return self._hidden_layer_sizes + + def get_logits(self, network_tensors): + return network_tensors[self.get_layer_index('logits')] + + +class ConvNetwork(NetworkUnitInterface): + """Implementation of a convolutional feed forward network.""" + + def __init__(self, component): + """Initializes kernels and biases for this convolutional net. + + Args: + component: parent ComponentBuilderBase object. + + Parameters used to construct the network: + widths: comma separated list of ints, number of steps input to the + convolutional kernel at every layer. + depths: comma separated list of ints, number of channels input to the + convolutional kernel at every layer. + output_embedding_dim: int, number of output channels for the convolutional + kernel of the last layer, which receives no ReLU activation and + therefore can be used in a softmax output. If zero, this final + layer is disabled entirely. + nonlinearity ('relu'): Name of function from module "tf.nn" to apply to + each hidden layer; e.g., "relu" or "elu". + dropout_keep_prob (-1.0): The probability that an input is not dropped. + If >= 1.0, disables dropout. If < 0.0, uses the global |dropout_rate| + hyperparameter. + dropout_per_sequence (False): If true, sample the dropout mask once per + sequence, instead of once per step. See Gal and Ghahramani + (https://arxiv.org/abs/1512.05287). + + Hyperparameters used: + dropout_rate: The probability that an input is not dropped. Only used + when the |dropout_keep_prob| parameter is negative. 
+ """ + + super(ConvNetwork, self).__init__(component) + self._attrs = get_attrs_with_defaults( + component.spec.network_unit.parameters, defaults={ + 'widths': '', + 'depths': '', + 'output_embedding_dim': 0, + 'nonlinearity': 'relu', + 'dropout_keep_prob': -1.0, + 'dropout_per_sequence': False}) + + self._weights = [] + self._biases = [] + self._widths = map(int, self._attrs['widths'].split(',')) + self._depths = map(int, self._attrs['depths'].split(',')) + self._output_dim = self._attrs['output_embedding_dim'] + if self._output_dim: + self._depths.append(self._output_dim) + self.kernel_shapes = [] + for i in range(len(self._depths) - 1): + self.kernel_shapes.append( + [1, self._widths[i], self._depths[i], self._depths[i + 1]]) + for i in range(len(self._depths) - 1): + with tf.variable_scope('conv%d' % i): + self._weights.append( + tf.get_variable( + 'weights', + self.kernel_shapes[i], + initializer=tf.random_normal_initializer( + stddev=1e-4, seed=self._seed), + dtype=tf.float32)) + bias_init = 0.0 if (i == len(self._widths) - 1) else 0.2 + self._biases.append( + tf.get_variable( + 'biases', + self.kernel_shapes[i][-1], + initializer=tf.constant_initializer(bias_init), + dtype=tf.float32)) + + # Extract nonlinearity from |tf.nn|. + self._nonlinearity = getattr(tf.nn, self._attrs['nonlinearity']) + + # Infer dropout rate from network parameters and grid hyperparameters. 
+ self._dropout_rate = self._attrs['dropout_keep_prob'] + if self._dropout_rate < 0.0: + self._dropout_rate = component.master.hyperparams.dropout_rate + + self._params.extend(self._weights + self._biases) + self._layers.append( + Layer( + component, name='conv_output', dim=self._depths[-1])) + self._regularized_weights.extend(self._weights[:-1] if self._output_dim else + self._weights) + + def create(self, + fixed_embeddings, + linked_embeddings, + context_tensor_arrays, + attention_tensor, + during_training, + stride=None): + """Requires |stride|; otherwise see base class.""" + if stride is None: + raise RuntimeError("ConvNetwork needs 'stride' and must be called in the " + "bulk feature extractor component.") + input_tensor = get_input_tensor_with_stride(fixed_embeddings, + linked_embeddings, stride) + + # TODO(googleuser): Add context and attention. + del context_tensor_arrays, attention_tensor + + # On CPU, add a dimension so that the 'image' has shape + # [stride, 1, num_steps, D]. + conv = tf.expand_dims(input_tensor, 1) + for i in range(len(self._depths) - 1): + with tf.variable_scope('conv%d' % i, reuse=True) as scope: + if during_training: + conv.set_shape([None, 1, None, self._depths[i]]) + conv = self._maybe_apply_dropout(conv, stride) + conv = tf.nn.conv2d( + conv, + self._component.get_variable('weights'), [1, 1, 1, 1], + padding='SAME') + conv = tf.nn.bias_add(conv, self._component.get_variable('biases')) + if i < (len(self._weights) - 1) or not self._output_dim: + conv = self._nonlinearity(conv, name=scope.name) + return [ + tf.reshape( + conv, [-1, self._depths[-1]], name='reshape_activations') + ] + + def _maybe_apply_dropout(self, inputs, stride): + # The |inputs| are rank 4 (one 1xN "image" per sequence). Squeeze out and + # restore the singleton image height, so dropout is applied to the normal + # rank 3 batched input tensor. 
+ inputs = tf.squeeze(inputs, [1]) + inputs = maybe_apply_dropout(inputs, self._dropout_rate, + self._attrs['dropout_per_sequence'], stride) + inputs = tf.expand_dims(inputs, 1) + return inputs + + +class PairwiseConvNetwork(NetworkUnitInterface): + """Implementation of a pairwise 2D convolutional feed forward network. + + For a sequence of N tokens, all N^2 pairs of concatenated input features are + constructed. If each input vector is of length D, then the sequence is + represented by an image of dimensions [N, N] with 2*D channels per pixel. + I.e. pixel [i, j] has a representation that is the concatenation of the + representations of the tokens at i and at j. + + To use this network for graph edge scoring, for instance by using the "heads" + transition system, the output layer needs to have dimensions [N, N] and only + a single channel. The network takes care of outputting an [N, N] sized layer, + but the user needs to ensure that the output depth equals 1. + + TODO(googleuser): Like Dozat and Manning, we will need an + additional network to label the edges, and the ability to read head + and modifier representations from different inputs. + """ + + def __init__(self, component): + """Initializes kernels and biases for this convolutional net. + + Parameters used to construct the network: + depths: comma separated list of ints, number of channels input to the + convolutional kernel at every layer. + widths: comma separated list of ints, number of steps input to the + convolutional kernel at every layer. + relu_layers: comma separated list of ints, the id of layers after which + to apply a relu activation. *By default, all but the final layer will + have a relu activation applied.* + + To generate a network with M layers, both 'depths' and 'widths' must be of + length M. The input depth of the first layer is inferred from the total + concatenated size of the input features. + + Args: + component: parent ComponentBuilderBase object. 
+ + Raises: + RuntimeError: if the number of depths and weights are not equal. + ValueError: if the final depth is not equal to 1. + """ + parameters = component.spec.network_unit.parameters + super(PairwiseConvNetwork, self).__init__(component) + + # Each input pixel will comprise the concatenation of two tokens, so the + # input depth is double that for a single token. + self._depths = [self._concatenated_input_dim * 2] + self._depths.extend(map(int, parameters['depths'].split(','))) + self._widths = map(int, parameters['widths'].split(',')) + self._num_layers = len(self._widths) + if len(self._depths) != self._num_layers + 1: + raise RuntimeError('Unmatched depths/weights %s/%s' % + (parameters['depths'], parameters['weights'])) + if self._depths[-1] != 1: + raise ValueError('Final depth is not equal to 1 in %s' % + parameters['depths']) + + self._kernel_shapes = [] + for i, width in enumerate(self._widths): + self._kernel_shapes.append( + [width, width, self._depths[i], self._depths[i + 1]]) + if parameters['relu_layers']: + self._relu_layers = set(map(int, parameters['relu_layers'].split(','))) + else: + self._relu_layers = set(range(self._num_layers - 1)) + + self._weights = [] + self._biases = [] + for i, kernel_shape in enumerate(self._kernel_shapes): + with tf.variable_scope('conv%d' % i): + self._weights.append( + tf.get_variable( + 'weights', + kernel_shape, + initializer=tf.random_normal_initializer( + stddev=1e-4, seed=self._seed), + dtype=tf.float32)) + bias_init = 0.0 if i in self._relu_layers else 0.2 + self._biases.append( + tf.get_variable( + 'biases', + kernel_shape[-1], + initializer=tf.constant_initializer(bias_init), + dtype=tf.float32)) + + self._params.extend(self._weights + self._biases) + self._layers.append(Layer(component, name='conv_output', dim=-1)) + self._regularized_weights.extend(self._weights[:-1]) + + def create(self, + fixed_embeddings, + linked_embeddings, + context_tensor_arrays, + attention_tensor, + during_training, + 
stride=None): + """Requires |stride|; otherwise see base class.""" + # TODO(googleuser): Normalize the arguments to create(). 'stride' + # is unused by the recurrent network units, while 'context_tensor_arrays' + # and 'attention_tensor_array' is unused by bulk network units. b/33587044 + if stride is None: + raise ValueError("PairwiseConvNetwork needs 'stride'") + + input_tensor = get_input_tensor_with_stride(fixed_embeddings, + linked_embeddings, stride) + + # TODO(googleuser): Add dropout. + del context_tensor_arrays, attention_tensor, during_training # Unused. + + num_steps = tf.shape(input_tensor)[1] + arg1 = tf.expand_dims(input_tensor, 1) + arg1 = tf.tile(arg1, tf.stack([1, num_steps, 1, 1])) + arg2 = tf.expand_dims(input_tensor, 2) + arg2 = tf.tile(arg2, tf.stack([1, 1, num_steps, 1])) + conv = tf.concat([arg1, arg2], 3) + for i in xrange(self._num_layers): + with tf.variable_scope('conv%d' % i, reuse=True) as scope: + conv = tf.nn.conv2d( + conv, + self._component.get_variable('weights'), [1, 1, 1, 1], + padding='SAME') + conv = tf.nn.bias_add(conv, self._component.get_variable('biases')) + if i in self._relu_layers: + conv = tf.nn.relu(conv, name=scope.name) + return [tf.reshape(conv, [-1, num_steps], name='reshape_activations')] + + +class ExportFixedFeaturesNetwork(NetworkUnitInterface): + """A network that exports fixed features as layers. + + Each fixed feature embedding is output as a layer whose name and dimension are + set to the name and dimension of the corresponding fixed feature. 
+ """ + + def __init__(self, component): + """Initializes exported layers.""" + super(ExportFixedFeaturesNetwork, self).__init__(component) + for feature_spec in component.spec.fixed_feature: + name = feature_spec.name + dim = self._fixed_feature_dims[name] + self._layers.append(Layer(component, name, dim)) + + def create(self, + fixed_embeddings, + linked_embeddings, + context_tensor_arrays, + attention_tensor, + during_training, + stride=None): + """See base class.""" + check.Eq(len(self.layers), len(fixed_embeddings)) + for index in range(len(fixed_embeddings)): + check.Eq(self.layers[index].name, fixed_embeddings[index].name) + return [fixed_embedding.tensor for fixed_embedding in fixed_embeddings] + + +class SplitNetwork(NetworkUnitInterface): + """Network unit that splits its input into slices of equal dimension. + + Parameters: + num_slices: The number of slices to split the input into, S. The input must + have static dimension D, where D % S == 0. + + Features: + All inputs are concatenated before being split. + + Layers: + slice_0: [B * N, D / S] The first slice of the input. + slice_1: [B * N, D / S] The second slice of the input. + ... + """ + + def __init__(self, component): + """Initializes weights and layers. + + Args: + component: Parent ComponentBuilderBase object. 
+ """ + super(SplitNetwork, self).__init__(component) + + parameters = component.spec.network_unit.parameters + self._num_slices = int(parameters['num_slices']) + check.Gt(self._num_slices, 0, 'Invalid number of slices.') + check.Eq(self._concatenated_input_dim % self._num_slices, 0, + 'Input dimension %s does not evenly divide into %s slices' % + (self._concatenated_input_dim, self._num_slices)) + self._slice_dim = int(self._concatenated_input_dim / self._num_slices) + + for slice_index in xrange(self._num_slices): + self._layers.append( + Layer(self, 'slice_%s' % slice_index, self._slice_dim)) + + def create(self, + fixed_embeddings, + linked_embeddings, + context_tensor_arrays, + attention_tensor, + during_training, + stride=None): + input_bnxd = get_input_tensor(fixed_embeddings, linked_embeddings) + return tf.split(input_bnxd, self._num_slices, axis=1) diff --git a/syntaxnet/dragnn/python/network_units_test.py b/syntaxnet/dragnn/python/network_units_test.py new file mode 100644 index 0000000000000000000000000000000000000000..b4cb59f0cc7aade9a53d785a054aa872b86f87db --- /dev/null +++ b/syntaxnet/dragnn/python/network_units_test.py @@ -0,0 +1,144 @@ +"""Tests for network_units.""" + + +import tensorflow as tf +from tensorflow.python.framework import test_util +from tensorflow.python.platform import googletest + +from dragnn.protos import spec_pb2 +from dragnn.python import network_units + +import dragnn.python.load_dragnn_cc_impl +import syntaxnet.load_parser_ops + +FLAGS = tf.app.flags.FLAGS + + +class NetworkUnitsConverterTest(test_util.TensorFlowTestCase): + + def testConvertNetworkStateTensorarray(self): + with self.test_session() as session: + ta = tf.TensorArray( + dtype=tf.float32, + size=0, + dynamic_size=True, + clear_after_read=False, + infer_shape=False) + # Create a 3-step x 2-stride x 2-feature-dim source array. + ta = ta.write(0, [[0., 0.]] * 2) # The zeroth step will be removed. 
+ ta = ta.write(1, [[1., 10.]] * 2) + ta = ta.write(2, [[2., 20.]] * 2) + ta = ta.write(3, [[3., 30.]] * 2) + tensor = network_units.convert_network_state_tensorarray(ta) + actual = session.run(tensor) + self.assertEqual(actual.shape, (6, 2)) + + # The arrangement of the values is expected to be stride * steps. + expected = [[1., 10.], [2., 20.], [3., 30.], [1., 10.], [2., 20.], + [3., 30.]] + self.assertAllEqual(actual, expected) + + +class MockComponent(object): + + def __init__(self, master, component_spec): + self.master = master + self.spec = component_spec + self.name = component_spec.name + self.beam_size = 1 + self._attrs = {} + + def attr(self, name): + return self._attrs[name] + + +class MockMaster(object): + + def __init__(self): + self.spec = spec_pb2.MasterSpec() + self.hyperparams = spec_pb2.GridPoint() + self.lookup_component = { + 'previous': MockComponent(self, spec_pb2.ComponentSpec()) + } + + +class NetworkUnitsLookupTest(test_util.TensorFlowTestCase): + + def setUp(self): + # Clear the graph and all existing variables. Otherwise, variables created + # in different tests may collide with each other. + tf.reset_default_graph() + + self._master = MockMaster() + self._master.spec = spec_pb2.MasterSpec() + + # Add a component with a linked feature. + component_spec = self._master.spec.component.add() + component_spec.name = 'fake_linked' + component_spec.backend.registered_name = 'FakeComponent' + linked_feature = component_spec.linked_feature.add() + linked_feature.source_component = 'fake_linked' + linked_feature.source_translator = 'identity' + linked_feature.embedding_dim = -1 + linked_feature.size = 2 + self._linked_component = MockComponent(self._master, component_spec) + + # Add a feature with a fixed feature. 
+ component_spec = self._master.spec.component.add() + component_spec.name = 'fake_fixed' + component_spec.backend.registered_name = 'FakeComponent' + fixed_feature = component_spec.fixed_feature.add() + fixed_feature.fml = 'input.word' + fixed_feature.embedding_dim = 1 + fixed_feature.size = 1 + self._fixed_component = MockComponent(self._master, component_spec) + + def testExportFixedFeaturesNetworkWithEnabledEmbeddingMatrix(self): + network = network_units.ExportFixedFeaturesNetwork(self._fixed_component) + self.assertEqual(1, len(network.params)) + + def testExportFixedFeaturesNetworkWithDisabledEmbeddingMatrix(self): + self._fixed_component.spec.fixed_feature[0].embedding_dim = -1 + network = network_units.ExportFixedFeaturesNetwork(self._fixed_component) + self.assertEqual(0, len(network.params)) + + +class GetAttrsWithDefaultsTest(test_util.TensorFlowTestCase): + + def MakeAttrs(self, defaults, key=None, value=None): + """Returns attrs based on the |defaults| and one |key|,|value| override.""" + spec = spec_pb2.RegisteredModuleSpec() + if key and value: + spec.parameters[key] = value + return network_units.get_attrs_with_defaults(spec.parameters, defaults) + + def testFalseValues(self): + + def _assert_attr_is_false(value=None): + key = 'foo' + attrs = self.MakeAttrs({key: False}, key, value) + self.assertFalse(attrs[key]) + + _assert_attr_is_false() + _assert_attr_is_false('false') + _assert_attr_is_false('False') + _assert_attr_is_false('FALSE') + _assert_attr_is_false('no') + _assert_attr_is_false('whatever') + _assert_attr_is_false(' ') + _assert_attr_is_false('') + + def testTrueValues(self): + + def _assert_attr_is_true(value=None): + key = 'foo' + attrs = self.MakeAttrs({key: False}, key, value) + self.assertTrue(attrs[key]) + + _assert_attr_is_true('true') + _assert_attr_is_true('True') + _assert_attr_is_true('TRUE') + + +if __name__ == '__main__': + googletest.main() diff --git a/syntaxnet/dragnn/python/render_parse_tree_graphviz.py 
b/syntaxnet/dragnn/python/render_parse_tree_graphviz.py new file mode 100644 index 0000000000000000000000000000000000000000..4b56d0139210936a7676b6762cc9a7fa4da4c37b --- /dev/null +++ b/syntaxnet/dragnn/python/render_parse_tree_graphviz.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +"""Renders parse trees with Graphviz.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import base64 +import warnings + +import pygraphviz + + +def parse_tree_graph(sentence): + """Constructs a parse tree graph. + + Args: + sentence: syntaxnet.Sentence instance. + + Returns: + HTML graph contents, as a string. + """ + graph = pygraphviz.AGraph(directed=True, strict=False, rankdir="TB") + + for i, token in enumerate(sentence.token): + node_id = "tok_{}".format(i) + graph.add_node(node_id, label=token.word) + if token.head >= 0: + src_id = "tok_{}".format(token.head) + graph.add_edge( + src_id, + node_id, + label=token.label, + key="parse_{}_{}".format(node_id, src_id)) + + with warnings.catch_warnings(): + # Fontconfig spews some warnings, suppress them for now. (Especially because + # they can clutter IPython notebooks). + warnings.simplefilter("ignore") + svg = graph.draw(format="svg", prog="dot") + + # For both inline and "new window" displays, we show the tokens with the + # graph. (The sentence order of nodes is sometimes difficult to read.) + image_and_text = "

Text: {}

{}".format(" ".join( + token.word for token in sentence.token), svg) + + # We generate a base64 URI. This is not too big, but older browsers may not + # handle it well. + new_window_html = ("" + + image_and_text) + as_uri = "data:text/html;base64,{}".format(base64.b64encode(new_window_html)) + + return "{}

Open in new window

".format( + image_and_text, as_uri) diff --git a/syntaxnet/dragnn/python/render_parse_tree_graphviz_test.py b/syntaxnet/dragnn/python/render_parse_tree_graphviz_test.py new file mode 100644 index 0000000000000000000000000000000000000000..3339b6ffe4140948cbadf02cf877c18bf6a161e1 --- /dev/null +++ b/syntaxnet/dragnn/python/render_parse_tree_graphviz_test.py @@ -0,0 +1,27 @@ +"""Tests for ....dragnn.python.render_parse_tree_graphviz.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from tensorflow.python.platform import googletest +from dragnn.python import render_parse_tree_graphviz +from syntaxnet import sentence_pb2 + + +class RenderParseTreeGraphvizTest(googletest.TestCase): + + def testGiveMeAName(self): + document = sentence_pb2.Sentence() + document.token.add(start=0, end=0, word='hi', head=1, label='something') + document.token.add(start=1, end=1, word='there') + contents = render_parse_tree_graphviz.parse_tree_graph(document) + self.assertIn('{name}
+ {transition_name}
+ {network_name}
+ {num_actions_str}
+ hidden: {num_hidden} + >""".format( + name=component.name, + transition_name=component.transition_system.registered_name, + network_name=component.network_unit.registered_name, + num_actions_str="{} action{}".format(component.num_actions, "s" if + component.num_actions != 1 else ""), + num_hidden=component.network_unit.parameters.get("hidden_layer_sizes", + "not specified")) + + +def _linked_feature_label(linked_feature): + """Generates the label on edges between components. + + Args: + linked_feature: spec_pb2.LinkedFeatureChannel proto + + Returns: + String label + """ + return """< + {name}
+ F={num_features} D={projected_dim}
+ {fml}
+ {source_translator}
+ {source_layer} + >""".format( + name=linked_feature.name, + num_features=linked_feature.size, + projected_dim=linked_feature.embedding_dim, + fml=linked_feature.fml, + source_translator=linked_feature.source_translator, + source_layer=linked_feature.source_layer) + + +def master_spec_graph(master_spec): + """Constructs a master spec graph. + + Args: + master_spec: MasterSpec proto. + + Raises: + TypeError, if master_spec is not the right type. N.B. that this may be + raised if you import proto classes in non-standard ways (e.g. dynamically). + + Returns: + SVG graph contents as a string. + """ + if not isinstance(master_spec, spec_pb2.MasterSpec): + raise TypeError("master_spec_graph() expects a MasterSpec input.") + + graph = pygraphviz.AGraph(directed=True) + + graph.node_attr.update( + shape="box", + style="filled", + fillcolor="white", + fontname="roboto, helvetica, arial", + fontsize=11) + graph.edge_attr.update(fontname="roboto, helvetica, arial", fontsize=11) + + for component in master_spec.component: + graph.add_node(component.name, label=_component_contents(component)) + + for component in master_spec.component: + for linked_feature in component.linked_feature: + graph.add_edge( + linked_feature.source_component, + component.name, + label=_linked_feature_label(linked_feature)) + + with warnings.catch_warnings(): + # Fontconfig spews some warnings, suppress them for now. (Especially because + # they can clutter IPython notebooks). 
+ warnings.simplefilter("ignore") + return graph.draw(format="svg", prog="dot") diff --git a/syntaxnet/dragnn/python/render_spec_with_graphviz_test.py b/syntaxnet/dragnn/python/render_spec_with_graphviz_test.py new file mode 100644 index 0000000000000000000000000000000000000000..c5021d30bea4c95ed732bb4cb663d9832b086577 --- /dev/null +++ b/syntaxnet/dragnn/python/render_spec_with_graphviz_test.py @@ -0,0 +1,60 @@ +"""Tests for render_spec_with_graphviz.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from tensorflow.python.platform import googletest +from dragnn.protos import spec_pb2 +from dragnn.python import render_spec_with_graphviz +from dragnn.python import spec_builder + + +def _make_basic_master_spec(): + """Constructs a simple spec. + + Modified version of nlp/saft/opensource/dragnn/tools/parser_trainer.py + + Returns: + spec_pb2.MasterSpec instance. + """ + # Construct the "lookahead" ComponentSpec. This is a simple right-to-left RNN + # sequence model, which encodes the context to the right of each token. It has + # no loss except for the downstream components. + lookahead = spec_builder.ComponentSpecBuilder('lookahead') + lookahead.set_network_unit( + name='FeedForwardNetwork', hidden_layer_sizes='256') + lookahead.set_transition_system(name='shift-only', left_to_right='true') + lookahead.add_fixed_feature(name='words', fml='input.word', embedding_dim=64) + lookahead.add_rnn_link(embedding_dim=-1) + + # Construct the ComponentSpec for parsing. 
+ parser = spec_builder.ComponentSpecBuilder('parser') + parser.set_network_unit(name='FeedForwardNetwork', hidden_layer_sizes='256') + parser.set_transition_system(name='arc-standard') + parser.add_token_link(source=lookahead, fml='input.focus', embedding_dim=32) + + master_spec = spec_pb2.MasterSpec() + master_spec.component.extend([lookahead.spec, parser.spec]) + return master_spec + + +class RenderSpecWithGraphvizTest(googletest.TestCase): + + def test_constructs_simple_graph(self): + master_spec = _make_basic_master_spec() + contents = render_spec_with_graphviz.master_spec_graph(master_spec) + self.assertIn('lookahead', contents) + self.assertIn('