# Lint as: python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Sentence prediction (classification) task."""
import dataclasses

from absl import logging
import numpy as np
from scipy import stats
from sklearn import metrics as sklearn_metrics
import tensorflow as tf
import tensorflow_hub as hub

from official.core import base_task
from official.modeling.hyperparams import base_config
from official.modeling.hyperparams import config_definitions as cfg
from official.nlp.configs import encoders
from official.nlp.data import data_loader_factory
from official.nlp.modeling import models
from official.nlp.tasks import utils


# Metric families this task knows how to compute; `metric_type` in the task
# config must be one of these.
METRIC_TYPES = frozenset(
    {'accuracy', 'matthews_corrcoef', 'pearson_spearman_corr'})


@dataclasses.dataclass
class ModelConfig(base_config.Config):
  """A classifier/regressor configuration."""
  num_classes: int = 0
  use_encoder_pooler: bool = False
  encoder: encoders.TransformerEncoderConfig = (
      encoders.TransformerEncoderConfig())


47
48
49
@dataclasses.dataclass
class SentencePredictionConfig(cfg.TaskConfig):
  """The model config."""
  # At most one of `init_checkpoint` and `hub_module_url` can
  # be specified.
  init_checkpoint: str = ''
  # If true, `initialize` also restores the pooler dense layer from the
  # pretraining checkpoint (see SentencePredictionTask.initialize).
  init_cls_pooler: bool = False
  hub_module_url: str = ''
  # Must be one of METRIC_TYPES; validated in SentencePredictionTask.__init__.
  metric_type: str = 'accuracy'
  # Defines the concrete model config at instantiation time.
  model: ModelConfig = ModelConfig()
  train_data: cfg.DataConfig = cfg.DataConfig()
  validation_data: cfg.DataConfig = cfg.DataConfig()


@base_task.register_task_cls(SentencePredictionConfig)
class SentencePredictionTask(base_task.Task):
  """Task object for sentence_prediction."""

  def __init__(self, params=cfg.TaskConfig, logging_dir=None):
    """Initializes the task from its config.

    Args:
      params: A `SentencePredictionConfig` instance describing the task.
      logging_dir: Optional logging directory forwarded to the base `Task`.

    Raises:
      ValueError: If both `hub_module_url` and `init_checkpoint` are set, or
        if `metric_type` is not one of `METRIC_TYPES`.
    """
    super().__init__(params, logging_dir)
    if params.hub_module_url and params.init_checkpoint:
      raise ValueError('At most one of `hub_module_url` and '
                       '`init_checkpoint` can be specified.')
    # Load the hub module once up front so build_model() can reuse it.
    self._hub_module = hub.load(
        params.hub_module_url) if params.hub_module_url else None
    if params.metric_type not in METRIC_TYPES:
      raise ValueError('Invalid metric_type: {}'.format(params.metric_type))
    self.metric_type = params.metric_type

  def build_model(self):
    """Builds and returns the classifier/regressor keras model."""
    model_cfg = self.task_config.model
    if self._hub_module:
      encoder_network = utils.get_encoder_from_hub(self._hub_module)
    else:
      encoder_network = encoders.instantiate_encoder_from_cfg(
          model_cfg.encoder)
    # Currently, we only support bert-style sentence prediction finetuning.
    initializer = tf.keras.initializers.TruncatedNormal(
        stddev=model_cfg.encoder.initializer_range)
    return models.BertClassifier(
        network=encoder_network,
        num_classes=model_cfg.num_classes,
        initializer=initializer,
        use_encoder_pooler=model_cfg.use_encoder_pooler)

  def build_losses(self, labels, model_outputs, aux_losses=None) -> tf.Tensor:
    """Computes the scalar loss for a batch.

    Uses mean squared error when the task is a regression
    (`num_classes == 1`), and sparse categorical cross-entropy on logits
    otherwise.

    Args:
      labels: Ground-truth targets (float values for regression, integer
        class ids for classification).
      model_outputs: Raw model outputs; treated as logits when classifying.
      aux_losses: Optional collection of auxiliary losses (e.g.
        `model.losses`) summed into the result.

    Returns:
      A scalar `tf.Tensor`.
    """
    if self.task_config.model.num_classes == 1:
      per_example_loss = tf.keras.losses.mean_squared_error(
          labels, model_outputs)
    else:
      # Cast to float32 so the cross-entropy is computed in full precision.
      per_example_loss = tf.keras.losses.sparse_categorical_crossentropy(
          labels, tf.cast(model_outputs, tf.float32), from_logits=True)
    if aux_losses:
      per_example_loss += tf.add_n(aux_losses)
    return tf.reduce_mean(per_example_loss)

  def build_inputs(self, params, input_context=None):
    """Returns tf.data.Dataset for sentence_prediction task."""
    if params.input_path != 'dummy':
      return data_loader_factory.get_data_loader(params).load(input_context)

    # 'dummy' input path: an endless stream of all-zero examples, handy for
    # smoke tests where no real data is available.
    def _dummy_example(_):
      zero_ids = tf.zeros((1, params.seq_length), dtype=tf.int32)
      features = dict(
          input_word_ids=zero_ids,
          input_mask=zero_ids,
          input_type_ids=zero_ids)
      if self.task_config.model.num_classes == 1:
        label = tf.zeros((1,), dtype=tf.float32)  # regression target
      else:
        label = tf.zeros((1, 1), dtype=tf.int32)  # class id
      return features, label

    return tf.data.Dataset.range(1).repeat().map(
        _dummy_example, num_parallel_calls=tf.data.experimental.AUTOTUNE)

  def build_metrics(self, training=None):
    """Returns keras metrics: MSE for regression, accuracy otherwise."""
    del training  # The same metrics are used for training and evaluation.
    if self.task_config.model.num_classes != 1:
      return [tf.keras.metrics.SparseCategoricalAccuracy(name='cls_accuracy')]
    return [tf.keras.metrics.MeanSquaredError()]

  def process_metrics(self, metrics, labels, model_outputs):
    """Updates each metric's state with this batch's labels and outputs."""
    for m in metrics:
      m.update_state(labels, model_outputs)

  def process_compiled_metrics(self, compiled_metrics, labels, model_outputs):
    """Updates a keras compiled-metrics container with this batch."""
    compiled_metrics.update_state(labels, model_outputs)

  def validation_step(self, inputs, model: tf.keras.Model, metrics=None):
    """Runs a validation step and collects outputs for metric aggregation.

    For 'accuracy' the base class implementation suffices. For the
    correlation-style metrics the predictions and labels are added to the
    logs so `aggregate_logs`/`reduce_aggregated_logs` can compute the metric
    over the whole validation set.
    """
    if self.metric_type == 'accuracy':
      return super().validation_step(inputs, model, metrics)
    features, labels = inputs
    outputs = self.inference_step(features, model)
    loss = self.build_losses(
        labels=labels, model_outputs=outputs, aux_losses=model.losses)
    logs = {self.loss: loss}
    if self.metric_type == 'matthews_corrcoef':
      # Matthews correlation needs discrete class predictions.
      logs['sentence_prediction'] = tf.expand_dims(
          tf.math.argmax(outputs, axis=1), axis=0)
      logs['labels'] = labels
    elif self.metric_type == 'pearson_spearman_corr':
      # Correlation metrics use the raw (regression) outputs.
      logs['sentence_prediction'] = outputs
      logs['labels'] = labels
    return logs

  def aggregate_logs(self, state=None, step_outputs=None):
    """Accumulates per-step predictions and labels into `state`."""
    if self.metric_type == 'accuracy':
      # Accuracy is computed by keras metrics; nothing to aggregate here.
      return None
    if state is None:
      state = {'sentence_prediction': [], 'labels': []}
    # TODO(b/160712818): Add support for concatenating partial batches.
    for key in ('sentence_prediction', 'labels'):
      batch = np.concatenate([v.numpy() for v in step_outputs[key]], axis=0)
      state[key].append(batch)
    return state

  def reduce_aggregated_logs(self, aggregated_logs):
    """Computes the final validation metric from aggregated step outputs.

    Args:
      aggregated_logs: Dict with 'sentence_prediction' and 'labels' lists of
        numpy arrays, as produced by `aggregate_logs`.

    Returns:
      A dict mapping `self.metric_type` to its value, or None for
      'accuracy' (which is handled by the keras metrics instead).
    """
    if self.metric_type == 'accuracy':
      return None

    def _flatten(key):
      # Join the per-step arrays and flatten to a 1-D vector; both metric
      # branches need predictions/labels in this shape.
      return np.reshape(np.concatenate(aggregated_logs[key], axis=0), -1)

    if self.metric_type == 'matthews_corrcoef':
      preds = _flatten('sentence_prediction')
      labels = _flatten('labels')
      # matthews_corrcoef is symmetric, so the argument order is harmless.
      return {
          self.metric_type: sklearn_metrics.matthews_corrcoef(preds, labels)
      }
    elif self.metric_type == 'pearson_spearman_corr':
      preds = _flatten('sentence_prediction')
      labels = _flatten('labels')
      # GLUE-style score: mean of the Pearson and Spearman correlations.
      pearson_corr = stats.pearsonr(preds, labels)[0]
      spearman_corr = stats.spearmanr(preds, labels)[0]
      corr_metric = (pearson_corr + spearman_corr) / 2
      return {self.metric_type: corr_metric}

  def initialize(self, model):
    """Load a pretrained checkpoint (if exists) and then train from iter 0."""
    ckpt_dir_or_file = self.task_config.init_checkpoint
    if tf.io.gfile.isdir(ckpt_dir_or_file):
      ckpt_dir_or_file = tf.train.latest_checkpoint(ckpt_dir_or_file)
    if not ckpt_dir_or_file:
      # No checkpoint configured (or the directory contains none): train
      # from randomly initialized weights.
      return

    # Only the encoder is restored from the pretraining checkpoint by
    # default; the classification head starts fresh.
    checkpoint_items = {'encoder': model.checkpoint_items['encoder']}
    # TODO(b/160251903): Investigate why no pooler dense improves finetuning
    # accuracies.
    if self.task_config.init_cls_pooler:
      checkpoint_items['next_sentence.pooler_dense'] = (
          model.checkpoint_items['sentence_prediction.pooler_dense'])
    status = tf.train.Checkpoint(**checkpoint_items).read(ckpt_dir_or_file)
    status.expect_partial().assert_existing_objects_matched()
    logging.info('Finished loading pretrained checkpoint from %s',
                 ckpt_dir_or_file)