Unverified Commit 966ba081 authored by Joe Davison, committed by GitHub

zero-shot pipeline multi_class -> multi_label (#10727)

parent 58f672e6
@@ -49,7 +49,7 @@ class TeacherModelArguments:
     teacher_batch_size: Optional[int] = field(
         default=32, metadata={"help": "Batch size for generating teacher predictions."}
     )
-    multi_class: Optional[bool] = field(
+    multi_label: Optional[bool] = field(
         default=False,
         metadata={
             "help": (
@@ -163,7 +163,7 @@ def get_teacher_predictions(
     hypothesis_template: str,
     batch_size: int,
     temperature: float,
-    multi_class: bool,
+    multi_label: bool,
     use_fast_tokenizer: bool,
     no_cuda: bool,
     fp16: bool,
@@ -203,7 +203,7 @@ def get_teacher_predictions(
     logits = torch.cat(logits, dim=0)  # N*K x 3
     nli_logits = logits.reshape(len(examples), len(class_names), -1)[..., [contr_id, entail_id]]  # N x K x 2

-    if multi_class:
+    if multi_label:
         # softmax over (contr, entail) logits for each class independently
         nli_prob = (nli_logits / temperature).softmax(-1)
     else:
@@ -285,7 +285,7 @@ def main():
         teacher_args.hypothesis_template,
         teacher_args.teacher_batch_size,
         teacher_args.temperature,
-        teacher_args.multi_class,
+        teacher_args.multi_label,
         data_args.use_fast_tokenizer,
         training_args.no_cuda,
         training_args.fp16,
......
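For context on what the renamed flag controls in the distillation script: below is a minimal sketch of the two normalization modes applied to the teacher's NLI logits, assuming the tensor shapes annotated in the hunk above. The inputs are random placeholders, and the single-label `else` branch is reconstructed from the pipeline's documented behavior rather than shown in this diff.

```python
import torch

# Placeholder shapes: N examples, K class names (assumptions for illustration)
N, K, temperature = 4, 3, 1.0
# [..., 0] = contradiction logit, [..., 1] = entailment logit, as in the hunk above
nli_logits = torch.randn(N, K, 2)

multi_label = True
if multi_label:
    # softmax over (contr, entail) logits for each class independently,
    # as in the diff; each class gets its own entailment probability
    nli_prob = (nli_logits / temperature).softmax(-1)
    teacher_probs = nli_prob[..., 1]  # need not sum to 1 across classes
else:
    # reconstructed single-label mode: softmax the entailment logits
    # across the K candidate classes so each row sums to 1
    teacher_probs = (nli_logits[..., 1] / temperature).softmax(-1)
```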
@@ -107,7 +107,8 @@ class ZeroShotClassificationPipeline(Pipeline):
         sequences: Union[str, List[str]],
         candidate_labels,
         hypothesis_template="This example is {}.",
-        multi_class=False,
+        multi_label=False,
+        **kwargs,
     ):
         """
         Classify the sequence(s) given as inputs. See the :obj:`~transformers.ZeroShotClassificationPipeline`
@@ -126,7 +127,7 @@ class ZeroShotClassificationPipeline(Pipeline):
                 into the model like :obj:`"<cls> sequence to classify <sep> This example is sports . <sep>"`. The
                 default template works well in many cases, but it may be worthwhile to experiment with different
                 templates depending on the task setting.
-            multi_class (:obj:`bool`, `optional`, defaults to :obj:`False`):
+            multi_label (:obj:`bool`, `optional`, defaults to :obj:`False`):
                 Whether or not multiple candidate labels can be true. If :obj:`False`, the scores are normalized such
                 that the sum of the label likelihoods for each sequence is 1. If :obj:`True`, the labels are considered
                 independent and probabilities are normalized for each candidate by doing a softmax of the entailment
@@ -139,6 +140,13 @@ class ZeroShotClassificationPipeline(Pipeline):
             - **labels** (:obj:`List[str]`) -- The labels sorted by order of likelihood.
             - **scores** (:obj:`List[float]`) -- The probabilities for each of the labels.
         """
+        if "multi_class" in kwargs and kwargs["multi_class"] is not None:
+            multi_label = kwargs.pop("multi_class")
+            logger.warning(
+                "The `multi_class` argument has been deprecated and renamed to `multi_label`. "
+                "`multi_class` will be removed in a future version of Transformers."
+            )
         if sequences and isinstance(sequences, str):
             sequences = [sequences]
@@ -148,9 +156,9 @@ class ZeroShotClassificationPipeline(Pipeline):
         reshaped_outputs = outputs.reshape((num_sequences, len(candidate_labels), -1))

         if len(candidate_labels) == 1:
-            multi_class = True
+            multi_label = True

-        if not multi_class:
+        if not multi_label:
             # softmax the "entailment" logits over all candidate labels
             entail_logits = reshaped_outputs[..., self.entailment_id]
             scores = np.exp(entail_logits) / np.exp(entail_logits).sum(-1, keepdims=True)
......
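A small, self-contained illustration of the two scoring modes in the last hunk. It also shows why the pipeline forces `multi_label = True` when there is a single candidate label: a softmax over one entailment logit is always 1.0, so only the per-label (contradiction, entailment) softmax is informative. Shapes and values are made up, and the NLI label indices below (contradiction = 0, entailment = 2) are a common convention, not something asserted by this diff.

```python
import numpy as np

# (num_sequences, num_labels, 3) NLI logits; random stand-in values
reshaped_outputs = np.random.randn(2, 4, 3)
contradiction_id, entailment_id = 0, 2  # assumed label order

# multi_label=False: softmax the entailment logits over all candidate labels
entail_logits = reshaped_outputs[..., entailment_id]
single = np.exp(entail_logits) / np.exp(entail_logits).sum(-1, keepdims=True)
assert np.allclose(single.sum(-1), 1.0)  # one distribution per sequence

# multi_label=True: per-label softmax over (contradiction, entailment)
pair = reshaped_outputs[..., [contradiction_id, entailment_id]]
multi = np.exp(pair) / np.exp(pair).sum(-1, keepdims=True)
multi = multi[..., 1]  # independent entailment probability per label

# with one candidate label, the single-label formula degenerates to 1.0,
# hence the forced flip to multi_label in the code above
one_label = entail_logits[:, :1]
one = np.exp(one_label) / np.exp(one_label).sum(-1, keepdims=True)
assert np.allclose(one, 1.0)
```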
@@ -140,7 +140,7 @@ class ZeroShotClassificationPipelineTests(CustomInputPipelineCommonMixin, unittest.TestCase):
             {
                 "sequences": "The dominant sequence transduction models are based on complex recurrent or convolutional neural networks in an encoder-decoder configuration. The best performing models also connect the encoder and decoder through an attention mechanism. We propose a new simple network architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two machine translation tasks show these models to be superior in quality while being more parallelizable and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task, improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training costs of the best models from the literature. We show that the Transformer generalizes well to other tasks by applying it successfully to English constituency parsing both with large and limited training data.",
                 "candidate_labels": ["machine learning", "statistics", "translation", "vision"],
-                "multi_class": True,
+                "multi_label": True,
             },
         ]
......
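Finally, a hedged usage sketch of the renamed argument from the caller's side. The checkpoint is illustrative; any NLI model works with the zero-shot-classification task. Passing the old `multi_class` keyword still runs, since it is caught via `**kwargs`, but it now logs the deprecation warning added in this commit.

```python
from transformers import pipeline

# Model choice is illustrative, not mandated by this change
classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")

sequence = "Who are you voting for in 2020?"
labels = ["politics", "economics", "public health"]

# New name: labels are scored independently, so scores need not sum to 1
result = classifier(sequence, labels, multi_label=True)
print(result["labels"], result["scores"])

# Old name still works for now, but emits the deprecation warning
result = classifier(sequence, labels, multi_class=True)
```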