TokenClassificationChunkPipeline throws error: 'BatchEncoding' object is not an iterator

Question

I am following this HuggingFace anonymization tutorial. Running the code as-is with PyTorch 2.0.0 and transformers 4.28.1, the custom pipeline errors out:

def anonymize(text):
    ents = pipe(text)  # this errors out
    ...

TypeError: 'BatchEncoding' object is not an iterator

I realize this is a tokenizer problem. The custom pipeline's preprocess looks like this:

class TokenClassificationChunkPipeline(TokenClassificationPipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def preprocess(self, sentence, offset_mapping=None):
        model_inputs = self.tokenizer(
            sentence,
            return_tensors="pt",
            truncation=True,
            return_special_tokens_mask=True,
            return_offsets_mapping=True,
            return_overflowing_tokens=True,  # Return multiple chunks
            max_length=self.tokenizer.model_max_length,
            padding=True
        )
        if offset_mapping:
            model_inputs["offset_mapping"] = offset_mapping

        model_inputs["sentence"] = sentence

        return model_inputs

This model_inputs, however, is a BatchEncoding object, not an iterator.

How do I make the BatchEncoding object an iterator? Or is there another way to fix this? For the full code, see the tutorial link above.
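As a minimal illustration of the error itself (not from the original post), a BatchEncoding is a dict-like container, so calling next() on it raises exactly this TypeError; the checkpoint below is the one used later in the answer:

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("Davlan/bert-base-multilingual-cased-ner-hrl")
enc = tokenizer("Bernard works at BNP Paribas in Paris.", return_tensors="pt")
next(enc)  # TypeError: 'BatchEncoding' object is not an iterator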

pytorch nlp huggingface-transformers torch named-entity-recognition
1 Answer

Not sure why the pipeline was coded that way in the blog post, but here is a working version:

import torch
from transformers import AutoTokenizer, AutoModelForTokenClassification
from transformers.pipelines.token_classification import TokenClassificationPipeline

model_checkpoint = "Davlan/bert-base-multilingual-cased-ner-hrl"

tokenizer = AutoTokenizer.from_pretrained(model_checkpoint)
model = AutoModelForTokenClassification.from_pretrained(model_checkpoint)


class TokenClassificationChunkPipeline(TokenClassificationPipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def preprocess(self, sentence, offset_mapping=None, **preprocess_params):
        tokenizer_params = preprocess_params.pop("tokenizer_params", {})
        # Only truncate if the tokenizer actually has a finite max length
        truncation = True if self.tokenizer.model_max_length and self.tokenizer.model_max_length > 0 else False
        inputs = self.tokenizer(
            sentence,
            return_tensors="pt",
            truncation=truncation,
            return_special_tokens_mask=True,
            return_offsets_mapping=True,
            return_overflowing_tokens=True,  # Return multiple chunks
            max_length=self.tokenizer.model_max_length,
            padding=True,
            **tokenizer_params,
        )
        # overflow_to_sample_mapping is kept and handed through _forward below;
        # popping it here would also work.
        num_chunks = len(inputs["input_ids"])

        # Yield one chunk at a time so the pipeline receives a generator,
        # which is what the chunked forward pass expects.
        for i in range(num_chunks):
            if self.framework == "tf":
                # Requires `import tensorflow as tf` when running under TensorFlow
                model_inputs = {k: tf.expand_dims(v[i], 0) for k, v in inputs.items()}
            else:
                model_inputs = {k: v[i].unsqueeze(0) for k, v in inputs.items()}
            if offset_mapping is not None:
                model_inputs["offset_mapping"] = offset_mapping
            model_inputs["sentence"] = sentence if i == 0 else None
            model_inputs["is_last"] = i == num_chunks - 1
            yield model_inputs

    def _forward(self, model_inputs):
        # Separate the bookkeeping fields from the actual model tensors
        special_tokens_mask = model_inputs.pop("special_tokens_mask")
        offset_mapping = model_inputs.pop("offset_mapping", None)
        sentence = model_inputs.pop("sentence")
        is_last = model_inputs.pop("is_last")
        overflow_to_sample_mapping = model_inputs.pop("overflow_to_sample_mapping")

        # Forward pass with only the tensor inputs
        output = self.model(**model_inputs)
        logits = output["logits"] if isinstance(output, dict) else output[0]

        model_outputs = {
            "logits": logits,
            "special_tokens_mask": special_tokens_mask,
            "offset_mapping": offset_mapping,
            "sentence": sentence,
            "overflow_to_sample_mapping": overflow_to_sample_mapping,
            "is_last": is_last,
            **model_inputs,
        }

        # We reshape outputs to fit with the postprocess inputs
        model_outputs["input_ids"] = torch.reshape(model_outputs["input_ids"], (1, -1))
        model_outputs["token_type_ids"] = torch.reshape(model_outputs["token_type_ids"], (1, -1))
        model_outputs["attention_mask"] = torch.reshape(model_outputs["attention_mask"], (1, -1))
        model_outputs["special_tokens_mask"] = torch.reshape(model_outputs["special_tokens_mask"], (1, -1))
        model_outputs["offset_mapping"] = torch.reshape(model_outputs["offset_mapping"], (1, -1, 2))

        return model_outputs


pipe = TokenClassificationChunkPipeline(model=model, tokenizer=tokenizer, aggregation_strategy="simple")

pipe("Bernard works at BNP Paribas in Paris.")

[Out]:

[{'entity_group': 'PER',
  'score': 0.9994497,
  'word': 'Bernard',
  'start': 0,
  'end': 7},
 {'entity_group': 'ORG',
  'score': 0.9997708,
  'word': 'BNP Paribas',
  'start': 17,
  'end': 28},
 {'entity_group': 'LOC',
  'score': 0.99906,
  'word': 'Paris',
  'start': 32,
  'end': 37}]
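
With the working pipeline, the question's anonymize() helper runs as well. Below is a hedged sketch of one way to complete it; the bracket-replacement scheme is an assumption, not necessarily the tutorial's exact code:

def anonymize(text):
    ents = pipe(text)
    # Replace each detected span with its entity label, working from the
    # end of the string backwards so earlier offsets stay valid.
    for ent in sorted(ents, key=lambda e: e["start"], reverse=True):
        text = text[:ent["start"]] + f"[{ent['entity_group']}]" + text[ent["end"]:]
    return text

anonymize("Bernard works at BNP Paribas in Paris.")
# '[PER] works at [ORG] in [LOC].'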

For reference, look at how the preprocess() and _forward() methods are implemented in the TokenClassificationPipeline class: https://github.com/huggingface/transformers/blob/main/src/transformers/pipelines/token_classification.py

preprocess() is supposed to return a generator of per-chunk inputs; that is why the forward step expects an iterator and complains with

TypeError: 'BatchEncoding' object is not an iterator
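
To see the generator contract in action (a minimal check, not from the original answer), you can call preprocess() on the fixed pipeline directly and step through it with next():

chunks = pipe.preprocess("Bernard works at BNP Paribas in Paris.")
first = next(chunks)  # works now: preprocess() yields one chunk at a time
print(first["is_last"], first["input_ids"].shape)
# prints something like: True torch.Size([1, N]), N depending on the tokenization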
