从 LLM 生成文本时打印出特殊标记

问题描述 投票:0回答:1

嗨,我有一个关于 llm 在生成答案时打印特殊令牌的问题。 这是一个例子:

from utils.prompter import Prompter
# from utils.util import postprocessing, e2k_model
from deeppostagger import tagger
from transformers import TextIteratorStreamer, PreTrainedTokenizerFast
from threading import Thread
from auto_gptq import AutoGPTQForCausalLM
import warnings
warnings.filterwarnings('ignore')
new_line_chr = ['.', '?']
rm_chr = ['<|endoftext|>']

class LLM_qa:
    def __init__(self, model_path, max_len):
        self.model = AutoGPTQForCausalLM.from_quantized(
            model_path, 
            device_map="balanced", max_memory = {0: "10GB", 1: "10GB"}, 
            low_cpu_mem_usage=True
            )

        self.model.config.use_cache = True
        self.model.eval()

        self.max_len = max_len

        self.tokenizer = PreTrainedTokenizerFast.from_pretrained(model_path)
        
        self.prompter = Prompter("kullm")
        self.prompter_gen = Prompter("kullm")

    def qa(self, question, instruction=''):
        if instruction:
            prompt = self.prompter_gen.generate_prompt(instruction, question)
            self.max_len *=2
        else:
            prompt = self.prompter.generate_prompt(question)

        inputs = self.tokenizer(prompt, return_tensors="pt")
        streamer = TextIteratorStreamer(self.tokenizer, skip_prompt=True)

        generation_kwargs = dict(
            input_ids=inputs.input_ids[..., :-1],
            streamer=streamer, max_new_tokens=self.max_len, no_repeat_ngram_size=3, eos_token_id=2, pad_token_id=self.tokenizer.eos_token_id
            )
        
        thread = Thread(target=self.model.generate, kwargs=generation_kwargs)

        return thread, streamer

MODEL_PATH = '/mnt/research/datasets/llm/weights/kullm-polyglot-12.8b-v2/20231115_dupex_quantize'
MAX_LEN = 128

llm = LLM_qa(MODEL_PATH, MAX_LEN)

q = 'Hi?'
instruction = ''

thread, streamer = llm.qa(q, instruction)

thread.start()

generated_text = ''
for new_text in streamer:
    flg = [True for i in new_line_chr if i in new_text]
    # for c in rm_chr:
    #     new_text = new_text.replace(c, '')
    if new_text and flg:
        print(new_text)
    elif new_text:
        print(new_text, end='')

    # generated_text += new_text
    # flg = [True for i in new_line_chr if i in new_text]
    # if flg:
    #     print(generated_text)
    #     # print(engsen2korsen(generated_text))
    #     generated_text = ''

print("\n - done.")

"

输出\u003d안녕하세요! "오늘은 무엇을 도와드릴까요?<|endoftext|>

我不知道为什么打印<|endoftext|>..请帮忙

rm_chr = ['<|endoftext|>'] 是我尝试删除特殊令牌的一种方法,它确实有效,但我想知道为什么会发生这种情况以及是否有任何方法可以修复它。

huggingface-transformers tokenize large-language-model huggingface-tokenizers
1个回答
0
投票

您可以将

skip_special_tokens=True
添加到 Generation_kwargs 中。或者,您可以根据特殊令牌的分词器列表检查每个生成的令牌 ID,并进行相应的过滤。

© www.soinside.com 2019 - 2024. All rights reserved.