
[Text Analysis] Perplexity

from transformers import AutoTokenizer, AutoModelForCausalLM

# Load KoGPT2 (a Korean GPT-2 model) and its tokenizer
model = AutoModelForCausalLM.from_pretrained("skt/kogpt2-base-v2")
tokenizer = AutoTokenizer.from_pretrained("skt/kogpt2-base-v2")

# A natural sentence: "I ate rice"
texts = ['밥을 먹었다']
input_ids = tokenizer(texts, return_tensors='pt')['input_ids']
# Passing input_ids as labels makes the model return the language-modeling (cross-entropy) loss
outputs = model(input_ids, labels=input_ids)
outputs.loss
# tensor(3.4022, grad_fn=<NllLossBackward0>)

# A less natural sentence: "I drank rice"
texts = ['밥을 마셨다']
input_ids = tokenizer(texts, return_tensors='pt')['input_ids']
outputs = model(input_ids, labels=input_ids)
outputs.loss
# tensor(3.7854, grad_fn=<NllLossBackward0>)
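The less natural sentence ("I drank rice") gets a higher loss than the natural one ("I ate rice"). Since the loss returned here is the mean per-token cross-entropy, perplexity is simply its exponential, so the two losses convert directly into perplexities. A minimal sketch of that conversion, assuming the model and tokenizer loaded above; the sentence_perplexity helper is illustrative, not part of the transformers API:

import torch

# Perplexity = exp(mean cross-entropy loss); lower means the model finds the text more likely.
def sentence_perplexity(text, model, tokenizer):
    input_ids = tokenizer([text], return_tensors='pt')['input_ids']
    with torch.no_grad():  # no gradients needed for evaluation
        loss = model(input_ids, labels=input_ids).loss
    return torch.exp(loss).item()

sentence_perplexity('밥을 먹었다', model, tokenizer)  # exp(3.4022) ≈ 30.0
sentence_perplexity('밥을 마셨다', model, tokenizer)  # exp(3.7854) ≈ 44.1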