Tags: python, nlp, model, pytorch, bert-language-model

AttributeError: 'MaskedLMOutput' object has no attribute 'view'


Sorry to bother you. I ran into this error while evaluating some models, and I haven't found a way to fix it.
What does 'MaskedLMOutput' mean? Could somebody tell me how to fix this, please? Thank you.

(AttributeError: 'MaskedLMOutput' object has no attribute 'view')

from torch import nn
from transformers import BertForMaskedLM

class BertPunc(nn.Module):  
    
    def __init__(self, segment_size, output_size, dropout):
        super(BertPunc, self).__init__()
        self.bert = BertForMaskedLM.from_pretrained('cl-tohoku/bert-base-japanese')
        self.bert_vocab_size = 32000
        self.bn = nn.BatchNorm1d(segment_size*self.bert_vocab_size)
        self.fc = nn.Linear(segment_size*self.bert_vocab_size, output_size)
        self.dropout = nn.Dropout(dropout)

    def forward(self, input):
        
        x = self.bert(input)             
        x = x.view(x.shape[0], -1)                   # wrong thing here
        x = self.fc(self.dropout(self.bn(x)))
        return x

I ran this in a Jupyter notebook:

def predictions(data_loader):
    y_pred = []
    y_true = []
    for inputs, labels in tqdm(data_loader, total=len(data_loader)):
        with torch.no_grad():
            inputs, labels = inputs.cuda(), labels.cuda()
            output = bert_punc(inputs)
            y_pred += list(output.argmax(dim=1).cpu().data.numpy().flatten())
            y_true += list(labels.cpu().data.numpy().flatten())
    return y_pred, y_true

def evaluation(y_pred, y_test):
    precision, recall, f1, _ = metrics.precision_recall_fscore_support(
        y_test, y_pred, average=None, labels=[1, 2, 3])
    overall = metrics.precision_recall_fscore_support(
        y_test, y_pred, average='macro', labels=[1, 2, 3])
    result = pd.DataFrame(
        np.array([precision, recall, f1]), 
        columns=list(punctuation_enc.keys())[1:], 
        index=['Precision', 'Recall', 'F1']
    )
    result['OVERALL'] = overall[:3]
    return result


y_pred_test, y_true_test = predictions(data_loader_test)
eval_test = evaluation(y_pred_test, y_true_test)
eval_test

The error:

   ---------------------------------------------------------------------------
    AttributeError                            Traceback (most recent call last)
    Input In [12], in <cell line: 1>()
    ----> 1 y_pred_test, y_true_test = predictions(data_loader_test)
          2 eval_test = evaluation(y_pred_test, y_true_test)
          3 eval_test
    
    Input In [10], in predictions(data_loader)
          5 with torch.no_grad():
          6     inputs, labels = inputs.cuda(), labels.cuda()
    ----> 7     output = bert_punc(inputs)
          8     y_pred += list(output.argmax(dim=1).cpu().data.numpy().flatten())
          9     y_true += list(labels.cpu().data.numpy().flatten())
    
    File ~\anaconda3\lib\site-packages\torch\nn\modules\module.py:1110, in Module._call_impl(self, *input, **kwargs)
       1106 # If we don't have any hooks, we want to skip the rest of the logic in
       1107 # this function, and just call forward.
       1108 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
       1109         or _global_forward_hooks or _global_forward_pre_hooks):
    -> 1110     return forward_call(*input, **kwargs)
       1111 # Do not call functions when jit is used
       1112 full_backward_hooks, non_full_backward_hooks = [], []
    
    File ~\anaconda3\lib\site-packages\torch\nn\parallel\data_parallel.py:166, in DataParallel.forward(self, *inputs, **kwargs)
        163     kwargs = ({},)
        165 if len(self.device_ids) == 1:
    --> 166     return self.module(*inputs[0], **kwargs[0])
        167 replicas = self.replicate(self.module, self.device_ids[:len(inputs)])
        168 outputs = self.parallel_apply(replicas, inputs, kwargs)
    
    File ~\anaconda3\lib\site-packages\torch\nn\modules\module.py:1110, in Module._call_impl(self, *input, **kwargs)
       1106 # If we don't have any hooks, we want to skip the rest of the logic in
       1107 # this function, and just call forward.
       1108 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
       1109         or _global_forward_hooks or _global_forward_pre_hooks):
    -> 1110     return forward_call(*input, **kwargs)
       1111 # Do not call functions when jit is used
       1112 full_backward_hooks, non_full_backward_hooks = [], []
    
    File F:\BertPunc-master\model.py:19, in BertPunc.forward(self, input)
         16 def forward(self, input):
         18     x = self.bert(input)
    ---> 19     x = x.view(x.shape[0], -1)
         20     x = self.fc(self.dropout(self.bn(x)))
         21     return x
    
    AttributeError: 'MaskedLMOutput' object has no attribute 'view'

Solution

  • You can refer to the documentation of MaskedLMOutput. Basically, it is an object holding the loss, logits, hidden_states and attentions. It is not a tensor, which is why you are getting this error. I think you are interested in the logits, i.e., the scores for each token before applying softmax. In the forward function, you can simply access the logits tensor like this:

    x = self.bert(input).logits
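Putting it together, here is a minimal sketch of the corrected forward (assuming transformers v4+, where the model returns a MaskedLMOutput object by default; the rest of the BertPunc class from the question stays the same):

    def forward(self, input):
        # self.bert(input) returns a MaskedLMOutput; its .logits field is the
        # prediction-score tensor of shape (batch_size, segment_size, vocab_size)
        x = self.bert(input).logits
        # flatten to (batch_size, segment_size * vocab_size) so it matches
        # the BatchNorm1d / Linear layers defined in __init__
        x = x.view(x.shape[0], -1)
        x = self.fc(self.dropout(self.bn(x)))
        return x

If you are on an older transformers version that returns plain tuples instead of output objects, indexing with self.bert(input)[0] should give the same logits tensor.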