Status: Open
Labels: bug, good first issue, priority-high
Description
Calling `model.get_answer(context, questions)` crashes inside DistilBERT's embedding layer: the tokenized context/question pair is 692 tokens long, but the model only has 512 position embeddings. Traceback:
RuntimeError Traceback (most recent call last)
<ipython-input-5-04fce7cff438> in <module>()
19
20 # Run inference using the instantiated models
---> 21 answers = model.get_answer(context, questions)
22
23 # Print the output
8 frames
/usr/local/lib/python3.7/dist-packages/pyqna/models/reading_comprehension/transformer_models.py in get_answer(self, context, question)
133 return self._infer_from_model(context, question)
134 elif isinstance(question, list):
--> 135 return [self._infer_from_model(context, q) for q in question]
/usr/local/lib/python3.7/dist-packages/pyqna/models/reading_comprehension/transformer_models.py in <listcomp>(.0)
133 return self._infer_from_model(context, question)
134 elif isinstance(question, list):
--> 135 return [self._infer_from_model(context, q) for q in question]
/usr/local/lib/python3.7/dist-packages/pyqna/models/reading_comprehension/transformer_models.py in _infer_from_model(self, context, question)
66 ).to(self.device)
67
---> 68 outputs = self.model(**inputs)
69
70 non_answer_tokens = [x if x in [0, 1] else 0 for x in inputs.sequence_ids()]
/usr/local/lib/python3.7/dist-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
1100 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1101 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1102 return forward_call(*input, **kwargs)
1103 # Do not call functions when jit is used
1104 full_backward_hooks, non_full_backward_hooks = [], []
/usr/local/lib/python3.7/dist-packages/transformers/models/distilbert/modeling_distilbert.py in forward(self, input_ids, attention_mask, head_mask, inputs_embeds, start_positions, end_positions, output_attentions, output_hidden_states, return_dict)
855 output_attentions=output_attentions,
856 output_hidden_states=output_hidden_states,
--> 857 return_dict=return_dict,
858 )
859 hidden_states = distilbert_output[0] # (bs, max_query_len, dim)
/usr/local/lib/python3.7/dist-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
1100 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1101 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1102 return forward_call(*input, **kwargs)
1103 # Do not call functions when jit is used
1104 full_backward_hooks, non_full_backward_hooks = [], []
/usr/local/lib/python3.7/dist-packages/transformers/models/distilbert/modeling_distilbert.py in forward(self, input_ids, attention_mask, head_mask, inputs_embeds, output_attentions, output_hidden_states, return_dict)
548
549 if inputs_embeds is None:
--> 550 inputs_embeds = self.embeddings(input_ids) # (bs, seq_length, dim)
551 return self.transformer(
552 x=inputs_embeds,
/usr/local/lib/python3.7/dist-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
1100 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1101 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1102 return forward_call(*input, **kwargs)
1103 # Do not call functions when jit is used
1104 full_backward_hooks, non_full_backward_hooks = [], []
/usr/local/lib/python3.7/dist-packages/transformers/models/distilbert/modeling_distilbert.py in forward(self, input_ids)
131 position_embeddings = self.position_embeddings(position_ids) # (bs, max_seq_length, dim)
132
--> 133 embeddings = word_embeddings + position_embeddings # (bs, max_seq_length, dim)
134 embeddings = self.LayerNorm(embeddings) # (bs, max_seq_length, dim)
135 embeddings = self.dropout(embeddings) # (bs, max_seq_length, dim)
RuntimeError: The size of tensor a (692) must match the size of tensor b (512) at non-singleton dimension 1
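The failing add at `modeling_distilbert.py` line 133 sums word embeddings of length 692 with position embeddings capped at the model's `max_position_embeddings` (512), so the input is simply longer than DistilBERT supports. A minimal sketch that reproduces the mismatch outside pyqna, assuming the standard `distilbert-base-uncased-distilled-squad` checkpoint (the checkpoint pyqna actually loads may differ):

```python
from transformers import AutoModelForQuestionAnswering, AutoTokenizer

# Assumed checkpoint; substitute whichever model pyqna instantiates.
name = "distilbert-base-uncased-distilled-squad"
tokenizer = AutoTokenizer.from_pretrained(name)
model = AutoModelForQuestionAnswering.from_pretrained(name)

context = "a long passage " * 250          # long enough to exceed 512 tokens
question = "What is the passage about?"

# No truncation, mirroring what the traceback suggests pyqna does at
# transformer_models.py line 66.
inputs = tokenizer(question, context, return_tensors="pt")

print(inputs["input_ids"].shape[1])           # well over 512
print(model.config.max_position_embeddings)   # 512 for DistilBERT

# On the transformers version in the traceback this forward call fails
# with the same size mismatch: word embeddings are (1, seq_len, dim)
# while position embeddings stop at 512 positions.
outputs = model(**inputs)
```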
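Until `_infer_from_model` truncates (or windows) its inputs, one workaround is to clip the encoded pair to the model's limit before the forward pass. Continuing the sketch above, this bypasses pyqna and calls the tokenizer and model directly; `truncation="only_second"` trims context tokens while keeping the full question:

```python
inputs = tokenizer(
    question,
    context,
    truncation="only_second",  # drop context tokens, never the question
    max_length=model.config.max_position_embeddings,
    return_tensors="pt",
)
outputs = model(**inputs)  # seq_len <= 512, so the embedding add succeeds
```

Truncation can cut off the answer if it sits late in the passage; a more robust fix inside pyqna would window the context with `return_overflowing_tokens=True` and a `stride`, run inference on each window, and keep the highest-scoring span.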