- input_ids
- Out [19]:
- tensor([[151644, 8948, 198, 2610, 525, 264, 10950, 17847, 13,
- 151645, 198, 151644, 872, 198, 35127, 752, 264, 2805,
- 16800, 311, 3460, 4128, 1614, 13, 151645, 198, 151644,
- 77091, 198]], device='cuda:0')
- attention_mask
- Out [20]:
- tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1]], device='cuda:0')
- position_ids
- Out [21]:
- tensor([[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
- 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28]], device='cuda:0')
- past_key_values
- inputs_embeds
- use_cache
- Out [24]: True
- output_attentions
- Out [25]: False
- output_hidden_states
- Out [26]: False
- return_dict
- Out [27]: True