ericsorides committed
Commit 7f122be · Parent: 09a25ba

Model without scatter and with tree

Files changed (2):
  1. README.md +7 -1
  2. model.onnx +2 -2
README.md CHANGED
@@ -99,8 +99,11 @@ def generate_text(model_path, prompt, tokenizer, max_gen_tokens, total_sequence,
     inputs_dict = {}
     inputs_dict['input_ids'] = actual_input[:, :window].reshape(1, window).numpy()
     inputs_dict['attention_mask'] = first_attention
+    index_pos = sum(first_attention[0])
+    inputs_dict['position_ids'] = np.concatenate((np.zeros([1, total_sequence - index_pos], dtype = 'int64'), np.arange(index_pos, dtype = 'int64').reshape(1, index_pos)), axis=1)
+    inputs_dict['tree_attention'] = np.triu(-65504*np.ones(total_sequence), k= 1).astype('float16').reshape(1, 1, total_sequence, total_sequence)
     for name in inputs_names:
-        if name == 'input_ids' or name == 'attention_mask': continue
+        if name == 'input_ids' or name == 'attention_mask' or name == 'position_ids' or name == 'tree_attention': continue
         inputs_dict[name] = np.zeros([1, n_heads, context-window, 128], dtype="float16")
     index = 0
     new_token = np.array([10])
@@ -132,6 +135,9 @@ def generate_text(model_path, prompt, tokenizer, max_gen_tokens, total_sequence,
                 inputs_dict['input_ids']= total_input[:, j:next_index].reshape(1, window)
             elif name == 'attention_mask':
                 inputs_dict['attention_mask'] = np.concatenate((np.zeros((1, total_sequence-next_index), dtype = 'int64'), np.ones((1, next_index), dtype = 'int64')), axis=1)
+            elif name == 'position_ids':
+                inputs_dict['position_ids'] = np.concatenate((np.zeros([1, total_sequence - next_index], dtype = 'int64'), np.arange(next_index, dtype = 'int64').reshape(1, next_index)), axis=1)
+            elif name == 'tree_attention': continue
             else:
                 old_name = name.replace("past_key_values", "present")
                 inputs_dict[name] = outs_dictionary[old_name][:, :, next_index-old_j:context-window+(next_index - old_j), :]
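The snippet below is a minimal sketch, not part of the commit, of how the two inputs this change introduces ('position_ids' and 'tree_attention') could be prepared and fed for a single forward pass with onnxruntime. The model path, vocabulary size, head count, head dimension, and sequence lengths are placeholder assumptions; adjust them to match the full example in the README.

```python
# Sketch of feeding the new 'position_ids' and 'tree_attention' inputs.
# All sizes (context, window, n_heads, head_dim, vocab) are assumptions for
# illustration, not values taken from this repository.
import numpy as np
import onnxruntime as ort

model_path = "model.onnx"      # assumed local path to this repo's model
context, window = 2048, 8      # assumed KV-cache length and prompt window
total_sequence = context       # assumed: mask/position tensors span the full context
n_heads, head_dim = 32, 128    # assumed head layout (the README uses ..., 128)

sess = ort.InferenceSession(model_path, providers=["CPUExecutionProvider"])
inputs_names = [i.name for i in sess.get_inputs()]

# Dummy prompt of `window` tokens, right-aligned in the mask as in the README.
input_ids = np.random.randint(0, 32000, size=(1, window)).astype("int64")
attention_mask = np.concatenate(
    (np.zeros((1, total_sequence - window), dtype="int64"),
     np.ones((1, window), dtype="int64")), axis=1)

# position_ids: zeros over the padded region, then 0..window-1 for real tokens.
position_ids = np.concatenate(
    (np.zeros((1, total_sequence - window), dtype="int64"),
     np.arange(window, dtype="int64").reshape(1, window)), axis=1)

# tree_attention: additive causal mask, -65504 (lowest finite fp16) above the diagonal.
tree_attention = (np.triu(-65504 * np.ones((total_sequence, total_sequence)), k=1)
                  .astype("float16")
                  .reshape(1, 1, total_sequence, total_sequence))

feeds = {
    "input_ids": input_ids,
    "attention_mask": attention_mask,
    "position_ids": position_ids,
    "tree_attention": tree_attention,
}
# Zero-filled KV cache for every remaining model input, mirroring the README loop.
for name in inputs_names:
    if name not in feeds:
        feeds[name] = np.zeros((1, n_heads, context - window, head_dim), dtype="float16")

outputs = sess.run(None, feeds)
print([o.shape for o in outputs])
```

In the README's generation loop, position_ids is rebuilt the same way from next_index on each step, while tree_attention is passed through unchanged (the diff skips it with `elif name == 'tree_attention': continue`).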
model.onnx CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:3f652af79679d34d3b59b1bfcc4a385bb8bbe37e117d224302512b8d1c576bce
-size 1947171
+oid sha256:9fb4dc57c626c6f6164d2d98e2f15de289093ddc0f74fc006726ef2fb856a0bd
+size 1934549