 import org.beehive.gpullama3.model.Configuration;
 import org.beehive.gpullama3.model.Model;
 import org.beehive.gpullama3.model.granite.GraniteConfiguration;
+import org.beehive.gpullama3.model.devstral.DevstralConfiguration;
 import org.beehive.gpullama3.model.phi3.Phi3Configuration;
 import org.beehive.gpullama3.model.qwen2.Qwen2Configuration;
 import org.beehive.gpullama3.model.qwen3.Qwen3Configuration;
@@ -179,6 +180,95 @@ public static FloatTensor forwardJava(Model model, State state, int token, int p
         return state.logits;
     }
 
+    /**
+     * Forward pass for Devstral 2 models where head_dim != dim / num_heads.
+     * The Q projection outputs qDim (num_heads * head_dim) instead of dim
+     * (see the dimension sketch below this diff).
+     */
+    public static FloatTensor forwardJavaDevstral(Model model, State state, int token, int position) {
+        final DevstralConfiguration config = (DevstralConfiguration) model.configuration();
+        final StandardWeights weights = (StandardWeights) model.weights();
+        int dim = config.dim();
+        int headSize = config.headSize(); // 128 (independent head_dim)
+        int qDim = config.qDim();         // 4096 = 32 * 128
+        int kvDim = config.kvDim();       // 1024 = 8 * 128
+        int kvMul = config.kvMul();       // query heads per key/value head
+        float sqrtHeadSize = (float) Math.sqrt(headSize);
+
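+        // copy the current token's embedding into the activation buffer x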
+        weights.token_embedding_table.copyTo(token * dim, state.x, 0, dim);
+
+        for (int l = 0; l < config.numberOfLayers(); l++) {
+            // attention rmsnorm
+            rmsnorm(state.xb, state.x, weights.rms_att_weight[l], 0, dim, config.rmsNormEps());
+
+            // QKV projections: Q is qDim wide, K/V are kvDim wide (not dim)
+            weights.wq[l].matmul(state.xb, state.q, qDim, dim);
+            weights.wk[l].matmul(state.xb, state.k, kvDim, dim);
+            weights.wv[l].matmul(state.xb, state.v, kvDim, dim);
+
+            // RoPE over qDim (not dim): K only has kvDim elements, so rotate both Q and K
+            // while i < kvDim, then Q alone for the remaining query dimensions
+            for (int i = 0; i < qDim; i += 2) {
+                int head_dim = i % headSize;
+                float fcr = weights.freq_cis_real.getFloat(position * (headSize / 2) + (head_dim / 2));
+                float fci = weights.freq_cis_imag.getFloat(position * (headSize / 2) + (head_dim / 2));
+                int rotn = i < kvDim ? 2 : 1;
+                for (int v = 0; v < rotn; v++) {
+                    FloatTensor vec = v == 0 ? state.q : state.k;
+                    float v0 = vec.getFloat(i);
+                    float v1 = vec.getFloat(i + 1);
+                    vec.setFloat(i, v0 * fcr - v1 * fci);
+                    vec.setFloat(i + 1, v0 * fci + v1 * fcr);
+                }
+            }
+
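+            // append this position's key/value vectors to the layer's KV cache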
+            state.k.copyTo(0, state.keyCache[l], position * kvDim, kvDim);
+            state.v.copyTo(0, state.valueCache[l], position * kvDim, kvDim);
+
+            int curLayer = l;
+
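+            // multi-head attention over all cached positions; with grouped-query attention,
+            // query head h reads key/value head h / kvMul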
+            Parallel.parallelFor(0, config.numberOfHeads(), h -> {
+                int qOffset = h * headSize;
+                int attOffset = h * config.contextLength();
+
+                // scaled dot-product scores against every cached key up to this position
+                for (int t = 0; t <= position; t++) {
+                    int keyCacheOffset = t * kvDim + (h / kvMul) * headSize;
+                    float score = state.q.dot(qOffset, state.keyCache[curLayer], keyCacheOffset, headSize);
+                    score /= sqrtHeadSize;
+                    state.att.setFloat(attOffset + t, score);
+                }
+
+                state.att.softmaxInPlace(attOffset, position + 1);
+
+                int xbOffset = h * headSize;
+                state.xb.fillInPlace(xbOffset, headSize, 0f);
+
+                // accumulate attention-weighted values into this head's slice of xb
+                for (int t = 0; t <= position; t++) {
+                    int vOffset = t * kvDim + (h / kvMul) * headSize;
+                    float a = state.att.getFloat(attOffset + t);
+                    state.xb.saxpyInPlace(xbOffset, state.valueCache[curLayer], vOffset, headSize, a);
+                }
+            });
+
+            // O projection: input qDim, output dim
+            weights.wo[l].matmul(state.xb, state.xb2, dim, qDim);
+
+            state.x.addInPlace(state.xb2); // residual connection
+
+            rmsnorm(state.xb, state.x, weights.rms_ffn_weight[l], 0, dim, config.rmsNormEps());
+
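+            // SwiGLU feed-forward: hb = SiLU(w1 * xb) elementwise-multiplied by (w3 * xb), projected back by w2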
+            weights.w1[l].matmul(state.xb, state.hb, config.hiddenDim(), dim);
+            weights.w3[l].matmul(state.xb, state.hb2, config.hiddenDim(), dim);
+
+            state.hb.mapInPlace(value -> value / (float) (1.0 + Math.exp(-value))); // SiLU(x) = x * sigmoid(x)
+            state.hb.multiplyInPlace(state.hb2);
+
+            weights.w2[l].matmul(state.hb, state.xb, dim, config.hiddenDim());
+            state.x.addInPlace(state.xb); // residual connection
+        }
+
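+        // final RMSNorm over the activations, then project to vocabulary logits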
+        rmsnorm(state.x, state.x, weights.rms_final_weight, 0, dim, config.rmsNormEps());
+        weights.wcls.matmul(state.x, state.logits, config.vocabularySize(), dim);
+
+        return state.logits;
+    }
+
     public static FloatTensor forwardJavaQwen2(Model model, State state, int token, int position) {
         final Qwen2Configuration config = (Qwen2Configuration) model.configuration();
         final Qwen2StandardWeights weights = (Qwen2StandardWeights) model.weights();
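A minimal sketch of the dimension arithmetic the new forward pass relies on. The head counts and head_dim are taken from the inline comments in the diff (4096 = 32 * 128, 1024 = 8 * 128); the hidden size `dim = 5120` and the class name `DevstralDims` are illustrative assumptions, not values read from `DevstralConfiguration`.

```java
// Minimal sketch, not part of the PR: why forwardJavaDevstral cannot reuse the generic path.
// Head counts and headSize follow the diff's inline comments; dim = 5120 is an assumed
// hidden size chosen only to show that dim / numHeads != headSize for this model family.
public class DevstralDims {
    public static void main(String[] args) {
        int dim = 5120;                     // assumed hidden size (not from the diff)
        int numHeads = 32;                  // query heads
        int numKvHeads = 8;                 // key/value heads (grouped-query attention)
        int headSize = 128;                 // independent head_dim

        int qDim = numHeads * headSize;     // 4096: rows of wq, length of state.q
        int kvDim = numKvHeads * headSize;  // 1024: rows of wk/wv, KV-cache stride per position
        int kvMul = numHeads / numKvHeads;  // 4: query heads sharing one KV head

        System.out.println("dim / numHeads = " + (dim / numHeads) + " vs headSize = " + headSize);
        System.out.println("qDim = " + qDim + ", kvDim = " + kvDim + ", kvMul = " + kvMul);
    }
}
```

With these numbers the offsets in the attention loops line up: query head h reads its query at `h * headSize` inside the qDim-wide `state.q`, and shares KV head `h / kvMul` at offset `(h / kvMul) * headSize` inside each kvDim-wide row of the key/value caches.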