1#[cfg(test)]
4mod tests;
5pub mod zve64x_muldiv_helpers;
6
7use crate::v::vector_registers::VectorRegistersExt;
8use crate::v::zve64x::zve64x_helpers;
9use crate::{
10 ExecutableInstruction, ExecutionError, InterpreterState, ProgramCounter, VirtualMemory,
11};
12use ab_riscv_macros::instruction_execution;
13use ab_riscv_primitives::instructions::v::zve64x::muldiv::Zve64xMulDivInstruction;
14use ab_riscv_primitives::registers::general_purpose::{RegType, Register};
15use core::fmt;
16use core::ops::ControlFlow;
17
18#[instruction_execution]
19impl<Reg, ExtState, Memory, PC, InstructionHandler, CustomError>
20 ExecutableInstruction<
21 InterpreterState<Reg, ExtState, Memory, PC, InstructionHandler, CustomError>,
22 CustomError,
23 > for Zve64xMulDivInstruction<Reg>
24where
25 Reg: Register,
26 [(); Reg::N]:,
27 ExtState: VectorRegistersExt<Reg, CustomError>,
28 [(); ExtState::ELEN as usize]:,
29 [(); ExtState::VLEN as usize]:,
30 [(); ExtState::VLENB as usize]:,
31 Memory: VirtualMemory,
32 PC: ProgramCounter<Reg::Type, Memory, CustomError>,
33 CustomError: fmt::Debug,
34{
35 #[inline(always)]
36 fn execute(
37 self,
38 state: &mut InterpreterState<Reg, ExtState, Memory, PC, InstructionHandler, CustomError>,
39 ) -> Result<ControlFlow<()>, ExecutionError<Reg::Type, CustomError>> {
40 match self {
41 Self::VmulVv { vd, vs2, vs1, vm } => {
43 if !state.ext_state.vector_instructions_allowed() {
44 Err(ExecutionError::IllegalInstruction {
45 address: state
46 .instruction_fetcher
47 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
48 })?;
49 }
50 let vtype = state
51 .ext_state
52 .vtype()
53 .ok_or(ExecutionError::IllegalInstruction {
54 address: state
55 .instruction_fetcher
56 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
57 })?;
58 let group_regs = vtype.vlmul().register_count();
59 zve64x_muldiv_helpers::check_vreg_group_alignment(state, vd, group_regs)?;
60 zve64x_muldiv_helpers::check_vreg_group_alignment(state, vs2, group_regs)?;
61 zve64x_muldiv_helpers::check_vreg_group_alignment(state, vs1, group_regs)?;
62 if !vm && vd.bits() == 0 {
63 Err(ExecutionError::IllegalInstruction {
64 address: state
65 .instruction_fetcher
66 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
67 })?;
68 }
69 let sew = vtype.vsew();
70 let vl = state.ext_state.vl();
71 let vstart = u32::from(state.ext_state.vstart());
72 unsafe {
74 zve64x_muldiv_helpers::execute_arith_op(
75 state,
76 vd,
77 vs2,
78 zve64x_muldiv_helpers::OpSrc::Vreg(vs1.bits()),
79 vm,
80 vl,
81 vstart,
82 sew,
83 |a, b, _| a.wrapping_mul(b),
84 );
85 }
86 }
87 Self::VmulVx { vd, vs2, rs1, vm } => {
88 if !state.ext_state.vector_instructions_allowed() {
89 Err(ExecutionError::IllegalInstruction {
90 address: state
91 .instruction_fetcher
92 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
93 })?;
94 }
95 let vtype = state
96 .ext_state
97 .vtype()
98 .ok_or(ExecutionError::IllegalInstruction {
99 address: state
100 .instruction_fetcher
101 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
102 })?;
103 let group_regs = vtype.vlmul().register_count();
104 zve64x_muldiv_helpers::check_vreg_group_alignment(state, vd, group_regs)?;
105 zve64x_muldiv_helpers::check_vreg_group_alignment(state, vs2, group_regs)?;
106 if !vm && vd.bits() == 0 {
107 Err(ExecutionError::IllegalInstruction {
108 address: state
109 .instruction_fetcher
110 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
111 })?;
112 }
113 let sew = vtype.vsew();
114 let vl = state.ext_state.vl();
115 let vstart = u32::from(state.ext_state.vstart());
116 let scalar = state.regs.read(rs1).as_u64();
117 unsafe {
119 zve64x_muldiv_helpers::execute_arith_op(
120 state,
121 vd,
122 vs2,
123 zve64x_muldiv_helpers::OpSrc::Scalar(scalar),
124 vm,
125 vl,
126 vstart,
127 sew,
128 |a, b, _| a.wrapping_mul(b),
129 );
130 }
131 }
132 Self::VmulhVv { vd, vs2, vs1, vm } => {
134 if !state.ext_state.vector_instructions_allowed() {
135 Err(ExecutionError::IllegalInstruction {
136 address: state
137 .instruction_fetcher
138 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
139 })?;
140 }
141 let vtype = state
142 .ext_state
143 .vtype()
144 .ok_or(ExecutionError::IllegalInstruction {
145 address: state
146 .instruction_fetcher
147 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
148 })?;
149 if u32::from(vtype.vsew().bits()) == u64::BITS {
151 Err(ExecutionError::IllegalInstruction {
152 address: state
153 .instruction_fetcher
154 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
155 })?;
156 }
157 let group_regs = vtype.vlmul().register_count();
158 zve64x_muldiv_helpers::check_vreg_group_alignment(state, vd, group_regs)?;
159 zve64x_muldiv_helpers::check_vreg_group_alignment(state, vs2, group_regs)?;
160 zve64x_muldiv_helpers::check_vreg_group_alignment(state, vs1, group_regs)?;
161 if !vm && vd.bits() == 0 {
162 Err(ExecutionError::IllegalInstruction {
163 address: state
164 .instruction_fetcher
165 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
166 })?;
167 }
168 let sew = vtype.vsew();
169 let vl = state.ext_state.vl();
170 let vstart = u32::from(state.ext_state.vstart());
171 unsafe {
173 zve64x_muldiv_helpers::execute_arith_op(
174 state,
175 vd,
176 vs2,
177 zve64x_muldiv_helpers::OpSrc::Vreg(vs1.bits()),
178 vm,
179 vl,
180 vstart,
181 sew,
182 zve64x_muldiv_helpers::mulh_ss,
183 );
184 }
185 }
186 Self::VmulhVx { vd, vs2, rs1, vm } => {
187 if !state.ext_state.vector_instructions_allowed() {
188 Err(ExecutionError::IllegalInstruction {
189 address: state
190 .instruction_fetcher
191 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
192 })?;
193 }
194 let vtype = state
195 .ext_state
196 .vtype()
197 .ok_or(ExecutionError::IllegalInstruction {
198 address: state
199 .instruction_fetcher
200 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
201 })?;
202 if u32::from(vtype.vsew().bits()) == u64::BITS {
203 Err(ExecutionError::IllegalInstruction {
204 address: state
205 .instruction_fetcher
206 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
207 })?;
208 }
209 let group_regs = vtype.vlmul().register_count();
210 zve64x_muldiv_helpers::check_vreg_group_alignment(state, vd, group_regs)?;
211 zve64x_muldiv_helpers::check_vreg_group_alignment(state, vs2, group_regs)?;
212 if !vm && vd.bits() == 0 {
213 Err(ExecutionError::IllegalInstruction {
214 address: state
215 .instruction_fetcher
216 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
217 })?;
218 }
219 let sew = vtype.vsew();
220 let vl = state.ext_state.vl();
221 let vstart = u32::from(state.ext_state.vstart());
222 let scalar = state.regs.read(rs1).as_u64();
223 unsafe {
225 zve64x_muldiv_helpers::execute_arith_op(
226 state,
227 vd,
228 vs2,
229 zve64x_muldiv_helpers::OpSrc::Scalar(scalar),
230 vm,
231 vl,
232 vstart,
233 sew,
234 zve64x_muldiv_helpers::mulh_ss,
235 );
236 }
237 }
238 Self::VmulhuVv { vd, vs2, vs1, vm } => {
240 if !state.ext_state.vector_instructions_allowed() {
241 Err(ExecutionError::IllegalInstruction {
242 address: state
243 .instruction_fetcher
244 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
245 })?;
246 }
247 let vtype = state
248 .ext_state
249 .vtype()
250 .ok_or(ExecutionError::IllegalInstruction {
251 address: state
252 .instruction_fetcher
253 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
254 })?;
255 if u32::from(vtype.vsew().bits()) == u64::BITS {
256 Err(ExecutionError::IllegalInstruction {
257 address: state
258 .instruction_fetcher
259 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
260 })?;
261 }
262 let group_regs = vtype.vlmul().register_count();
263 zve64x_muldiv_helpers::check_vreg_group_alignment(state, vd, group_regs)?;
264 zve64x_muldiv_helpers::check_vreg_group_alignment(state, vs2, group_regs)?;
265 zve64x_muldiv_helpers::check_vreg_group_alignment(state, vs1, group_regs)?;
266 if !vm && vd.bits() == 0 {
267 Err(ExecutionError::IllegalInstruction {
268 address: state
269 .instruction_fetcher
270 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
271 })?;
272 }
273 let sew = vtype.vsew();
274 let vl = state.ext_state.vl();
275 let vstart = u32::from(state.ext_state.vstart());
276 unsafe {
278 zve64x_muldiv_helpers::execute_arith_op(
279 state,
280 vd,
281 vs2,
282 zve64x_muldiv_helpers::OpSrc::Vreg(vs1.bits()),
283 vm,
284 vl,
285 vstart,
286 sew,
287 zve64x_muldiv_helpers::mulhu_uu,
288 );
289 }
290 }
291 Self::VmulhuVx { vd, vs2, rs1, vm } => {
292 if !state.ext_state.vector_instructions_allowed() {
293 Err(ExecutionError::IllegalInstruction {
294 address: state
295 .instruction_fetcher
296 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
297 })?;
298 }
299 let vtype = state
300 .ext_state
301 .vtype()
302 .ok_or(ExecutionError::IllegalInstruction {
303 address: state
304 .instruction_fetcher
305 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
306 })?;
307 if u32::from(vtype.vsew().bits()) == u64::BITS {
308 Err(ExecutionError::IllegalInstruction {
309 address: state
310 .instruction_fetcher
311 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
312 })?;
313 }
314 let group_regs = vtype.vlmul().register_count();
315 zve64x_muldiv_helpers::check_vreg_group_alignment(state, vd, group_regs)?;
316 zve64x_muldiv_helpers::check_vreg_group_alignment(state, vs2, group_regs)?;
317 if !vm && vd.bits() == 0 {
318 Err(ExecutionError::IllegalInstruction {
319 address: state
320 .instruction_fetcher
321 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
322 })?;
323 }
324 let sew = vtype.vsew();
325 let vl = state.ext_state.vl();
326 let vstart = u32::from(state.ext_state.vstart());
327 let scalar = state.regs.read(rs1).as_u64();
328 unsafe {
330 zve64x_muldiv_helpers::execute_arith_op(
331 state,
332 vd,
333 vs2,
334 zve64x_muldiv_helpers::OpSrc::Scalar(scalar),
335 vm,
336 vl,
337 vstart,
338 sew,
339 zve64x_muldiv_helpers::mulhu_uu,
340 );
341 }
342 }
343 Self::VmulhsuVv { vd, vs2, vs1, vm } => {
345 if !state.ext_state.vector_instructions_allowed() {
346 Err(ExecutionError::IllegalInstruction {
347 address: state
348 .instruction_fetcher
349 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
350 })?;
351 }
352 let vtype = state
353 .ext_state
354 .vtype()
355 .ok_or(ExecutionError::IllegalInstruction {
356 address: state
357 .instruction_fetcher
358 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
359 })?;
360 if u32::from(vtype.vsew().bits()) == u64::BITS {
361 Err(ExecutionError::IllegalInstruction {
362 address: state
363 .instruction_fetcher
364 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
365 })?;
366 }
367 let group_regs = vtype.vlmul().register_count();
368 zve64x_muldiv_helpers::check_vreg_group_alignment(state, vd, group_regs)?;
369 zve64x_muldiv_helpers::check_vreg_group_alignment(state, vs2, group_regs)?;
370 zve64x_muldiv_helpers::check_vreg_group_alignment(state, vs1, group_regs)?;
371 if !vm && vd.bits() == 0 {
372 Err(ExecutionError::IllegalInstruction {
373 address: state
374 .instruction_fetcher
375 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
376 })?;
377 }
378 let sew = vtype.vsew();
379 let vl = state.ext_state.vl();
380 let vstart = u32::from(state.ext_state.vstart());
381 unsafe {
383 zve64x_muldiv_helpers::execute_arith_op(
384 state,
385 vd,
386 vs2,
387 zve64x_muldiv_helpers::OpSrc::Vreg(vs1.bits()),
388 vm,
389 vl,
390 vstart,
391 sew,
392 zve64x_muldiv_helpers::mulhsu_su,
394 );
395 }
396 }
397 Self::VmulhsuVx { vd, vs2, rs1, vm } => {
398 if !state.ext_state.vector_instructions_allowed() {
399 Err(ExecutionError::IllegalInstruction {
400 address: state
401 .instruction_fetcher
402 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
403 })?;
404 }
405 let vtype = state
406 .ext_state
407 .vtype()
408 .ok_or(ExecutionError::IllegalInstruction {
409 address: state
410 .instruction_fetcher
411 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
412 })?;
413 if u32::from(vtype.vsew().bits()) == u64::BITS {
414 Err(ExecutionError::IllegalInstruction {
415 address: state
416 .instruction_fetcher
417 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
418 })?;
419 }
420 let group_regs = vtype.vlmul().register_count();
421 zve64x_muldiv_helpers::check_vreg_group_alignment(state, vd, group_regs)?;
422 zve64x_muldiv_helpers::check_vreg_group_alignment(state, vs2, group_regs)?;
423 if !vm && vd.bits() == 0 {
424 Err(ExecutionError::IllegalInstruction {
425 address: state
426 .instruction_fetcher
427 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
428 })?;
429 }
430 let sew = vtype.vsew();
431 let vl = state.ext_state.vl();
432 let vstart = u32::from(state.ext_state.vstart());
433 let scalar = state.regs.read(rs1).as_u64();
435 unsafe {
437 zve64x_muldiv_helpers::execute_arith_op(
438 state,
439 vd,
440 vs2,
441 zve64x_muldiv_helpers::OpSrc::Scalar(scalar),
442 vm,
443 vl,
444 vstart,
445 sew,
446 zve64x_muldiv_helpers::mulhsu_su,
448 );
449 }
450 }
451 Self::VdivuVv { vd, vs2, vs1, vm } => {
453 if !state.ext_state.vector_instructions_allowed() {
454 Err(ExecutionError::IllegalInstruction {
455 address: state
456 .instruction_fetcher
457 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
458 })?;
459 }
460 let vtype = state
461 .ext_state
462 .vtype()
463 .ok_or(ExecutionError::IllegalInstruction {
464 address: state
465 .instruction_fetcher
466 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
467 })?;
468 let group_regs = vtype.vlmul().register_count();
469 zve64x_muldiv_helpers::check_vreg_group_alignment(state, vd, group_regs)?;
470 zve64x_muldiv_helpers::check_vreg_group_alignment(state, vs2, group_regs)?;
471 zve64x_muldiv_helpers::check_vreg_group_alignment(state, vs1, group_regs)?;
472 if !vm && vd.bits() == 0 {
473 Err(ExecutionError::IllegalInstruction {
474 address: state
475 .instruction_fetcher
476 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
477 })?;
478 }
479 let sew = vtype.vsew();
480 let vl = state.ext_state.vl();
481 let vstart = u32::from(state.ext_state.vstart());
482 unsafe {
484 zve64x_muldiv_helpers::execute_arith_op(
485 state,
486 vd,
487 vs2,
488 zve64x_muldiv_helpers::OpSrc::Vreg(vs1.bits()),
489 vm,
490 vl,
491 vstart,
492 sew,
493 |a, b, sew| {
495 let mask = zve64x_muldiv_helpers::sew_mask(sew);
496 let dividend = a & mask;
497 let divisor = b & mask;
498 dividend.checked_div(divisor).unwrap_or(mask)
499 },
500 );
501 }
502 }
503 Self::VdivuVx { vd, vs2, rs1, vm } => {
504 if !state.ext_state.vector_instructions_allowed() {
505 Err(ExecutionError::IllegalInstruction {
506 address: state
507 .instruction_fetcher
508 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
509 })?;
510 }
511 let vtype = state
512 .ext_state
513 .vtype()
514 .ok_or(ExecutionError::IllegalInstruction {
515 address: state
516 .instruction_fetcher
517 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
518 })?;
519 let group_regs = vtype.vlmul().register_count();
520 zve64x_muldiv_helpers::check_vreg_group_alignment(state, vd, group_regs)?;
521 zve64x_muldiv_helpers::check_vreg_group_alignment(state, vs2, group_regs)?;
522 if !vm && vd.bits() == 0 {
523 Err(ExecutionError::IllegalInstruction {
524 address: state
525 .instruction_fetcher
526 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
527 })?;
528 }
529 let sew = vtype.vsew();
530 let vl = state.ext_state.vl();
531 let vstart = u32::from(state.ext_state.vstart());
532 let scalar = state.regs.read(rs1).as_u64();
533 unsafe {
535 zve64x_muldiv_helpers::execute_arith_op(
536 state,
537 vd,
538 vs2,
539 zve64x_muldiv_helpers::OpSrc::Scalar(scalar),
540 vm,
541 vl,
542 vstart,
543 sew,
544 |a, b, sew| {
545 let mask = zve64x_muldiv_helpers::sew_mask(sew);
546 let dividend = a & mask;
547 let divisor = b & mask;
548 dividend.checked_div(divisor).unwrap_or(mask)
549 },
550 );
551 }
552 }
553 Self::VdivVv { vd, vs2, vs1, vm } => {
555 if !state.ext_state.vector_instructions_allowed() {
556 Err(ExecutionError::IllegalInstruction {
557 address: state
558 .instruction_fetcher
559 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
560 })?;
561 }
562 let vtype = state
563 .ext_state
564 .vtype()
565 .ok_or(ExecutionError::IllegalInstruction {
566 address: state
567 .instruction_fetcher
568 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
569 })?;
570 let group_regs = vtype.vlmul().register_count();
571 zve64x_muldiv_helpers::check_vreg_group_alignment(state, vd, group_regs)?;
572 zve64x_muldiv_helpers::check_vreg_group_alignment(state, vs2, group_regs)?;
573 zve64x_muldiv_helpers::check_vreg_group_alignment(state, vs1, group_regs)?;
574 if !vm && vd.bits() == 0 {
575 Err(ExecutionError::IllegalInstruction {
576 address: state
577 .instruction_fetcher
578 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
579 })?;
580 }
581 let sew = vtype.vsew();
582 let vl = state.ext_state.vl();
583 let vstart = u32::from(state.ext_state.vstart());
584 unsafe {
586 zve64x_muldiv_helpers::execute_arith_op(
587 state,
588 vd,
589 vs2,
590 zve64x_muldiv_helpers::OpSrc::Vreg(vs1.bits()),
591 vm,
592 vl,
593 vstart,
594 sew,
595 zve64x_muldiv_helpers::sdiv,
596 );
597 }
598 }
599 Self::VdivVx { vd, vs2, rs1, vm } => {
600 if !state.ext_state.vector_instructions_allowed() {
601 Err(ExecutionError::IllegalInstruction {
602 address: state
603 .instruction_fetcher
604 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
605 })?;
606 }
607 let vtype = state
608 .ext_state
609 .vtype()
610 .ok_or(ExecutionError::IllegalInstruction {
611 address: state
612 .instruction_fetcher
613 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
614 })?;
615 let group_regs = vtype.vlmul().register_count();
616 zve64x_muldiv_helpers::check_vreg_group_alignment(state, vd, group_regs)?;
617 zve64x_muldiv_helpers::check_vreg_group_alignment(state, vs2, group_regs)?;
618 if !vm && vd.bits() == 0 {
619 Err(ExecutionError::IllegalInstruction {
620 address: state
621 .instruction_fetcher
622 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
623 })?;
624 }
625 let sew = vtype.vsew();
626 let vl = state.ext_state.vl();
627 let vstart = u32::from(state.ext_state.vstart());
628 let scalar = state.regs.read(rs1).as_u64();
629 unsafe {
631 zve64x_muldiv_helpers::execute_arith_op(
632 state,
633 vd,
634 vs2,
635 zve64x_muldiv_helpers::OpSrc::Scalar(scalar),
636 vm,
637 vl,
638 vstart,
639 sew,
640 zve64x_muldiv_helpers::sdiv,
641 );
642 }
643 }
644 Self::VremuVv { vd, vs2, vs1, vm } => {
646 if !state.ext_state.vector_instructions_allowed() {
647 Err(ExecutionError::IllegalInstruction {
648 address: state
649 .instruction_fetcher
650 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
651 })?;
652 }
653 let vtype = state
654 .ext_state
655 .vtype()
656 .ok_or(ExecutionError::IllegalInstruction {
657 address: state
658 .instruction_fetcher
659 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
660 })?;
661 let group_regs = vtype.vlmul().register_count();
662 zve64x_muldiv_helpers::check_vreg_group_alignment(state, vd, group_regs)?;
663 zve64x_muldiv_helpers::check_vreg_group_alignment(state, vs2, group_regs)?;
664 zve64x_muldiv_helpers::check_vreg_group_alignment(state, vs1, group_regs)?;
665 if !vm && vd.bits() == 0 {
666 Err(ExecutionError::IllegalInstruction {
667 address: state
668 .instruction_fetcher
669 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
670 })?;
671 }
672 let sew = vtype.vsew();
673 let vl = state.ext_state.vl();
674 let vstart = u32::from(state.ext_state.vstart());
675 unsafe {
677 zve64x_muldiv_helpers::execute_arith_op(
678 state,
679 vd,
680 vs2,
681 zve64x_muldiv_helpers::OpSrc::Vreg(vs1.bits()),
682 vm,
683 vl,
684 vstart,
685 sew,
686 |a, b, sew| {
688 let mask = zve64x_muldiv_helpers::sew_mask(sew);
689 let dividend = a & mask;
690 let divisor = b & mask;
691 if divisor == 0 {
692 dividend
693 } else {
694 dividend % divisor
695 }
696 },
697 );
698 }
699 }
700 Self::VremuVx { vd, vs2, rs1, vm } => {
701 if !state.ext_state.vector_instructions_allowed() {
702 Err(ExecutionError::IllegalInstruction {
703 address: state
704 .instruction_fetcher
705 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
706 })?;
707 }
708 let vtype = state
709 .ext_state
710 .vtype()
711 .ok_or(ExecutionError::IllegalInstruction {
712 address: state
713 .instruction_fetcher
714 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
715 })?;
716 let group_regs = vtype.vlmul().register_count();
717 zve64x_muldiv_helpers::check_vreg_group_alignment(state, vd, group_regs)?;
718 zve64x_muldiv_helpers::check_vreg_group_alignment(state, vs2, group_regs)?;
719 if !vm && vd.bits() == 0 {
720 Err(ExecutionError::IllegalInstruction {
721 address: state
722 .instruction_fetcher
723 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
724 })?;
725 }
726 let sew = vtype.vsew();
727 let vl = state.ext_state.vl();
728 let vstart = u32::from(state.ext_state.vstart());
729 let scalar = state.regs.read(rs1).as_u64();
730 unsafe {
732 zve64x_muldiv_helpers::execute_arith_op(
733 state,
734 vd,
735 vs2,
736 zve64x_muldiv_helpers::OpSrc::Scalar(scalar),
737 vm,
738 vl,
739 vstart,
740 sew,
741 |a, b, sew| {
742 let mask = zve64x_muldiv_helpers::sew_mask(sew);
743 let dividend = a & mask;
744 let divisor = b & mask;
745 if divisor == 0 {
746 dividend
747 } else {
748 dividend % divisor
749 }
750 },
751 );
752 }
753 }
754 Self::VremVv { vd, vs2, vs1, vm } => {
756 if !state.ext_state.vector_instructions_allowed() {
757 Err(ExecutionError::IllegalInstruction {
758 address: state
759 .instruction_fetcher
760 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
761 })?;
762 }
763 let vtype = state
764 .ext_state
765 .vtype()
766 .ok_or(ExecutionError::IllegalInstruction {
767 address: state
768 .instruction_fetcher
769 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
770 })?;
771 let group_regs = vtype.vlmul().register_count();
772 zve64x_muldiv_helpers::check_vreg_group_alignment(state, vd, group_regs)?;
773 zve64x_muldiv_helpers::check_vreg_group_alignment(state, vs2, group_regs)?;
774 zve64x_muldiv_helpers::check_vreg_group_alignment(state, vs1, group_regs)?;
775 if !vm && vd.bits() == 0 {
776 Err(ExecutionError::IllegalInstruction {
777 address: state
778 .instruction_fetcher
779 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
780 })?;
781 }
782 let sew = vtype.vsew();
783 let vl = state.ext_state.vl();
784 let vstart = u32::from(state.ext_state.vstart());
785 unsafe {
787 zve64x_muldiv_helpers::execute_arith_op(
788 state,
789 vd,
790 vs2,
791 zve64x_muldiv_helpers::OpSrc::Vreg(vs1.bits()),
792 vm,
793 vl,
794 vstart,
795 sew,
796 zve64x_muldiv_helpers::srem,
797 );
798 }
799 }
800 Self::VremVx { vd, vs2, rs1, vm } => {
801 if !state.ext_state.vector_instructions_allowed() {
802 Err(ExecutionError::IllegalInstruction {
803 address: state
804 .instruction_fetcher
805 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
806 })?;
807 }
808 let vtype = state
809 .ext_state
810 .vtype()
811 .ok_or(ExecutionError::IllegalInstruction {
812 address: state
813 .instruction_fetcher
814 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
815 })?;
816 let group_regs = vtype.vlmul().register_count();
817 zve64x_muldiv_helpers::check_vreg_group_alignment(state, vd, group_regs)?;
818 zve64x_muldiv_helpers::check_vreg_group_alignment(state, vs2, group_regs)?;
819 if !vm && vd.bits() == 0 {
820 Err(ExecutionError::IllegalInstruction {
821 address: state
822 .instruction_fetcher
823 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
824 })?;
825 }
826 let sew = vtype.vsew();
827 let vl = state.ext_state.vl();
828 let vstart = u32::from(state.ext_state.vstart());
829 let scalar = state.regs.read(rs1).as_u64();
830 unsafe {
832 zve64x_muldiv_helpers::execute_arith_op(
833 state,
834 vd,
835 vs2,
836 zve64x_muldiv_helpers::OpSrc::Scalar(scalar),
837 vm,
838 vl,
839 vstart,
840 sew,
841 zve64x_muldiv_helpers::srem,
842 );
843 }
844 }
845 Self::VwmuluVv { vd, vs2, vs1, vm } => {
847 if !state.ext_state.vector_instructions_allowed() {
848 Err(ExecutionError::IllegalInstruction {
849 address: state
850 .instruction_fetcher
851 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
852 })?;
853 }
854 let vtype = state
855 .ext_state
856 .vtype()
857 .ok_or(ExecutionError::IllegalInstruction {
858 address: state
859 .instruction_fetcher
860 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
861 })?;
862 if u32::from(vtype.vsew().bits()) == u64::BITS {
864 Err(ExecutionError::IllegalInstruction {
865 address: state
866 .instruction_fetcher
867 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
868 })?;
869 }
870 let group_regs = vtype.vlmul().register_count();
871 let dest_group_regs = zve64x_muldiv_helpers::widening_dest_register_count(
873 vtype.vlmul(),
874 )
875 .ok_or(ExecutionError::IllegalInstruction {
876 address: state
877 .instruction_fetcher
878 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
879 })?;
880 zve64x_muldiv_helpers::check_vreg_group_alignment(state, vd, dest_group_regs)?;
881 zve64x_muldiv_helpers::check_vreg_group_alignment(state, vs2, group_regs)?;
882 zve64x_muldiv_helpers::check_vreg_group_alignment(state, vs1, group_regs)?;
883 if !vm && vd.bits() == 0 {
884 Err(ExecutionError::IllegalInstruction {
885 address: state
886 .instruction_fetcher
887 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
888 })?;
889 }
890 zve64x_muldiv_helpers::check_no_widening_overlap(
892 state,
893 vd,
894 vs2,
895 dest_group_regs,
896 group_regs,
897 )?;
898 zve64x_muldiv_helpers::check_no_widening_overlap(
899 state,
900 vd,
901 vs1,
902 dest_group_regs,
903 group_regs,
904 )?;
905 let sew = vtype.vsew();
906 let vl = state.ext_state.vl();
907 let vstart = u32::from(state.ext_state.vstart());
908 unsafe {
910 zve64x_muldiv_helpers::execute_widening_op(
911 state,
912 vd,
913 vs2,
914 zve64x_muldiv_helpers::OpSrc::Vreg(vs1.bits()),
915 vm,
916 vl,
917 vstart,
918 sew,
919 |a, b, sew| {
920 let mask = zve64x_muldiv_helpers::sew_mask(sew);
921 (a & mask).wrapping_mul(b & mask)
922 },
923 );
924 }
925 }
926 Self::VwmuluVx { vd, vs2, rs1, vm } => {
927 if !state.ext_state.vector_instructions_allowed() {
928 Err(ExecutionError::IllegalInstruction {
929 address: state
930 .instruction_fetcher
931 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
932 })?;
933 }
934 let vtype = state
935 .ext_state
936 .vtype()
937 .ok_or(ExecutionError::IllegalInstruction {
938 address: state
939 .instruction_fetcher
940 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
941 })?;
942 if u32::from(vtype.vsew().bits()) == u64::BITS {
943 Err(ExecutionError::IllegalInstruction {
944 address: state
945 .instruction_fetcher
946 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
947 })?;
948 }
949 let group_regs = vtype.vlmul().register_count();
950 let dest_group_regs = zve64x_muldiv_helpers::widening_dest_register_count(
951 vtype.vlmul(),
952 )
953 .ok_or(ExecutionError::IllegalInstruction {
954 address: state
955 .instruction_fetcher
956 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
957 })?;
958 zve64x_muldiv_helpers::check_vreg_group_alignment(state, vd, dest_group_regs)?;
959 zve64x_muldiv_helpers::check_vreg_group_alignment(state, vs2, group_regs)?;
960 if !vm && vd.bits() == 0 {
961 Err(ExecutionError::IllegalInstruction {
962 address: state
963 .instruction_fetcher
964 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
965 })?;
966 }
967 zve64x_muldiv_helpers::check_no_widening_overlap(
968 state,
969 vd,
970 vs2,
971 dest_group_regs,
972 group_regs,
973 )?;
974 let sew = vtype.vsew();
975 let vl = state.ext_state.vl();
976 let vstart = u32::from(state.ext_state.vstart());
977 let scalar = state.regs.read(rs1).as_u64();
978 unsafe {
980 zve64x_muldiv_helpers::execute_widening_op(
981 state,
982 vd,
983 vs2,
984 zve64x_muldiv_helpers::OpSrc::Scalar(scalar),
985 vm,
986 vl,
987 vstart,
988 sew,
989 |a, b, sew| {
990 let mask = zve64x_muldiv_helpers::sew_mask(sew);
991 (a & mask).wrapping_mul(b & mask)
992 },
993 );
994 }
995 }
996 Self::VwmulsuVv { vd, vs2, vs1, vm } => {
998 if !state.ext_state.vector_instructions_allowed() {
999 Err(ExecutionError::IllegalInstruction {
1000 address: state
1001 .instruction_fetcher
1002 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
1003 })?;
1004 }
1005 let vtype = state
1006 .ext_state
1007 .vtype()
1008 .ok_or(ExecutionError::IllegalInstruction {
1009 address: state
1010 .instruction_fetcher
1011 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
1012 })?;
1013 if u32::from(vtype.vsew().bits()) == u64::BITS {
1014 Err(ExecutionError::IllegalInstruction {
1015 address: state
1016 .instruction_fetcher
1017 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
1018 })?;
1019 }
1020 let group_regs = vtype.vlmul().register_count();
1021 let dest_group_regs = zve64x_muldiv_helpers::widening_dest_register_count(
1022 vtype.vlmul(),
1023 )
1024 .ok_or(ExecutionError::IllegalInstruction {
1025 address: state
1026 .instruction_fetcher
1027 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
1028 })?;
1029 zve64x_muldiv_helpers::check_vreg_group_alignment(state, vd, dest_group_regs)?;
1030 zve64x_muldiv_helpers::check_vreg_group_alignment(state, vs2, group_regs)?;
1031 zve64x_muldiv_helpers::check_vreg_group_alignment(state, vs1, group_regs)?;
1032 if !vm && vd.bits() == 0 {
1033 Err(ExecutionError::IllegalInstruction {
1034 address: state
1035 .instruction_fetcher
1036 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
1037 })?;
1038 }
1039 zve64x_muldiv_helpers::check_no_widening_overlap(
1040 state,
1041 vd,
1042 vs2,
1043 dest_group_regs,
1044 group_regs,
1045 )?;
1046 zve64x_muldiv_helpers::check_no_widening_overlap(
1047 state,
1048 vd,
1049 vs1,
1050 dest_group_regs,
1051 group_regs,
1052 )?;
1053 let sew = vtype.vsew();
1054 let vl = state.ext_state.vl();
1055 let vstart = u32::from(state.ext_state.vstart());
1056 unsafe {
1058 zve64x_muldiv_helpers::execute_widening_op(
1059 state,
1060 vd,
1061 vs2,
1062 zve64x_muldiv_helpers::OpSrc::Vreg(vs1.bits()),
1063 vm,
1064 vl,
1065 vstart,
1066 sew,
1067 |a, b, sew| {
1069 let sa = zve64x_muldiv_helpers::sign_extend(a, sew);
1070 let ub = b & zve64x_muldiv_helpers::sew_mask(sew);
1071 sa.cast_unsigned().wrapping_mul(ub)
1072 },
1073 );
1074 }
1075 }
1076 Self::VwmulsuVx { vd, vs2, rs1, vm } => {
1077 if !state.ext_state.vector_instructions_allowed() {
1078 Err(ExecutionError::IllegalInstruction {
1079 address: state
1080 .instruction_fetcher
1081 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
1082 })?;
1083 }
1084 let vtype = state
1085 .ext_state
1086 .vtype()
1087 .ok_or(ExecutionError::IllegalInstruction {
1088 address: state
1089 .instruction_fetcher
1090 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
1091 })?;
1092 if u32::from(vtype.vsew().bits()) == u64::BITS {
1093 Err(ExecutionError::IllegalInstruction {
1094 address: state
1095 .instruction_fetcher
1096 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
1097 })?;
1098 }
1099 let group_regs = vtype.vlmul().register_count();
1100 let dest_group_regs = zve64x_muldiv_helpers::widening_dest_register_count(
1101 vtype.vlmul(),
1102 )
1103 .ok_or(ExecutionError::IllegalInstruction {
1104 address: state
1105 .instruction_fetcher
1106 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
1107 })?;
1108 zve64x_muldiv_helpers::check_vreg_group_alignment(state, vd, dest_group_regs)?;
1109 zve64x_muldiv_helpers::check_vreg_group_alignment(state, vs2, group_regs)?;
1110 if !vm && vd.bits() == 0 {
1111 Err(ExecutionError::IllegalInstruction {
1112 address: state
1113 .instruction_fetcher
1114 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
1115 })?;
1116 }
1117 zve64x_muldiv_helpers::check_no_widening_overlap(
1118 state,
1119 vd,
1120 vs2,
1121 dest_group_regs,
1122 group_regs,
1123 )?;
1124 let sew = vtype.vsew();
1125 let vl = state.ext_state.vl();
1126 let vstart = u32::from(state.ext_state.vstart());
1127 let scalar = state.regs.read(rs1).as_u64();
1129 unsafe {
1131 zve64x_muldiv_helpers::execute_widening_op(
1132 state,
1133 vd,
1134 vs2,
1135 zve64x_muldiv_helpers::OpSrc::Scalar(scalar),
1136 vm,
1137 vl,
1138 vstart,
1139 sew,
1140 |a, b, sew| {
1141 let sa = zve64x_muldiv_helpers::sign_extend(a, sew);
1142 let ub = b & zve64x_muldiv_helpers::sew_mask(sew);
1143 sa.cast_unsigned().wrapping_mul(ub)
1144 },
1145 );
1146 }
1147 }
            // vwmul.vv: widening signed multiply, vd[2*SEW] = sext(vs2[i]) * sext(vs1[i]).
            Self::VwmulVv { vd, vs2, vs1, vm } => {
                // Vector instructions are illegal while the vector unit is disabled.
                if !state.ext_state.vector_instructions_allowed() {
                    Err(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                }
                // A valid vtype must have been established by a prior vset{i}vl{i}.
                let vtype = state
                    .ext_state
                    .vtype()
                    .ok_or(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                // Widening result is 2*SEW wide; SEW == 64 (ELEN) cannot widen.
                if u32::from(vtype.vsew().bits()) == u64::BITS {
                    Err(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                }
                let group_regs = vtype.vlmul().register_count();
                // Destination uses the doubled (2*LMUL) group; `None` => illegal for widening.
                let dest_group_regs = zve64x_muldiv_helpers::widening_dest_register_count(
                    vtype.vlmul(),
                )
                .ok_or(ExecutionError::IllegalInstruction {
                    address: state
                        .instruction_fetcher
                        .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                })?;
                // Register indices must be aligned to their respective group sizes.
                zve64x_muldiv_helpers::check_vreg_group_alignment(state, vd, dest_group_regs)?;
                zve64x_muldiv_helpers::check_vreg_group_alignment(state, vs2, group_regs)?;
                zve64x_muldiv_helpers::check_vreg_group_alignment(state, vs1, group_regs)?;
                // A masked operation may not write v0 (would clobber the mask register).
                if !vm && vd.bits() == 0 {
                    Err(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                }
                // The wide vd group must not partially overlap either narrow source group.
                zve64x_muldiv_helpers::check_no_widening_overlap(
                    state,
                    vd,
                    vs2,
                    dest_group_regs,
                    group_regs,
                )?;
                zve64x_muldiv_helpers::check_no_widening_overlap(
                    state,
                    vd,
                    vs1,
                    dest_group_regs,
                    group_regs,
                )?;
                let sew = vtype.vsew();
                let vl = state.ext_state.vl();
                let vstart = u32::from(state.ext_state.vstart());
                // SAFETY: register-group alignment and overlap were validated above;
                // remaining preconditions are the callee's contract (not visible in
                // this file) -- NOTE(review): confirm.
                unsafe {
                    zve64x_muldiv_helpers::execute_widening_op(
                        state,
                        vd,
                        vs2,
                        zve64x_muldiv_helpers::OpSrc::Vreg(vs1.bits()),
                        vm,
                        vl,
                        vstart,
                        sew,
                        // Sign-extend both SEW-wide operands, multiply to 2*SEW (wrapping).
                        |a, b, sew| {
                            let sa = zve64x_muldiv_helpers::sign_extend(a, sew);
                            let sb = zve64x_muldiv_helpers::sign_extend(b, sew);
                            sa.cast_unsigned().wrapping_mul(sb.cast_unsigned())
                        },
                    );
                }
            }
            // vwmul.vx: widening signed multiply, vd[2*SEW] = sext(vs2[i]) * sext(x[rs1]).
            Self::VwmulVx { vd, vs2, rs1, vm } => {
                // Vector instructions are illegal while the vector unit is disabled.
                if !state.ext_state.vector_instructions_allowed() {
                    Err(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                }
                // A valid vtype must have been established by a prior vset{i}vl{i}.
                let vtype = state
                    .ext_state
                    .vtype()
                    .ok_or(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                // Widening result is 2*SEW wide; SEW == 64 (ELEN) cannot widen.
                if u32::from(vtype.vsew().bits()) == u64::BITS {
                    Err(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                }
                let group_regs = vtype.vlmul().register_count();
                // Destination uses the doubled (2*LMUL) group; `None` => illegal for widening.
                let dest_group_regs = zve64x_muldiv_helpers::widening_dest_register_count(
                    vtype.vlmul(),
                )
                .ok_or(ExecutionError::IllegalInstruction {
                    address: state
                        .instruction_fetcher
                        .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                })?;
                // Register indices must be aligned to their respective group sizes.
                zve64x_muldiv_helpers::check_vreg_group_alignment(state, vd, dest_group_regs)?;
                zve64x_muldiv_helpers::check_vreg_group_alignment(state, vs2, group_regs)?;
                // A masked operation may not write v0 (would clobber the mask register).
                if !vm && vd.bits() == 0 {
                    Err(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                }
                // The wide vd group must not partially overlap the narrow vs2 group.
                zve64x_muldiv_helpers::check_no_widening_overlap(
                    state,
                    vd,
                    vs2,
                    dest_group_regs,
                    group_regs,
                )?;
                let sew = vtype.vsew();
                let vl = state.ext_state.vl();
                let vstart = u32::from(state.ext_state.vstart());
                let scalar = state.regs.read(rs1).as_u64();
                // SAFETY: register-group alignment and overlap were validated above;
                // remaining preconditions are the callee's contract (not visible in
                // this file) -- NOTE(review): confirm.
                unsafe {
                    zve64x_muldiv_helpers::execute_widening_op(
                        state,
                        vd,
                        vs2,
                        zve64x_muldiv_helpers::OpSrc::Scalar(scalar),
                        vm,
                        vl,
                        vstart,
                        sew,
                        // Sign-extend both SEW-wide operands, multiply to 2*SEW (wrapping).
                        |a, b, sew| {
                            let sa = zve64x_muldiv_helpers::sign_extend(a, sew);
                            let sb = zve64x_muldiv_helpers::sign_extend(b, sew);
                            sa.cast_unsigned().wrapping_mul(sb.cast_unsigned())
                        },
                    );
                }
            }
            // vmacc.vv: vd[i] = (vs1[i] * vs2[i]) + vd[i], modular (wrapping) arithmetic.
            Self::VmaccVv { vd, vs1, vs2, vm } => {
                // Vector instructions are illegal while the vector unit is disabled.
                if !state.ext_state.vector_instructions_allowed() {
                    Err(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                }
                // A valid vtype must have been established by a prior vset{i}vl{i}.
                let vtype = state
                    .ext_state
                    .vtype()
                    .ok_or(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                // All operand register indices must be aligned to the LMUL group size.
                let group_regs = vtype.vlmul().register_count();
                zve64x_muldiv_helpers::check_vreg_group_alignment(state, vd, group_regs)?;
                zve64x_muldiv_helpers::check_vreg_group_alignment(state, vs2, group_regs)?;
                zve64x_muldiv_helpers::check_vreg_group_alignment(state, vs1, group_regs)?;
                // A masked operation may not write v0 (would clobber the mask register).
                if !vm && vd.bits() == 0 {
                    Err(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                }
                let sew = vtype.vsew();
                let vl = state.ext_state.vl();
                let vstart = u32::from(state.ext_state.vstart());
                // SAFETY: register-group alignment was validated above; remaining
                // preconditions are the callee's contract (not visible in this
                // file) -- NOTE(review): confirm.
                unsafe {
                    zve64x_muldiv_helpers::execute_muladd_op(
                        state,
                        vd,
                        vs1.bits(),
                        zve64x_muldiv_helpers::OpSrc::Vreg(vs2.bits()),
                        vm,
                        vl,
                        vstart,
                        sew,
                        // acc = vd element, a = vs1 element, b = vs2 element.
                        |acc, a, b, _| acc.wrapping_add(a.wrapping_mul(b)),
                    );
                }
            }
            // vmacc.vx: vd[i] = (x[rs1] * vs2[i]) + vd[i], modular (wrapping) arithmetic.
            Self::VmaccVx { vd, rs1, vs2, vm } => {
                // Vector instructions are illegal while the vector unit is disabled.
                if !state.ext_state.vector_instructions_allowed() {
                    Err(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                }
                // A valid vtype must have been established by a prior vset{i}vl{i}.
                let vtype = state
                    .ext_state
                    .vtype()
                    .ok_or(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                // Vector operand register indices must be aligned to the LMUL group size.
                let group_regs = vtype.vlmul().register_count();
                zve64x_muldiv_helpers::check_vreg_group_alignment(state, vd, group_regs)?;
                zve64x_muldiv_helpers::check_vreg_group_alignment(state, vs2, group_regs)?;
                // A masked operation may not write v0 (would clobber the mask register).
                if !vm && vd.bits() == 0 {
                    Err(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                }
                let sew = vtype.vsew();
                let vl = state.ext_state.vl();
                let vstart = u32::from(state.ext_state.vstart());
                let scalar = state.regs.read(rs1).as_u64();
                // SAFETY: register-group alignment was validated above; remaining
                // preconditions are the callee's contract (not visible in this
                // file) -- NOTE(review): confirm.
                unsafe {
                    zve64x_muldiv_helpers::execute_muladd_scalar_op(
                        state,
                        vd,
                        scalar,
                        zve64x_muldiv_helpers::OpSrc::Vreg(vs2.bits()),
                        vm,
                        vl,
                        vstart,
                        sew,
                        // acc = vd element, a = scalar, b = vs2 element.
                        |acc, a, b, _| acc.wrapping_add(a.wrapping_mul(b)),
                    );
                }
            }
            // vnmsac.vv: vd[i] = -(vs1[i] * vs2[i]) + vd[i], modular (wrapping) arithmetic.
            Self::VnmsacVv { vd, vs1, vs2, vm } => {
                // Vector instructions are illegal while the vector unit is disabled.
                if !state.ext_state.vector_instructions_allowed() {
                    Err(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                }
                // A valid vtype must have been established by a prior vset{i}vl{i}.
                let vtype = state
                    .ext_state
                    .vtype()
                    .ok_or(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                // All operand register indices must be aligned to the LMUL group size.
                let group_regs = vtype.vlmul().register_count();
                zve64x_muldiv_helpers::check_vreg_group_alignment(state, vd, group_regs)?;
                zve64x_muldiv_helpers::check_vreg_group_alignment(state, vs2, group_regs)?;
                zve64x_muldiv_helpers::check_vreg_group_alignment(state, vs1, group_regs)?;
                // A masked operation may not write v0 (would clobber the mask register).
                if !vm && vd.bits() == 0 {
                    Err(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                }
                let sew = vtype.vsew();
                let vl = state.ext_state.vl();
                let vstart = u32::from(state.ext_state.vstart());
                // SAFETY: register-group alignment was validated above; remaining
                // preconditions are the callee's contract (not visible in this
                // file) -- NOTE(review): confirm.
                unsafe {
                    zve64x_muldiv_helpers::execute_muladd_op(
                        state,
                        vd,
                        vs1.bits(),
                        zve64x_muldiv_helpers::OpSrc::Vreg(vs2.bits()),
                        vm,
                        vl,
                        vstart,
                        sew,
                        // acc = vd element, a = vs1 element, b = vs2 element.
                        |acc, a, b, _| acc.wrapping_sub(a.wrapping_mul(b)),
                    );
                }
            }
            // vnmsac.vx: vd[i] = -(x[rs1] * vs2[i]) + vd[i], modular (wrapping) arithmetic.
            Self::VnmsacVx { vd, rs1, vs2, vm } => {
                // Vector instructions are illegal while the vector unit is disabled.
                if !state.ext_state.vector_instructions_allowed() {
                    Err(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                }
                // A valid vtype must have been established by a prior vset{i}vl{i}.
                let vtype = state
                    .ext_state
                    .vtype()
                    .ok_or(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                // Vector operand register indices must be aligned to the LMUL group size.
                let group_regs = vtype.vlmul().register_count();
                zve64x_muldiv_helpers::check_vreg_group_alignment(state, vd, group_regs)?;
                zve64x_muldiv_helpers::check_vreg_group_alignment(state, vs2, group_regs)?;
                // A masked operation may not write v0 (would clobber the mask register).
                if !vm && vd.bits() == 0 {
                    Err(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                }
                let sew = vtype.vsew();
                let vl = state.ext_state.vl();
                let vstart = u32::from(state.ext_state.vstart());
                let scalar = state.regs.read(rs1).as_u64();
                // SAFETY: register-group alignment was validated above; remaining
                // preconditions are the callee's contract (not visible in this
                // file) -- NOTE(review): confirm.
                unsafe {
                    zve64x_muldiv_helpers::execute_muladd_scalar_op(
                        state,
                        vd,
                        scalar,
                        zve64x_muldiv_helpers::OpSrc::Vreg(vs2.bits()),
                        vm,
                        vl,
                        vstart,
                        sew,
                        // acc = vd element, a = scalar, b = vs2 element.
                        |acc, a, b, _| acc.wrapping_sub(a.wrapping_mul(b)),
                    );
                }
            }
            // vmadd.vv: vd[i] = (vs1[i] * vd[i]) + vs2[i], modular (wrapping) arithmetic.
            Self::VmaddVv { vd, vs1, vs2, vm } => {
                // Vector instructions are illegal while the vector unit is disabled.
                if !state.ext_state.vector_instructions_allowed() {
                    Err(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                }
                // A valid vtype must have been established by a prior vset{i}vl{i}.
                let vtype = state
                    .ext_state
                    .vtype()
                    .ok_or(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                // All operand register indices must be aligned to the LMUL group size.
                let group_regs = vtype.vlmul().register_count();
                zve64x_muldiv_helpers::check_vreg_group_alignment(state, vd, group_regs)?;
                zve64x_muldiv_helpers::check_vreg_group_alignment(state, vs2, group_regs)?;
                zve64x_muldiv_helpers::check_vreg_group_alignment(state, vs1, group_regs)?;
                // A masked operation may not write v0 (would clobber the mask register).
                if !vm && vd.bits() == 0 {
                    Err(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                }
                let sew = vtype.vsew();
                let vl = state.ext_state.vl();
                let vstart = u32::from(state.ext_state.vstart());
                // SAFETY: register-group alignment was validated above; remaining
                // preconditions are the callee's contract (not visible in this
                // file) -- NOTE(review): confirm.
                unsafe {
                    zve64x_muldiv_helpers::execute_muladd_op(
                        state,
                        vd,
                        vs1.bits(),
                        zve64x_muldiv_helpers::OpSrc::Vreg(vs2.bits()),
                        vm,
                        vl,
                        vstart,
                        sew,
                        // acc = vd element (multiplicand), a = vs1, b = vs2 (addend).
                        |acc, a, b, _| a.wrapping_mul(acc).wrapping_add(b),
                    );
                }
            }
            // vmadd.vx: vd[i] = (x[rs1] * vd[i]) + vs2[i], modular (wrapping) arithmetic.
            Self::VmaddVx { vd, rs1, vs2, vm } => {
                // Vector instructions are illegal while the vector unit is disabled.
                if !state.ext_state.vector_instructions_allowed() {
                    Err(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                }
                // A valid vtype must have been established by a prior vset{i}vl{i}.
                let vtype = state
                    .ext_state
                    .vtype()
                    .ok_or(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                // Vector operand register indices must be aligned to the LMUL group size.
                let group_regs = vtype.vlmul().register_count();
                zve64x_muldiv_helpers::check_vreg_group_alignment(state, vd, group_regs)?;
                zve64x_muldiv_helpers::check_vreg_group_alignment(state, vs2, group_regs)?;
                // A masked operation may not write v0 (would clobber the mask register).
                if !vm && vd.bits() == 0 {
                    Err(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                }
                let sew = vtype.vsew();
                let vl = state.ext_state.vl();
                let vstart = u32::from(state.ext_state.vstart());
                let scalar = state.regs.read(rs1).as_u64();
                // SAFETY: register-group alignment was validated above; remaining
                // preconditions are the callee's contract (not visible in this
                // file) -- NOTE(review): confirm.
                unsafe {
                    zve64x_muldiv_helpers::execute_muladd_scalar_op(
                        state,
                        vd,
                        scalar,
                        zve64x_muldiv_helpers::OpSrc::Vreg(vs2.bits()),
                        vm,
                        vl,
                        vstart,
                        sew,
                        // acc = vd element (multiplicand), a = scalar, b = vs2 (addend).
                        |acc, a, b, _| a.wrapping_mul(acc).wrapping_add(b),
                    );
                }
            }
            // vnmsub.vv: vd[i] = -(vs1[i] * vd[i]) + vs2[i], modular (wrapping) arithmetic.
            Self::VnmsubVv { vd, vs1, vs2, vm } => {
                // Vector instructions are illegal while the vector unit is disabled.
                if !state.ext_state.vector_instructions_allowed() {
                    Err(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                }
                // A valid vtype must have been established by a prior vset{i}vl{i}.
                let vtype = state
                    .ext_state
                    .vtype()
                    .ok_or(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                // All operand register indices must be aligned to the LMUL group size.
                let group_regs = vtype.vlmul().register_count();
                zve64x_muldiv_helpers::check_vreg_group_alignment(state, vd, group_regs)?;
                zve64x_muldiv_helpers::check_vreg_group_alignment(state, vs2, group_regs)?;
                zve64x_muldiv_helpers::check_vreg_group_alignment(state, vs1, group_regs)?;
                // A masked operation may not write v0 (would clobber the mask register).
                if !vm && vd.bits() == 0 {
                    Err(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                }
                let sew = vtype.vsew();
                let vl = state.ext_state.vl();
                let vstart = u32::from(state.ext_state.vstart());
                // SAFETY: register-group alignment was validated above; remaining
                // preconditions are the callee's contract (not visible in this
                // file) -- NOTE(review): confirm.
                unsafe {
                    zve64x_muldiv_helpers::execute_muladd_op(
                        state,
                        vd,
                        vs1.bits(),
                        zve64x_muldiv_helpers::OpSrc::Vreg(vs2.bits()),
                        vm,
                        vl,
                        vstart,
                        sew,
                        // acc = vd element (multiplicand), a = vs1, b = vs2 (minuend).
                        |acc, a, b, _| b.wrapping_sub(a.wrapping_mul(acc)),
                    );
                }
            }
            // vnmsub.vx: vd[i] = -(x[rs1] * vd[i]) + vs2[i], modular (wrapping) arithmetic.
            Self::VnmsubVx { vd, rs1, vs2, vm } => {
                // Vector instructions are illegal while the vector unit is disabled.
                if !state.ext_state.vector_instructions_allowed() {
                    Err(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                }
                // A valid vtype must have been established by a prior vset{i}vl{i}.
                let vtype = state
                    .ext_state
                    .vtype()
                    .ok_or(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                // Vector operand register indices must be aligned to the LMUL group size.
                let group_regs = vtype.vlmul().register_count();
                zve64x_muldiv_helpers::check_vreg_group_alignment(state, vd, group_regs)?;
                zve64x_muldiv_helpers::check_vreg_group_alignment(state, vs2, group_regs)?;
                // A masked operation may not write v0 (would clobber the mask register).
                if !vm && vd.bits() == 0 {
                    Err(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                }
                let sew = vtype.vsew();
                let vl = state.ext_state.vl();
                let vstart = u32::from(state.ext_state.vstart());
                let scalar = state.regs.read(rs1).as_u64();
                // SAFETY: register-group alignment was validated above; remaining
                // preconditions are the callee's contract (not visible in this
                // file) -- NOTE(review): confirm.
                unsafe {
                    zve64x_muldiv_helpers::execute_muladd_scalar_op(
                        state,
                        vd,
                        scalar,
                        zve64x_muldiv_helpers::OpSrc::Vreg(vs2.bits()),
                        vm,
                        vl,
                        vstart,
                        sew,
                        // acc = vd element (multiplicand), a = scalar, b = vs2 (minuend).
                        |acc, a, b, _| b.wrapping_sub(a.wrapping_mul(acc)),
                    );
                }
            }
            // vwmaccu.vv: vd[2*SEW] += zext(vs1[i]) * zext(vs2[i]) (widening unsigned).
            Self::VwmaccuVv { vd, vs1, vs2, vm } => {
                // Vector instructions are illegal while the vector unit is disabled.
                if !state.ext_state.vector_instructions_allowed() {
                    Err(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                }
                // A valid vtype must have been established by a prior vset{i}vl{i}.
                let vtype = state
                    .ext_state
                    .vtype()
                    .ok_or(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                // Widening result is 2*SEW wide; SEW == 64 (ELEN) cannot widen.
                if u32::from(vtype.vsew().bits()) == u64::BITS {
                    Err(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                }
                let group_regs = vtype.vlmul().register_count();
                // Destination uses the doubled (2*LMUL) group; `None` => illegal for widening.
                let dest_group_regs = zve64x_muldiv_helpers::widening_dest_register_count(
                    vtype.vlmul(),
                )
                .ok_or(ExecutionError::IllegalInstruction {
                    address: state
                        .instruction_fetcher
                        .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                })?;
                // Register indices must be aligned to their respective group sizes.
                zve64x_muldiv_helpers::check_vreg_group_alignment(state, vd, dest_group_regs)?;
                zve64x_muldiv_helpers::check_vreg_group_alignment(state, vs2, group_regs)?;
                zve64x_muldiv_helpers::check_vreg_group_alignment(state, vs1, group_regs)?;
                // A masked operation may not write v0 (would clobber the mask register).
                if !vm && vd.bits() == 0 {
                    Err(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                }
                // The wide vd group must not partially overlap either narrow source group.
                zve64x_muldiv_helpers::check_no_widening_overlap(
                    state,
                    vd,
                    vs2,
                    dest_group_regs,
                    group_regs,
                )?;
                zve64x_muldiv_helpers::check_no_widening_overlap(
                    state,
                    vd,
                    vs1,
                    dest_group_regs,
                    group_regs,
                )?;
                let sew = vtype.vsew();
                let vl = state.ext_state.vl();
                let vstart = u32::from(state.ext_state.vstart());
                // SAFETY: register-group alignment and overlap were validated above;
                // remaining preconditions are the callee's contract (not visible in
                // this file) -- NOTE(review): confirm.
                unsafe {
                    zve64x_muldiv_helpers::execute_widening_muladd_op(
                        state,
                        vd,
                        vs1.bits(),
                        zve64x_muldiv_helpers::OpSrc::Vreg(vs2.bits()),
                        vm,
                        vl,
                        vstart,
                        sew,
                        // Zero-extend both SEW-wide operands before the widening multiply.
                        |acc, a, b, sew| {
                            let mask = zve64x_muldiv_helpers::sew_mask(sew);
                            acc.wrapping_add((a & mask).wrapping_mul(b & mask))
                        },
                    );
                }
            }
            // vwmaccu.vx: vd[2*SEW] += zext(x[rs1]) * zext(vs2[i]) (widening unsigned).
            Self::VwmaccuVx { vd, rs1, vs2, vm } => {
                // Vector instructions are illegal while the vector unit is disabled.
                if !state.ext_state.vector_instructions_allowed() {
                    Err(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                }
                // A valid vtype must have been established by a prior vset{i}vl{i}.
                let vtype = state
                    .ext_state
                    .vtype()
                    .ok_or(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                // Widening result is 2*SEW wide; SEW == 64 (ELEN) cannot widen.
                if u32::from(vtype.vsew().bits()) == u64::BITS {
                    Err(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                }
                let group_regs = vtype.vlmul().register_count();
                // Destination uses the doubled (2*LMUL) group; `None` => illegal for widening.
                let dest_group_regs = zve64x_muldiv_helpers::widening_dest_register_count(
                    vtype.vlmul(),
                )
                .ok_or(ExecutionError::IllegalInstruction {
                    address: state
                        .instruction_fetcher
                        .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                })?;
                // Register indices must be aligned to their respective group sizes.
                zve64x_muldiv_helpers::check_vreg_group_alignment(state, vd, dest_group_regs)?;
                zve64x_muldiv_helpers::check_vreg_group_alignment(state, vs2, group_regs)?;
                // A masked operation may not write v0 (would clobber the mask register).
                if !vm && vd.bits() == 0 {
                    Err(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                }
                // The wide vd group must not partially overlap the narrow vs2 group.
                zve64x_muldiv_helpers::check_no_widening_overlap(
                    state,
                    vd,
                    vs2,
                    dest_group_regs,
                    group_regs,
                )?;
                let sew = vtype.vsew();
                let vl = state.ext_state.vl();
                let vstart = u32::from(state.ext_state.vstart());
                let scalar = state.regs.read(rs1).as_u64();
                // SAFETY: register-group alignment and overlap were validated above;
                // remaining preconditions are the callee's contract (not visible in
                // this file) -- NOTE(review): confirm.
                unsafe {
                    zve64x_muldiv_helpers::execute_widening_muladd_scalar_op(
                        state,
                        vd,
                        scalar,
                        zve64x_muldiv_helpers::OpSrc::Vreg(vs2.bits()),
                        vm,
                        vl,
                        vstart,
                        sew,
                        // Zero-extend both SEW-wide operands before the widening multiply.
                        |acc, a, b, sew| {
                            let mask = zve64x_muldiv_helpers::sew_mask(sew);
                            acc.wrapping_add((a & mask).wrapping_mul(b & mask))
                        },
                    );
                }
            }
            // vwmacc.vv: vd[2*SEW] += sext(vs1[i]) * sext(vs2[i]) (widening signed).
            Self::VwmaccVv { vd, vs1, vs2, vm } => {
                // Vector instructions are illegal while the vector unit is disabled.
                if !state.ext_state.vector_instructions_allowed() {
                    Err(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                }
                // A valid vtype must have been established by a prior vset{i}vl{i}.
                let vtype = state
                    .ext_state
                    .vtype()
                    .ok_or(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                // Widening result is 2*SEW wide; SEW == 64 (ELEN) cannot widen.
                if u32::from(vtype.vsew().bits()) == u64::BITS {
                    Err(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                }
                let group_regs = vtype.vlmul().register_count();
                // Destination uses the doubled (2*LMUL) group; `None` => illegal for widening.
                let dest_group_regs = zve64x_muldiv_helpers::widening_dest_register_count(
                    vtype.vlmul(),
                )
                .ok_or(ExecutionError::IllegalInstruction {
                    address: state
                        .instruction_fetcher
                        .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                })?;
                // Register indices must be aligned to their respective group sizes.
                zve64x_muldiv_helpers::check_vreg_group_alignment(state, vd, dest_group_regs)?;
                zve64x_muldiv_helpers::check_vreg_group_alignment(state, vs2, group_regs)?;
                zve64x_muldiv_helpers::check_vreg_group_alignment(state, vs1, group_regs)?;
                // A masked operation may not write v0 (would clobber the mask register).
                if !vm && vd.bits() == 0 {
                    Err(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                }
                // The wide vd group must not partially overlap either narrow source group.
                zve64x_muldiv_helpers::check_no_widening_overlap(
                    state,
                    vd,
                    vs2,
                    dest_group_regs,
                    group_regs,
                )?;
                zve64x_muldiv_helpers::check_no_widening_overlap(
                    state,
                    vd,
                    vs1,
                    dest_group_regs,
                    group_regs,
                )?;
                let sew = vtype.vsew();
                let vl = state.ext_state.vl();
                let vstart = u32::from(state.ext_state.vstart());
                // SAFETY: register-group alignment and overlap were validated above;
                // remaining preconditions are the callee's contract (not visible in
                // this file) -- NOTE(review): confirm.
                unsafe {
                    zve64x_muldiv_helpers::execute_widening_muladd_op(
                        state,
                        vd,
                        vs1.bits(),
                        zve64x_muldiv_helpers::OpSrc::Vreg(vs2.bits()),
                        vm,
                        vl,
                        vstart,
                        sew,
                        // Sign-extend both SEW-wide operands before the widening multiply.
                        |acc, a, b, sew| {
                            let sa = zve64x_muldiv_helpers::sign_extend(a, sew);
                            let sb = zve64x_muldiv_helpers::sign_extend(b, sew);
                            acc.wrapping_add(sa.cast_unsigned().wrapping_mul(sb.cast_unsigned()))
                        },
                    );
                }
            }
            // vwmacc.vx: vd[2*SEW] += sext(x[rs1]) * sext(vs2[i]) (widening signed).
            Self::VwmaccVx { vd, rs1, vs2, vm } => {
                // Vector instructions are illegal while the vector unit is disabled.
                if !state.ext_state.vector_instructions_allowed() {
                    Err(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                }
                // A valid vtype must have been established by a prior vset{i}vl{i}.
                let vtype = state
                    .ext_state
                    .vtype()
                    .ok_or(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                // Widening result is 2*SEW wide; SEW == 64 (ELEN) cannot widen.
                if u32::from(vtype.vsew().bits()) == u64::BITS {
                    Err(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                }
                let group_regs = vtype.vlmul().register_count();
                // Destination uses the doubled (2*LMUL) group; `None` => illegal for widening.
                let dest_group_regs = zve64x_muldiv_helpers::widening_dest_register_count(
                    vtype.vlmul(),
                )
                .ok_or(ExecutionError::IllegalInstruction {
                    address: state
                        .instruction_fetcher
                        .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                })?;
                // Register indices must be aligned to their respective group sizes.
                zve64x_muldiv_helpers::check_vreg_group_alignment(state, vd, dest_group_regs)?;
                zve64x_muldiv_helpers::check_vreg_group_alignment(state, vs2, group_regs)?;
                // A masked operation may not write v0 (would clobber the mask register).
                if !vm && vd.bits() == 0 {
                    Err(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                }
                // The wide vd group must not partially overlap the narrow vs2 group.
                zve64x_muldiv_helpers::check_no_widening_overlap(
                    state,
                    vd,
                    vs2,
                    dest_group_regs,
                    group_regs,
                )?;
                let sew = vtype.vsew();
                let vl = state.ext_state.vl();
                let vstart = u32::from(state.ext_state.vstart());
                let scalar = state.regs.read(rs1).as_u64();
                // SAFETY: register-group alignment and overlap were validated above;
                // remaining preconditions are the callee's contract (not visible in
                // this file) -- NOTE(review): confirm.
                unsafe {
                    zve64x_muldiv_helpers::execute_widening_muladd_scalar_op(
                        state,
                        vd,
                        scalar,
                        zve64x_muldiv_helpers::OpSrc::Vreg(vs2.bits()),
                        vm,
                        vl,
                        vstart,
                        sew,
                        // Sign-extend both SEW-wide operands before the widening multiply.
                        |acc, a, b, sew| {
                            let sa = zve64x_muldiv_helpers::sign_extend(a, sew);
                            let sb = zve64x_muldiv_helpers::sign_extend(b, sew);
                            acc.wrapping_add(sa.cast_unsigned().wrapping_mul(sb.cast_unsigned()))
                        },
                    );
                }
            }
            // vwmaccsu.vv: vd[2*SEW] += sext(vs1[i]) * zext(vs2[i]) (widening signed*unsigned).
            Self::VwmaccsuVv { vd, vs1, vs2, vm } => {
                // Vector instructions are illegal while the vector unit is disabled.
                if !state.ext_state.vector_instructions_allowed() {
                    Err(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                }
                // A valid vtype must have been established by a prior vset{i}vl{i}.
                let vtype = state
                    .ext_state
                    .vtype()
                    .ok_or(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                // Widening result is 2*SEW wide; SEW == 64 (ELEN) cannot widen.
                if u32::from(vtype.vsew().bits()) == u64::BITS {
                    Err(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                }
                let group_regs = vtype.vlmul().register_count();
                // Destination uses the doubled (2*LMUL) group; `None` => illegal for widening.
                let dest_group_regs = zve64x_muldiv_helpers::widening_dest_register_count(
                    vtype.vlmul(),
                )
                .ok_or(ExecutionError::IllegalInstruction {
                    address: state
                        .instruction_fetcher
                        .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                })?;
                // Register indices must be aligned to their respective group sizes.
                zve64x_muldiv_helpers::check_vreg_group_alignment(state, vd, dest_group_regs)?;
                zve64x_muldiv_helpers::check_vreg_group_alignment(state, vs2, group_regs)?;
                zve64x_muldiv_helpers::check_vreg_group_alignment(state, vs1, group_regs)?;
                // A masked operation may not write v0 (would clobber the mask register).
                if !vm && vd.bits() == 0 {
                    Err(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                }
                // The wide vd group must not partially overlap either narrow source group.
                zve64x_muldiv_helpers::check_no_widening_overlap(
                    state,
                    vd,
                    vs2,
                    dest_group_regs,
                    group_regs,
                )?;
                zve64x_muldiv_helpers::check_no_widening_overlap(
                    state,
                    vd,
                    vs1,
                    dest_group_regs,
                    group_regs,
                )?;
                let sew = vtype.vsew();
                let vl = state.ext_state.vl();
                let vstart = u32::from(state.ext_state.vstart());
                // SAFETY: register-group alignment and overlap were validated above;
                // remaining preconditions are the callee's contract (not visible in
                // this file) -- NOTE(review): confirm.
                unsafe {
                    zve64x_muldiv_helpers::execute_widening_muladd_op(
                        state,
                        vd,
                        vs1.bits(),
                        zve64x_muldiv_helpers::OpSrc::Vreg(vs2.bits()),
                        vm,
                        vl,
                        vstart,
                        sew,
                        // a (vs1) is sign-extended, b (vs2) is zero-extended.
                        |acc, a, b, sew| {
                            let sa = zve64x_muldiv_helpers::sign_extend(a, sew);
                            let ub = b & zve64x_muldiv_helpers::sew_mask(sew);
                            acc.wrapping_add(sa.cast_unsigned().wrapping_mul(ub))
                        },
                    );
                }
            }
            // vwmaccsu.vx: vd[2*SEW] += sext(x[rs1]) * zext(vs2[i]) (widening signed*unsigned).
            Self::VwmaccsuVx { vd, rs1, vs2, vm } => {
                // Vector instructions are illegal while the vector unit is disabled.
                if !state.ext_state.vector_instructions_allowed() {
                    Err(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                }
                // A valid vtype must have been established by a prior vset{i}vl{i}.
                let vtype = state
                    .ext_state
                    .vtype()
                    .ok_or(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                // Widening result is 2*SEW wide; SEW == 64 (ELEN) cannot widen.
                if u32::from(vtype.vsew().bits()) == u64::BITS {
                    Err(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                }
                let group_regs = vtype.vlmul().register_count();
                // Destination uses the doubled (2*LMUL) group; `None` => illegal for widening.
                let dest_group_regs = zve64x_muldiv_helpers::widening_dest_register_count(
                    vtype.vlmul(),
                )
                .ok_or(ExecutionError::IllegalInstruction {
                    address: state
                        .instruction_fetcher
                        .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                })?;
                // Register indices must be aligned to their respective group sizes.
                zve64x_muldiv_helpers::check_vreg_group_alignment(state, vd, dest_group_regs)?;
                zve64x_muldiv_helpers::check_vreg_group_alignment(state, vs2, group_regs)?;
                // A masked operation may not write v0 (would clobber the mask register).
                if !vm && vd.bits() == 0 {
                    Err(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                }
                // The wide vd group must not partially overlap the narrow vs2 group.
                zve64x_muldiv_helpers::check_no_widening_overlap(
                    state,
                    vd,
                    vs2,
                    dest_group_regs,
                    group_regs,
                )?;
                let sew = vtype.vsew();
                let vl = state.ext_state.vl();
                let vstart = u32::from(state.ext_state.vstart());
                let scalar = state.regs.read(rs1).as_u64();
                // SAFETY: register-group alignment and overlap were validated above;
                // remaining preconditions are the callee's contract (not visible in
                // this file) -- NOTE(review): confirm.
                unsafe {
                    zve64x_muldiv_helpers::execute_widening_muladd_scalar_op(
                        state,
                        vd,
                        scalar,
                        zve64x_muldiv_helpers::OpSrc::Vreg(vs2.bits()),
                        vm,
                        vl,
                        vstart,
                        sew,
                        // a (scalar) is sign-extended, b (vs2) is zero-extended.
                        |acc, a, b, sew| {
                            let sa = zve64x_muldiv_helpers::sign_extend(a, sew);
                            let ub = b & zve64x_muldiv_helpers::sew_mask(sew);
                            acc.wrapping_add(sa.cast_unsigned().wrapping_mul(ub))
                        },
                    );
                }
            }
            // vwmaccus.vx: vd[2*SEW] += zext(x[rs1]) * sext(vs2[i]) (widening unsigned*signed).
            Self::VwmaccusVx { vd, rs1, vs2, vm } => {
                // Vector instructions are illegal while the vector unit is disabled.
                if !state.ext_state.vector_instructions_allowed() {
                    Err(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                }
                // A valid vtype must have been established by a prior vset{i}vl{i}.
                let vtype = state
                    .ext_state
                    .vtype()
                    .ok_or(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                // Widening result is 2*SEW wide; SEW == 64 (ELEN) cannot widen.
                if u32::from(vtype.vsew().bits()) == u64::BITS {
                    Err(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                }
                let group_regs = vtype.vlmul().register_count();
                // Destination uses the doubled (2*LMUL) group; `None` => illegal for widening.
                let dest_group_regs = zve64x_muldiv_helpers::widening_dest_register_count(
                    vtype.vlmul(),
                )
                .ok_or(ExecutionError::IllegalInstruction {
                    address: state
                        .instruction_fetcher
                        .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                })?;
                // Register indices must be aligned to their respective group sizes.
                zve64x_muldiv_helpers::check_vreg_group_alignment(state, vd, dest_group_regs)?;
                zve64x_muldiv_helpers::check_vreg_group_alignment(state, vs2, group_regs)?;
                // A masked operation may not write v0 (would clobber the mask register).
                if !vm && vd.bits() == 0 {
                    Err(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                }
                // The wide vd group must not partially overlap the narrow vs2 group.
                zve64x_muldiv_helpers::check_no_widening_overlap(
                    state,
                    vd,
                    vs2,
                    dest_group_regs,
                    group_regs,
                )?;
                let sew = vtype.vsew();
                let vl = state.ext_state.vl();
                let vstart = u32::from(state.ext_state.vstart());
                let scalar = state.regs.read(rs1).as_u64();
                // SAFETY: register-group alignment and overlap were validated above;
                // remaining preconditions are the callee's contract (not visible in
                // this file) -- NOTE(review): confirm.
                unsafe {
                    zve64x_muldiv_helpers::execute_widening_muladd_scalar_op(
                        state,
                        vd,
                        scalar,
                        zve64x_muldiv_helpers::OpSrc::Vreg(vs2.bits()),
                        vm,
                        vl,
                        vstart,
                        sew,
                        // a (scalar) is zero-extended, b (vs2) is sign-extended.
                        |acc, a, b, sew| {
                            let ua = a & zve64x_muldiv_helpers::sew_mask(sew);
                            let sb = zve64x_muldiv_helpers::sign_extend(b, sew);
                            acc.wrapping_add(ua.wrapping_mul(sb.cast_unsigned()))
                        },
                    );
                }
            }
2197 }
2198 Ok(ControlFlow::Continue(()))
2199 }
2200}