1#[cfg(test)]
4mod tests;
5pub mod zve64x_arith_helpers;
6
7use crate::v::vector_registers::VectorRegistersExt;
8use crate::v::zve64x::zve64x_helpers;
9use crate::{
10 ExecutableInstruction, ExecutionError, InterpreterState, ProgramCounter, VirtualMemory,
11};
12use ab_riscv_macros::instruction_execution;
13use ab_riscv_primitives::instructions::v::zve64x::arith::Zve64xArithInstruction;
14use ab_riscv_primitives::registers::general_purpose::{RegType, Register};
15use core::fmt;
16use core::ops::ControlFlow;
17
18#[instruction_execution]
19impl<Reg, ExtState, Memory, PC, InstructionHandler, CustomError>
20 ExecutableInstruction<
21 InterpreterState<Reg, ExtState, Memory, PC, InstructionHandler, CustomError>,
22 CustomError,
23 > for Zve64xArithInstruction<Reg>
24where
25 Reg: Register,
26 [(); Reg::N]:,
27 ExtState: VectorRegistersExt<Reg, CustomError>,
28 [(); ExtState::ELEN as usize]:,
29 [(); ExtState::VLEN as usize]:,
30 [(); ExtState::VLENB as usize]:,
31 Memory: VirtualMemory,
32 PC: ProgramCounter<Reg::Type, Memory, CustomError>,
33 CustomError: fmt::Debug,
34{
35 #[inline(always)]
36 fn execute(
37 self,
38 state: &mut InterpreterState<Reg, ExtState, Memory, PC, InstructionHandler, CustomError>,
39 ) -> Result<ControlFlow<()>, ExecutionError<Reg::Type, CustomError>> {
40 match self {
41 Self::VaddVv { vd, vs2, vs1, vm } => {
43 if !state.ext_state.vector_instructions_allowed() {
44 Err(ExecutionError::IllegalInstruction {
45 address: state
46 .instruction_fetcher
47 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
48 })?;
49 }
50 let vtype = state
51 .ext_state
52 .vtype()
53 .ok_or(ExecutionError::IllegalInstruction {
54 address: state
55 .instruction_fetcher
56 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
57 })?;
58 let group_regs = vtype.vlmul().register_count();
59 zve64x_arith_helpers::check_vreg_group_alignment(state, vd, group_regs)?;
60 zve64x_arith_helpers::check_vreg_group_alignment(state, vs2, group_regs)?;
61 zve64x_arith_helpers::check_vreg_group_alignment(state, vs1, group_regs)?;
62 if !vm && vd.bits() == 0 {
63 Err(ExecutionError::IllegalInstruction {
64 address: state
65 .instruction_fetcher
66 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
67 })?;
68 }
69 let sew = vtype.vsew();
70 let vl = state.ext_state.vl();
71 let vstart = u32::from(state.ext_state.vstart());
72 unsafe {
75 zve64x_arith_helpers::execute_arith_op(
76 state,
77 vd,
78 vs2,
79 zve64x_arith_helpers::OpSrc::Vreg(vs1.bits()),
80 vm,
81 vl,
82 vstart,
83 sew,
84 |a, b, _| a.wrapping_add(b),
85 );
86 }
87 }
88 Self::VaddVx { vd, vs2, rs1, vm } => {
89 if !state.ext_state.vector_instructions_allowed() {
90 Err(ExecutionError::IllegalInstruction {
91 address: state
92 .instruction_fetcher
93 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
94 })?;
95 }
96 let vtype = state
97 .ext_state
98 .vtype()
99 .ok_or(ExecutionError::IllegalInstruction {
100 address: state
101 .instruction_fetcher
102 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
103 })?;
104 let group_regs = vtype.vlmul().register_count();
105 zve64x_arith_helpers::check_vreg_group_alignment(state, vd, group_regs)?;
106 zve64x_arith_helpers::check_vreg_group_alignment(state, vs2, group_regs)?;
107 if !vm && vd.bits() == 0 {
108 Err(ExecutionError::IllegalInstruction {
109 address: state
110 .instruction_fetcher
111 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
112 })?;
113 }
114 let sew = vtype.vsew();
115 let vl = state.ext_state.vl();
116 let vstart = u32::from(state.ext_state.vstart());
117 let scalar = state.regs.read(rs1).as_u64();
118 unsafe {
120 zve64x_arith_helpers::execute_arith_op(
121 state,
122 vd,
123 vs2,
124 zve64x_arith_helpers::OpSrc::Scalar(scalar),
125 vm,
126 vl,
127 vstart,
128 sew,
129 |a, b, _| a.wrapping_add(b),
130 );
131 }
132 }
133 Self::VaddVi { vd, vs2, imm, vm } => {
134 if !state.ext_state.vector_instructions_allowed() {
135 Err(ExecutionError::IllegalInstruction {
136 address: state
137 .instruction_fetcher
138 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
139 })?;
140 }
141 let vtype = state
142 .ext_state
143 .vtype()
144 .ok_or(ExecutionError::IllegalInstruction {
145 address: state
146 .instruction_fetcher
147 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
148 })?;
149 let group_regs = vtype.vlmul().register_count();
150 zve64x_arith_helpers::check_vreg_group_alignment(state, vd, group_regs)?;
151 zve64x_arith_helpers::check_vreg_group_alignment(state, vs2, group_regs)?;
152 if !vm && vd.bits() == 0 {
153 Err(ExecutionError::IllegalInstruction {
154 address: state
155 .instruction_fetcher
156 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
157 })?;
158 }
159 let sew = vtype.vsew();
160 let vl = state.ext_state.vl();
161 let vstart = u32::from(state.ext_state.vstart());
162 let scalar = i64::from(imm).cast_unsigned();
164 unsafe {
166 zve64x_arith_helpers::execute_arith_op(
167 state,
168 vd,
169 vs2,
170 zve64x_arith_helpers::OpSrc::Scalar(scalar),
171 vm,
172 vl,
173 vstart,
174 sew,
175 |a, b, _| a.wrapping_add(b),
176 );
177 }
178 }
179 Self::VsubVv { vd, vs2, vs1, vm } => {
181 if !state.ext_state.vector_instructions_allowed() {
182 Err(ExecutionError::IllegalInstruction {
183 address: state
184 .instruction_fetcher
185 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
186 })?;
187 }
188 let vtype = state
189 .ext_state
190 .vtype()
191 .ok_or(ExecutionError::IllegalInstruction {
192 address: state
193 .instruction_fetcher
194 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
195 })?;
196 let group_regs = vtype.vlmul().register_count();
197 zve64x_arith_helpers::check_vreg_group_alignment(state, vd, group_regs)?;
198 zve64x_arith_helpers::check_vreg_group_alignment(state, vs2, group_regs)?;
199 zve64x_arith_helpers::check_vreg_group_alignment(state, vs1, group_regs)?;
200 if !vm && vd.bits() == 0 {
201 Err(ExecutionError::IllegalInstruction {
202 address: state
203 .instruction_fetcher
204 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
205 })?;
206 }
207 let sew = vtype.vsew();
208 let vl = state.ext_state.vl();
209 let vstart = u32::from(state.ext_state.vstart());
210 unsafe {
212 zve64x_arith_helpers::execute_arith_op(
213 state,
214 vd,
215 vs2,
216 zve64x_arith_helpers::OpSrc::Vreg(vs1.bits()),
217 vm,
218 vl,
219 vstart,
220 sew,
221 |a, b, _| a.wrapping_sub(b),
222 );
223 }
224 }
225 Self::VsubVx { vd, vs2, rs1, vm } => {
226 if !state.ext_state.vector_instructions_allowed() {
227 Err(ExecutionError::IllegalInstruction {
228 address: state
229 .instruction_fetcher
230 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
231 })?;
232 }
233 let vtype = state
234 .ext_state
235 .vtype()
236 .ok_or(ExecutionError::IllegalInstruction {
237 address: state
238 .instruction_fetcher
239 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
240 })?;
241 let group_regs = vtype.vlmul().register_count();
242 zve64x_arith_helpers::check_vreg_group_alignment(state, vd, group_regs)?;
243 zve64x_arith_helpers::check_vreg_group_alignment(state, vs2, group_regs)?;
244 if !vm && vd.bits() == 0 {
245 Err(ExecutionError::IllegalInstruction {
246 address: state
247 .instruction_fetcher
248 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
249 })?;
250 }
251 let sew = vtype.vsew();
252 let vl = state.ext_state.vl();
253 let vstart = u32::from(state.ext_state.vstart());
254 let scalar = state.regs.read(rs1).as_u64();
255 unsafe {
257 zve64x_arith_helpers::execute_arith_op(
258 state,
259 vd,
260 vs2,
261 zve64x_arith_helpers::OpSrc::Scalar(scalar),
262 vm,
263 vl,
264 vstart,
265 sew,
266 |a, b, _| a.wrapping_sub(b),
267 );
268 }
269 }
270 Self::VrsubVx { vd, vs2, rs1, vm } => {
271 if !state.ext_state.vector_instructions_allowed() {
272 Err(ExecutionError::IllegalInstruction {
273 address: state
274 .instruction_fetcher
275 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
276 })?;
277 }
278 let vtype = state
279 .ext_state
280 .vtype()
281 .ok_or(ExecutionError::IllegalInstruction {
282 address: state
283 .instruction_fetcher
284 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
285 })?;
286 let group_regs = vtype.vlmul().register_count();
287 zve64x_arith_helpers::check_vreg_group_alignment(state, vd, group_regs)?;
288 zve64x_arith_helpers::check_vreg_group_alignment(state, vs2, group_regs)?;
289 if !vm && vd.bits() == 0 {
290 Err(ExecutionError::IllegalInstruction {
291 address: state
292 .instruction_fetcher
293 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
294 })?;
295 }
296 let sew = vtype.vsew();
297 let vl = state.ext_state.vl();
298 let vstart = u32::from(state.ext_state.vstart());
299 let scalar = state.regs.read(rs1).as_u64();
300 unsafe {
303 zve64x_arith_helpers::execute_arith_op(
304 state,
305 vd,
306 vs2,
307 zve64x_arith_helpers::OpSrc::Scalar(scalar),
308 vm,
309 vl,
310 vstart,
311 sew,
312 |a, b, _| b.wrapping_sub(a),
313 );
314 }
315 }
316 Self::VrsubVi { vd, vs2, imm, vm } => {
317 if !state.ext_state.vector_instructions_allowed() {
318 Err(ExecutionError::IllegalInstruction {
319 address: state
320 .instruction_fetcher
321 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
322 })?;
323 }
324 let vtype = state
325 .ext_state
326 .vtype()
327 .ok_or(ExecutionError::IllegalInstruction {
328 address: state
329 .instruction_fetcher
330 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
331 })?;
332 let group_regs = vtype.vlmul().register_count();
333 zve64x_arith_helpers::check_vreg_group_alignment(state, vd, group_regs)?;
334 zve64x_arith_helpers::check_vreg_group_alignment(state, vs2, group_regs)?;
335 if !vm && vd.bits() == 0 {
336 Err(ExecutionError::IllegalInstruction {
337 address: state
338 .instruction_fetcher
339 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
340 })?;
341 }
342 let sew = vtype.vsew();
343 let vl = state.ext_state.vl();
344 let vstart = u32::from(state.ext_state.vstart());
345 let scalar = i64::from(imm).cast_unsigned();
346 unsafe {
348 zve64x_arith_helpers::execute_arith_op(
349 state,
350 vd,
351 vs2,
352 zve64x_arith_helpers::OpSrc::Scalar(scalar),
353 vm,
354 vl,
355 vstart,
356 sew,
357 |a, b, _| b.wrapping_sub(a),
358 );
359 }
360 }
361 Self::VandVv { vd, vs2, vs1, vm } => {
363 if !state.ext_state.vector_instructions_allowed() {
364 Err(ExecutionError::IllegalInstruction {
365 address: state
366 .instruction_fetcher
367 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
368 })?;
369 }
370 let vtype = state
371 .ext_state
372 .vtype()
373 .ok_or(ExecutionError::IllegalInstruction {
374 address: state
375 .instruction_fetcher
376 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
377 })?;
378 let group_regs = vtype.vlmul().register_count();
379 zve64x_arith_helpers::check_vreg_group_alignment(state, vd, group_regs)?;
380 zve64x_arith_helpers::check_vreg_group_alignment(state, vs2, group_regs)?;
381 zve64x_arith_helpers::check_vreg_group_alignment(state, vs1, group_regs)?;
382 if !vm && vd.bits() == 0 {
383 Err(ExecutionError::IllegalInstruction {
384 address: state
385 .instruction_fetcher
386 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
387 })?;
388 }
389 let sew = vtype.vsew();
390 let vl = state.ext_state.vl();
391 let vstart = u32::from(state.ext_state.vstart());
392 unsafe {
394 zve64x_arith_helpers::execute_arith_op(
395 state,
396 vd,
397 vs2,
398 zve64x_arith_helpers::OpSrc::Vreg(vs1.bits()),
399 vm,
400 vl,
401 vstart,
402 sew,
403 |a, b, _| a & b,
404 );
405 }
406 }
407 Self::VandVx { vd, vs2, rs1, vm } => {
408 if !state.ext_state.vector_instructions_allowed() {
409 Err(ExecutionError::IllegalInstruction {
410 address: state
411 .instruction_fetcher
412 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
413 })?;
414 }
415 let vtype = state
416 .ext_state
417 .vtype()
418 .ok_or(ExecutionError::IllegalInstruction {
419 address: state
420 .instruction_fetcher
421 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
422 })?;
423 let group_regs = vtype.vlmul().register_count();
424 zve64x_arith_helpers::check_vreg_group_alignment(state, vd, group_regs)?;
425 zve64x_arith_helpers::check_vreg_group_alignment(state, vs2, group_regs)?;
426 if !vm && vd.bits() == 0 {
427 Err(ExecutionError::IllegalInstruction {
428 address: state
429 .instruction_fetcher
430 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
431 })?;
432 }
433 let sew = vtype.vsew();
434 let vl = state.ext_state.vl();
435 let vstart = u32::from(state.ext_state.vstart());
436 let scalar = state.regs.read(rs1).as_u64();
437 unsafe {
439 zve64x_arith_helpers::execute_arith_op(
440 state,
441 vd,
442 vs2,
443 zve64x_arith_helpers::OpSrc::Scalar(scalar),
444 vm,
445 vl,
446 vstart,
447 sew,
448 |a, b, _| a & b,
449 );
450 }
451 }
452 Self::VandVi { vd, vs2, imm, vm } => {
453 if !state.ext_state.vector_instructions_allowed() {
454 Err(ExecutionError::IllegalInstruction {
455 address: state
456 .instruction_fetcher
457 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
458 })?;
459 }
460 let vtype = state
461 .ext_state
462 .vtype()
463 .ok_or(ExecutionError::IllegalInstruction {
464 address: state
465 .instruction_fetcher
466 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
467 })?;
468 let group_regs = vtype.vlmul().register_count();
469 zve64x_arith_helpers::check_vreg_group_alignment(state, vd, group_regs)?;
470 zve64x_arith_helpers::check_vreg_group_alignment(state, vs2, group_regs)?;
471 if !vm && vd.bits() == 0 {
472 Err(ExecutionError::IllegalInstruction {
473 address: state
474 .instruction_fetcher
475 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
476 })?;
477 }
478 let sew = vtype.vsew();
479 let vl = state.ext_state.vl();
480 let vstart = u32::from(state.ext_state.vstart());
481 let scalar = i64::from(imm).cast_unsigned();
482 unsafe {
484 zve64x_arith_helpers::execute_arith_op(
485 state,
486 vd,
487 vs2,
488 zve64x_arith_helpers::OpSrc::Scalar(scalar),
489 vm,
490 vl,
491 vstart,
492 sew,
493 |a, b, _| a & b,
494 );
495 }
496 }
497 Self::VorVv { vd, vs2, vs1, vm } => {
499 if !state.ext_state.vector_instructions_allowed() {
500 Err(ExecutionError::IllegalInstruction {
501 address: state
502 .instruction_fetcher
503 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
504 })?;
505 }
506 let vtype = state
507 .ext_state
508 .vtype()
509 .ok_or(ExecutionError::IllegalInstruction {
510 address: state
511 .instruction_fetcher
512 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
513 })?;
514 let group_regs = vtype.vlmul().register_count();
515 zve64x_arith_helpers::check_vreg_group_alignment(state, vd, group_regs)?;
516 zve64x_arith_helpers::check_vreg_group_alignment(state, vs2, group_regs)?;
517 zve64x_arith_helpers::check_vreg_group_alignment(state, vs1, group_regs)?;
518 if !vm && vd.bits() == 0 {
519 Err(ExecutionError::IllegalInstruction {
520 address: state
521 .instruction_fetcher
522 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
523 })?;
524 }
525 let sew = vtype.vsew();
526 let vl = state.ext_state.vl();
527 let vstart = u32::from(state.ext_state.vstart());
528 unsafe {
530 zve64x_arith_helpers::execute_arith_op(
531 state,
532 vd,
533 vs2,
534 zve64x_arith_helpers::OpSrc::Vreg(vs1.bits()),
535 vm,
536 vl,
537 vstart,
538 sew,
539 |a, b, _| a | b,
540 );
541 }
542 }
543 Self::VorVx { vd, vs2, rs1, vm } => {
544 if !state.ext_state.vector_instructions_allowed() {
545 Err(ExecutionError::IllegalInstruction {
546 address: state
547 .instruction_fetcher
548 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
549 })?;
550 }
551 let vtype = state
552 .ext_state
553 .vtype()
554 .ok_or(ExecutionError::IllegalInstruction {
555 address: state
556 .instruction_fetcher
557 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
558 })?;
559 let group_regs = vtype.vlmul().register_count();
560 zve64x_arith_helpers::check_vreg_group_alignment(state, vd, group_regs)?;
561 zve64x_arith_helpers::check_vreg_group_alignment(state, vs2, group_regs)?;
562 if !vm && vd.bits() == 0 {
563 Err(ExecutionError::IllegalInstruction {
564 address: state
565 .instruction_fetcher
566 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
567 })?;
568 }
569 let sew = vtype.vsew();
570 let vl = state.ext_state.vl();
571 let vstart = u32::from(state.ext_state.vstart());
572 let scalar = state.regs.read(rs1).as_u64();
573 unsafe {
575 zve64x_arith_helpers::execute_arith_op(
576 state,
577 vd,
578 vs2,
579 zve64x_arith_helpers::OpSrc::Scalar(scalar),
580 vm,
581 vl,
582 vstart,
583 sew,
584 |a, b, _| a | b,
585 );
586 }
587 }
588 Self::VorVi { vd, vs2, imm, vm } => {
589 if !state.ext_state.vector_instructions_allowed() {
590 Err(ExecutionError::IllegalInstruction {
591 address: state
592 .instruction_fetcher
593 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
594 })?;
595 }
596 let vtype = state
597 .ext_state
598 .vtype()
599 .ok_or(ExecutionError::IllegalInstruction {
600 address: state
601 .instruction_fetcher
602 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
603 })?;
604 let group_regs = vtype.vlmul().register_count();
605 zve64x_arith_helpers::check_vreg_group_alignment(state, vd, group_regs)?;
606 zve64x_arith_helpers::check_vreg_group_alignment(state, vs2, group_regs)?;
607 if !vm && vd.bits() == 0 {
608 Err(ExecutionError::IllegalInstruction {
609 address: state
610 .instruction_fetcher
611 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
612 })?;
613 }
614 let sew = vtype.vsew();
615 let vl = state.ext_state.vl();
616 let vstart = u32::from(state.ext_state.vstart());
617 let scalar = i64::from(imm).cast_unsigned();
618 unsafe {
620 zve64x_arith_helpers::execute_arith_op(
621 state,
622 vd,
623 vs2,
624 zve64x_arith_helpers::OpSrc::Scalar(scalar),
625 vm,
626 vl,
627 vstart,
628 sew,
629 |a, b, _| a | b,
630 );
631 }
632 }
633 Self::VxorVv { vd, vs2, vs1, vm } => {
635 if !state.ext_state.vector_instructions_allowed() {
636 Err(ExecutionError::IllegalInstruction {
637 address: state
638 .instruction_fetcher
639 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
640 })?;
641 }
642 let vtype = state
643 .ext_state
644 .vtype()
645 .ok_or(ExecutionError::IllegalInstruction {
646 address: state
647 .instruction_fetcher
648 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
649 })?;
650 let group_regs = vtype.vlmul().register_count();
651 zve64x_arith_helpers::check_vreg_group_alignment(state, vd, group_regs)?;
652 zve64x_arith_helpers::check_vreg_group_alignment(state, vs2, group_regs)?;
653 zve64x_arith_helpers::check_vreg_group_alignment(state, vs1, group_regs)?;
654 if !vm && vd.bits() == 0 {
655 Err(ExecutionError::IllegalInstruction {
656 address: state
657 .instruction_fetcher
658 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
659 })?;
660 }
661 let sew = vtype.vsew();
662 let vl = state.ext_state.vl();
663 let vstart = u32::from(state.ext_state.vstart());
664 unsafe {
666 zve64x_arith_helpers::execute_arith_op(
667 state,
668 vd,
669 vs2,
670 zve64x_arith_helpers::OpSrc::Vreg(vs1.bits()),
671 vm,
672 vl,
673 vstart,
674 sew,
675 |a, b, _| a ^ b,
676 );
677 }
678 }
679 Self::VxorVx { vd, vs2, rs1, vm } => {
680 if !state.ext_state.vector_instructions_allowed() {
681 Err(ExecutionError::IllegalInstruction {
682 address: state
683 .instruction_fetcher
684 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
685 })?;
686 }
687 let vtype = state
688 .ext_state
689 .vtype()
690 .ok_or(ExecutionError::IllegalInstruction {
691 address: state
692 .instruction_fetcher
693 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
694 })?;
695 let group_regs = vtype.vlmul().register_count();
696 zve64x_arith_helpers::check_vreg_group_alignment(state, vd, group_regs)?;
697 zve64x_arith_helpers::check_vreg_group_alignment(state, vs2, group_regs)?;
698 if !vm && vd.bits() == 0 {
699 Err(ExecutionError::IllegalInstruction {
700 address: state
701 .instruction_fetcher
702 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
703 })?;
704 }
705 let sew = vtype.vsew();
706 let vl = state.ext_state.vl();
707 let vstart = u32::from(state.ext_state.vstart());
708 let scalar = state.regs.read(rs1).as_u64();
709 unsafe {
711 zve64x_arith_helpers::execute_arith_op(
712 state,
713 vd,
714 vs2,
715 zve64x_arith_helpers::OpSrc::Scalar(scalar),
716 vm,
717 vl,
718 vstart,
719 sew,
720 |a, b, _| a ^ b,
721 );
722 }
723 }
724 Self::VxorVi { vd, vs2, imm, vm } => {
725 if !state.ext_state.vector_instructions_allowed() {
726 Err(ExecutionError::IllegalInstruction {
727 address: state
728 .instruction_fetcher
729 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
730 })?;
731 }
732 let vtype = state
733 .ext_state
734 .vtype()
735 .ok_or(ExecutionError::IllegalInstruction {
736 address: state
737 .instruction_fetcher
738 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
739 })?;
740 let group_regs = vtype.vlmul().register_count();
741 zve64x_arith_helpers::check_vreg_group_alignment(state, vd, group_regs)?;
742 zve64x_arith_helpers::check_vreg_group_alignment(state, vs2, group_regs)?;
743 if !vm && vd.bits() == 0 {
744 Err(ExecutionError::IllegalInstruction {
745 address: state
746 .instruction_fetcher
747 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
748 })?;
749 }
750 let sew = vtype.vsew();
751 let vl = state.ext_state.vl();
752 let vstart = u32::from(state.ext_state.vstart());
753 let scalar = i64::from(imm).cast_unsigned();
754 unsafe {
756 zve64x_arith_helpers::execute_arith_op(
757 state,
758 vd,
759 vs2,
760 zve64x_arith_helpers::OpSrc::Scalar(scalar),
761 vm,
762 vl,
763 vstart,
764 sew,
765 |a, b, _| a ^ b,
766 );
767 }
768 }
769 Self::VsllVv { vd, vs2, vs1, vm } => {
771 if !state.ext_state.vector_instructions_allowed() {
772 Err(ExecutionError::IllegalInstruction {
773 address: state
774 .instruction_fetcher
775 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
776 })?;
777 }
778 let vtype = state
779 .ext_state
780 .vtype()
781 .ok_or(ExecutionError::IllegalInstruction {
782 address: state
783 .instruction_fetcher
784 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
785 })?;
786 let group_regs = vtype.vlmul().register_count();
787 zve64x_arith_helpers::check_vreg_group_alignment(state, vd, group_regs)?;
788 zve64x_arith_helpers::check_vreg_group_alignment(state, vs2, group_regs)?;
789 zve64x_arith_helpers::check_vreg_group_alignment(state, vs1, group_regs)?;
790 if !vm && vd.bits() == 0 {
791 Err(ExecutionError::IllegalInstruction {
792 address: state
793 .instruction_fetcher
794 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
795 })?;
796 }
797 let sew = vtype.vsew();
798 let vl = state.ext_state.vl();
799 let vstart = u32::from(state.ext_state.vstart());
800 unsafe {
802 zve64x_arith_helpers::execute_arith_op(
803 state,
804 vd,
805 vs2,
806 zve64x_arith_helpers::OpSrc::Vreg(vs1.bits()),
807 vm,
808 vl,
809 vstart,
810 sew,
811 |a, b, sew| a << (b & u64::from(sew.bits() - 1)),
813 );
814 }
815 }
816 Self::VsllVx { vd, vs2, rs1, vm } => {
817 if !state.ext_state.vector_instructions_allowed() {
818 Err(ExecutionError::IllegalInstruction {
819 address: state
820 .instruction_fetcher
821 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
822 })?;
823 }
824 let vtype = state
825 .ext_state
826 .vtype()
827 .ok_or(ExecutionError::IllegalInstruction {
828 address: state
829 .instruction_fetcher
830 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
831 })?;
832 let group_regs = vtype.vlmul().register_count();
833 zve64x_arith_helpers::check_vreg_group_alignment(state, vd, group_regs)?;
834 zve64x_arith_helpers::check_vreg_group_alignment(state, vs2, group_regs)?;
835 if !vm && vd.bits() == 0 {
836 Err(ExecutionError::IllegalInstruction {
837 address: state
838 .instruction_fetcher
839 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
840 })?;
841 }
842 let sew = vtype.vsew();
843 let vl = state.ext_state.vl();
844 let vstart = u32::from(state.ext_state.vstart());
845 let scalar = state.regs.read(rs1).as_u64();
846 unsafe {
848 zve64x_arith_helpers::execute_arith_op(
849 state,
850 vd,
851 vs2,
852 zve64x_arith_helpers::OpSrc::Scalar(scalar),
853 vm,
854 vl,
855 vstart,
856 sew,
857 |a, b, sew| a << (b & u64::from(sew.bits() - 1)),
858 );
859 }
860 }
861 Self::VsllVi { vd, vs2, uimm, vm } => {
862 if !state.ext_state.vector_instructions_allowed() {
863 Err(ExecutionError::IllegalInstruction {
864 address: state
865 .instruction_fetcher
866 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
867 })?;
868 }
869 let vtype = state
870 .ext_state
871 .vtype()
872 .ok_or(ExecutionError::IllegalInstruction {
873 address: state
874 .instruction_fetcher
875 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
876 })?;
877 let group_regs = vtype.vlmul().register_count();
878 zve64x_arith_helpers::check_vreg_group_alignment(state, vd, group_regs)?;
879 zve64x_arith_helpers::check_vreg_group_alignment(state, vs2, group_regs)?;
880 if !vm && vd.bits() == 0 {
881 Err(ExecutionError::IllegalInstruction {
882 address: state
883 .instruction_fetcher
884 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
885 })?;
886 }
887 let sew = vtype.vsew();
888 let vl = state.ext_state.vl();
889 let vstart = u32::from(state.ext_state.vstart());
890 let shamt = u64::from(uimm) & u64::from(sew.bits() - 1);
892 unsafe {
894 zve64x_arith_helpers::execute_arith_op(
895 state,
896 vd,
897 vs2,
898 zve64x_arith_helpers::OpSrc::Scalar(shamt),
899 vm,
900 vl,
901 vstart,
902 sew,
903 |a, b, _| a << b,
904 );
905 }
906 }
907 Self::VsrlVv { vd, vs2, vs1, vm } => {
909 if !state.ext_state.vector_instructions_allowed() {
910 Err(ExecutionError::IllegalInstruction {
911 address: state
912 .instruction_fetcher
913 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
914 })?;
915 }
916 let vtype = state
917 .ext_state
918 .vtype()
919 .ok_or(ExecutionError::IllegalInstruction {
920 address: state
921 .instruction_fetcher
922 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
923 })?;
924 let group_regs = vtype.vlmul().register_count();
925 zve64x_arith_helpers::check_vreg_group_alignment(state, vd, group_regs)?;
926 zve64x_arith_helpers::check_vreg_group_alignment(state, vs2, group_regs)?;
927 zve64x_arith_helpers::check_vreg_group_alignment(state, vs1, group_regs)?;
928 if !vm && vd.bits() == 0 {
929 Err(ExecutionError::IllegalInstruction {
930 address: state
931 .instruction_fetcher
932 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
933 })?;
934 }
935 let sew = vtype.vsew();
936 let vl = state.ext_state.vl();
937 let vstart = u32::from(state.ext_state.vstart());
938 unsafe {
940 zve64x_arith_helpers::execute_arith_op(
941 state,
942 vd,
943 vs2,
944 zve64x_arith_helpers::OpSrc::Vreg(vs1.bits()),
945 vm,
946 vl,
947 vstart,
948 sew,
949 |a, b, sew| {
951 let mask = zve64x_arith_helpers::sew_mask(sew);
952 let shamt = b & u64::from(sew.bits() - 1);
953 (a & mask) >> shamt
954 },
955 );
956 }
957 }
958 Self::VsrlVx { vd, vs2, rs1, vm } => {
959 if !state.ext_state.vector_instructions_allowed() {
960 Err(ExecutionError::IllegalInstruction {
961 address: state
962 .instruction_fetcher
963 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
964 })?;
965 }
966 let vtype = state
967 .ext_state
968 .vtype()
969 .ok_or(ExecutionError::IllegalInstruction {
970 address: state
971 .instruction_fetcher
972 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
973 })?;
974 let group_regs = vtype.vlmul().register_count();
975 zve64x_arith_helpers::check_vreg_group_alignment(state, vd, group_regs)?;
976 zve64x_arith_helpers::check_vreg_group_alignment(state, vs2, group_regs)?;
977 if !vm && vd.bits() == 0 {
978 Err(ExecutionError::IllegalInstruction {
979 address: state
980 .instruction_fetcher
981 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
982 })?;
983 }
984 let sew = vtype.vsew();
985 let vl = state.ext_state.vl();
986 let vstart = u32::from(state.ext_state.vstart());
987 let scalar = state.regs.read(rs1).as_u64();
988 unsafe {
990 zve64x_arith_helpers::execute_arith_op(
991 state,
992 vd,
993 vs2,
994 zve64x_arith_helpers::OpSrc::Scalar(scalar),
995 vm,
996 vl,
997 vstart,
998 sew,
999 |a, b, sew| {
1000 let mask = zve64x_arith_helpers::sew_mask(sew);
1001 let shamt = b & u64::from(sew.bits() - 1);
1002 (a & mask) >> shamt
1003 },
1004 );
1005 }
1006 }
1007 Self::VsrlVi { vd, vs2, uimm, vm } => {
1008 if !state.ext_state.vector_instructions_allowed() {
1009 Err(ExecutionError::IllegalInstruction {
1010 address: state
1011 .instruction_fetcher
1012 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
1013 })?;
1014 }
1015 let vtype = state
1016 .ext_state
1017 .vtype()
1018 .ok_or(ExecutionError::IllegalInstruction {
1019 address: state
1020 .instruction_fetcher
1021 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
1022 })?;
1023 let group_regs = vtype.vlmul().register_count();
1024 zve64x_arith_helpers::check_vreg_group_alignment(state, vd, group_regs)?;
1025 zve64x_arith_helpers::check_vreg_group_alignment(state, vs2, group_regs)?;
1026 if !vm && vd.bits() == 0 {
1027 Err(ExecutionError::IllegalInstruction {
1028 address: state
1029 .instruction_fetcher
1030 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
1031 })?;
1032 }
1033 let sew = vtype.vsew();
1034 let vl = state.ext_state.vl();
1035 let vstart = u32::from(state.ext_state.vstart());
1036 let shamt = u64::from(uimm) & u64::from(sew.bits() - 1);
1037 unsafe {
1039 zve64x_arith_helpers::execute_arith_op(
1040 state,
1041 vd,
1042 vs2,
1043 zve64x_arith_helpers::OpSrc::Scalar(shamt),
1044 vm,
1045 vl,
1046 vstart,
1047 sew,
1048 |a, b, sew| (a & zve64x_arith_helpers::sew_mask(sew)) >> b,
1049 );
1050 }
1051 }
1052 Self::VsraVv { vd, vs2, vs1, vm } => {
1054 if !state.ext_state.vector_instructions_allowed() {
1055 Err(ExecutionError::IllegalInstruction {
1056 address: state
1057 .instruction_fetcher
1058 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
1059 })?;
1060 }
1061 let vtype = state
1062 .ext_state
1063 .vtype()
1064 .ok_or(ExecutionError::IllegalInstruction {
1065 address: state
1066 .instruction_fetcher
1067 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
1068 })?;
1069 let group_regs = vtype.vlmul().register_count();
1070 zve64x_arith_helpers::check_vreg_group_alignment(state, vd, group_regs)?;
1071 zve64x_arith_helpers::check_vreg_group_alignment(state, vs2, group_regs)?;
1072 zve64x_arith_helpers::check_vreg_group_alignment(state, vs1, group_regs)?;
1073 if !vm && vd.bits() == 0 {
1074 Err(ExecutionError::IllegalInstruction {
1075 address: state
1076 .instruction_fetcher
1077 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
1078 })?;
1079 }
1080 let sew = vtype.vsew();
1081 let vl = state.ext_state.vl();
1082 let vstart = u32::from(state.ext_state.vstart());
1083 unsafe {
1085 zve64x_arith_helpers::execute_arith_op(
1086 state,
1087 vd,
1088 vs2,
1089 zve64x_arith_helpers::OpSrc::Vreg(vs1.bits()),
1090 vm,
1091 vl,
1092 vstart,
1093 sew,
1094 |a, b, sew| {
1095 let shamt = b & u64::from(sew.bits() - 1);
1096 let signed = zve64x_arith_helpers::sign_extend(a, sew);
1097 (signed >> shamt).cast_unsigned()
1098 },
1099 );
1100 }
1101 }
1102 Self::VsraVx { vd, vs2, rs1, vm } => {
1103 if !state.ext_state.vector_instructions_allowed() {
1104 Err(ExecutionError::IllegalInstruction {
1105 address: state
1106 .instruction_fetcher
1107 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
1108 })?;
1109 }
1110 let vtype = state
1111 .ext_state
1112 .vtype()
1113 .ok_or(ExecutionError::IllegalInstruction {
1114 address: state
1115 .instruction_fetcher
1116 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
1117 })?;
1118 let group_regs = vtype.vlmul().register_count();
1119 zve64x_arith_helpers::check_vreg_group_alignment(state, vd, group_regs)?;
1120 zve64x_arith_helpers::check_vreg_group_alignment(state, vs2, group_regs)?;
1121 if !vm && vd.bits() == 0 {
1122 Err(ExecutionError::IllegalInstruction {
1123 address: state
1124 .instruction_fetcher
1125 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
1126 })?;
1127 }
1128 let sew = vtype.vsew();
1129 let vl = state.ext_state.vl();
1130 let vstart = u32::from(state.ext_state.vstart());
1131 let scalar = state.regs.read(rs1).as_u64();
1132 unsafe {
1134 zve64x_arith_helpers::execute_arith_op(
1135 state,
1136 vd,
1137 vs2,
1138 zve64x_arith_helpers::OpSrc::Scalar(scalar),
1139 vm,
1140 vl,
1141 vstart,
1142 sew,
1143 |a, b, sew| {
1144 let shamt = b & u64::from(sew.bits() - 1);
1145 let signed = zve64x_arith_helpers::sign_extend(a, sew);
1146 (signed >> shamt).cast_unsigned()
1147 },
1148 );
1149 }
1150 }
1151 Self::VsraVi { vd, vs2, uimm, vm } => {
1152 if !state.ext_state.vector_instructions_allowed() {
1153 Err(ExecutionError::IllegalInstruction {
1154 address: state
1155 .instruction_fetcher
1156 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
1157 })?;
1158 }
1159 let vtype = state
1160 .ext_state
1161 .vtype()
1162 .ok_or(ExecutionError::IllegalInstruction {
1163 address: state
1164 .instruction_fetcher
1165 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
1166 })?;
1167 let group_regs = vtype.vlmul().register_count();
1168 zve64x_arith_helpers::check_vreg_group_alignment(state, vd, group_regs)?;
1169 zve64x_arith_helpers::check_vreg_group_alignment(state, vs2, group_regs)?;
1170 if !vm && vd.bits() == 0 {
1171 Err(ExecutionError::IllegalInstruction {
1172 address: state
1173 .instruction_fetcher
1174 .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
1175 })?;
1176 }
1177 let sew = vtype.vsew();
1178 let vl = state.ext_state.vl();
1179 let vstart = u32::from(state.ext_state.vstart());
1180 let shamt = u64::from(uimm) & u64::from(sew.bits() - 1);
1181 unsafe {
1183 zve64x_arith_helpers::execute_arith_op(
1184 state,
1185 vd,
1186 vs2,
1187 zve64x_arith_helpers::OpSrc::Scalar(shamt),
1188 vm,
1189 vl,
1190 vstart,
1191 sew,
1192 |a, b, sew| {
1193 let signed = zve64x_arith_helpers::sign_extend(a, sew);
1194 (signed >> b).cast_unsigned()
1195 },
1196 );
1197 }
1198 }
            // vminu.vv: element-wise unsigned minimum of vs2 and vs1; operands
            // are compared after masking down to SEW bits.
            Self::VminuVv { vd, vs2, vs1, vm } => {
                // Standard preamble shared by the arithmetic arms: vector unit
                // enabled, vtype valid (None presumably means vill), operand
                // groups aligned for the current LMUL, and a masked op must not
                // write v0 (the mask register).
                if !state.ext_state.vector_instructions_allowed() {
                    Err(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                }
                let vtype = state
                    .ext_state
                    .vtype()
                    .ok_or(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                let group_regs = vtype.vlmul().register_count();
                zve64x_arith_helpers::check_vreg_group_alignment(state, vd, group_regs)?;
                zve64x_arith_helpers::check_vreg_group_alignment(state, vs2, group_regs)?;
                zve64x_arith_helpers::check_vreg_group_alignment(state, vs1, group_regs)?;
                if !vm && vd.bits() == 0 {
                    Err(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                }
                let sew = vtype.vsew();
                let vl = state.ext_state.vl();
                let vstart = u32::from(state.ext_state.vstart());
                // SAFETY: operand register groups were alignment-checked above
                // and vl/vstart/sew come from the live CSR state; assumed to
                // meet execute_arith_op's safety contract.
                unsafe {
                    zve64x_arith_helpers::execute_arith_op(
                        state,
                        vd,
                        vs2,
                        zve64x_arith_helpers::OpSrc::Vreg(vs1.bits()),
                        vm,
                        vl,
                        vstart,
                        sew,
                        |a, b, sew| {
                            // Unsigned compare on the SEW-truncated values;
                            // returns the untruncated operand (helper is
                            // presumed to mask on writeback — confirm).
                            let mask = zve64x_arith_helpers::sew_mask(sew);
                            if a & mask <= b & mask { a } else { b }
                        },
                    );
                }
            }
            // vminu.vx: unsigned minimum of vs2 elements and the rs1 scalar.
            Self::VminuVx { vd, vs2, rs1, vm } => {
                if !state.ext_state.vector_instructions_allowed() {
                    Err(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                }
                let vtype = state
                    .ext_state
                    .vtype()
                    .ok_or(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                let group_regs = vtype.vlmul().register_count();
                zve64x_arith_helpers::check_vreg_group_alignment(state, vd, group_regs)?;
                zve64x_arith_helpers::check_vreg_group_alignment(state, vs2, group_regs)?;
                if !vm && vd.bits() == 0 {
                    Err(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                }
                let sew = vtype.vsew();
                let vl = state.ext_state.vl();
                let vstart = u32::from(state.ext_state.vstart());
                let scalar = state.regs.read(rs1).as_u64();
                // SAFETY: same preconditions as vminu.vv; assumed to meet
                // execute_arith_op's safety contract.
                unsafe {
                    zve64x_arith_helpers::execute_arith_op(
                        state,
                        vd,
                        vs2,
                        zve64x_arith_helpers::OpSrc::Scalar(scalar),
                        vm,
                        vl,
                        vstart,
                        sew,
                        |a, b, sew| {
                            let mask = zve64x_arith_helpers::sew_mask(sew);
                            if a & mask <= b & mask { a } else { b }
                        },
                    );
                }
            }
            // vmin.vv: element-wise signed minimum; operands are sign-extended
            // from SEW before comparison.
            Self::VminVv { vd, vs2, vs1, vm } => {
                if !state.ext_state.vector_instructions_allowed() {
                    Err(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                }
                let vtype = state
                    .ext_state
                    .vtype()
                    .ok_or(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                let group_regs = vtype.vlmul().register_count();
                zve64x_arith_helpers::check_vreg_group_alignment(state, vd, group_regs)?;
                zve64x_arith_helpers::check_vreg_group_alignment(state, vs2, group_regs)?;
                zve64x_arith_helpers::check_vreg_group_alignment(state, vs1, group_regs)?;
                if !vm && vd.bits() == 0 {
                    Err(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                }
                let sew = vtype.vsew();
                let vl = state.ext_state.vl();
                let vstart = u32::from(state.ext_state.vstart());
                // SAFETY: same preconditions as vminu.vv; assumed to meet
                // execute_arith_op's safety contract.
                unsafe {
                    zve64x_arith_helpers::execute_arith_op(
                        state,
                        vd,
                        vs2,
                        zve64x_arith_helpers::OpSrc::Vreg(vs1.bits()),
                        vm,
                        vl,
                        vstart,
                        sew,
                        |a, b, sew| {
                            if zve64x_arith_helpers::sign_extend(a, sew)
                                <= zve64x_arith_helpers::sign_extend(b, sew)
                            {
                                a
                            } else {
                                b
                            }
                        },
                    );
                }
            }
            // vmin.vx: signed minimum of vs2 elements and the rs1 scalar.
            Self::VminVx { vd, vs2, rs1, vm } => {
                if !state.ext_state.vector_instructions_allowed() {
                    Err(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                }
                let vtype = state
                    .ext_state
                    .vtype()
                    .ok_or(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                let group_regs = vtype.vlmul().register_count();
                zve64x_arith_helpers::check_vreg_group_alignment(state, vd, group_regs)?;
                zve64x_arith_helpers::check_vreg_group_alignment(state, vs2, group_regs)?;
                if !vm && vd.bits() == 0 {
                    Err(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                }
                let sew = vtype.vsew();
                let vl = state.ext_state.vl();
                let vstart = u32::from(state.ext_state.vstart());
                let scalar = state.regs.read(rs1).as_u64();
                // SAFETY: same preconditions as vminu.vv; assumed to meet
                // execute_arith_op's safety contract.
                unsafe {
                    zve64x_arith_helpers::execute_arith_op(
                        state,
                        vd,
                        vs2,
                        zve64x_arith_helpers::OpSrc::Scalar(scalar),
                        vm,
                        vl,
                        vstart,
                        sew,
                        |a, b, sew| {
                            if zve64x_arith_helpers::sign_extend(a, sew)
                                <= zve64x_arith_helpers::sign_extend(b, sew)
                            {
                                a
                            } else {
                                b
                            }
                        },
                    );
                }
            }
            // vmaxu.vv: element-wise unsigned maximum of vs2 and vs1; mirror
            // image of vminu.vv (>= instead of <=).
            Self::VmaxuVv { vd, vs2, vs1, vm } => {
                // Standard preamble: vector unit enabled, vtype valid (None
                // presumably means vill), operand groups aligned for LMUL, and
                // a masked op must not write v0.
                if !state.ext_state.vector_instructions_allowed() {
                    Err(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                }
                let vtype = state
                    .ext_state
                    .vtype()
                    .ok_or(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                let group_regs = vtype.vlmul().register_count();
                zve64x_arith_helpers::check_vreg_group_alignment(state, vd, group_regs)?;
                zve64x_arith_helpers::check_vreg_group_alignment(state, vs2, group_regs)?;
                zve64x_arith_helpers::check_vreg_group_alignment(state, vs1, group_regs)?;
                if !vm && vd.bits() == 0 {
                    Err(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                }
                let sew = vtype.vsew();
                let vl = state.ext_state.vl();
                let vstart = u32::from(state.ext_state.vstart());
                // SAFETY: operand register groups were alignment-checked above
                // and vl/vstart/sew come from the live CSR state; assumed to
                // meet execute_arith_op's safety contract.
                unsafe {
                    zve64x_arith_helpers::execute_arith_op(
                        state,
                        vd,
                        vs2,
                        zve64x_arith_helpers::OpSrc::Vreg(vs1.bits()),
                        vm,
                        vl,
                        vstart,
                        sew,
                        |a, b, sew| {
                            // Unsigned compare on SEW-truncated values.
                            let mask = zve64x_arith_helpers::sew_mask(sew);
                            if a & mask >= b & mask { a } else { b }
                        },
                    );
                }
            }
            // vmaxu.vx: unsigned maximum of vs2 elements and the rs1 scalar.
            Self::VmaxuVx { vd, vs2, rs1, vm } => {
                if !state.ext_state.vector_instructions_allowed() {
                    Err(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                }
                let vtype = state
                    .ext_state
                    .vtype()
                    .ok_or(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                let group_regs = vtype.vlmul().register_count();
                zve64x_arith_helpers::check_vreg_group_alignment(state, vd, group_regs)?;
                zve64x_arith_helpers::check_vreg_group_alignment(state, vs2, group_regs)?;
                if !vm && vd.bits() == 0 {
                    Err(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                }
                let sew = vtype.vsew();
                let vl = state.ext_state.vl();
                let vstart = u32::from(state.ext_state.vstart());
                let scalar = state.regs.read(rs1).as_u64();
                // SAFETY: same preconditions as vmaxu.vv; assumed to meet
                // execute_arith_op's safety contract.
                unsafe {
                    zve64x_arith_helpers::execute_arith_op(
                        state,
                        vd,
                        vs2,
                        zve64x_arith_helpers::OpSrc::Scalar(scalar),
                        vm,
                        vl,
                        vstart,
                        sew,
                        |a, b, sew| {
                            let mask = zve64x_arith_helpers::sew_mask(sew);
                            if a & mask >= b & mask { a } else { b }
                        },
                    );
                }
            }
            // vmax.vv: element-wise signed maximum; operands sign-extended from
            // SEW before comparison.
            Self::VmaxVv { vd, vs2, vs1, vm } => {
                if !state.ext_state.vector_instructions_allowed() {
                    Err(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                }
                let vtype = state
                    .ext_state
                    .vtype()
                    .ok_or(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                let group_regs = vtype.vlmul().register_count();
                zve64x_arith_helpers::check_vreg_group_alignment(state, vd, group_regs)?;
                zve64x_arith_helpers::check_vreg_group_alignment(state, vs2, group_regs)?;
                zve64x_arith_helpers::check_vreg_group_alignment(state, vs1, group_regs)?;
                if !vm && vd.bits() == 0 {
                    Err(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                }
                let sew = vtype.vsew();
                let vl = state.ext_state.vl();
                let vstart = u32::from(state.ext_state.vstart());
                // SAFETY: same preconditions as vmaxu.vv; assumed to meet
                // execute_arith_op's safety contract.
                unsafe {
                    zve64x_arith_helpers::execute_arith_op(
                        state,
                        vd,
                        vs2,
                        zve64x_arith_helpers::OpSrc::Vreg(vs1.bits()),
                        vm,
                        vl,
                        vstart,
                        sew,
                        |a, b, sew| {
                            if zve64x_arith_helpers::sign_extend(a, sew)
                                >= zve64x_arith_helpers::sign_extend(b, sew)
                            {
                                a
                            } else {
                                b
                            }
                        },
                    );
                }
            }
            // vmax.vx: signed maximum of vs2 elements and the rs1 scalar.
            Self::VmaxVx { vd, vs2, rs1, vm } => {
                if !state.ext_state.vector_instructions_allowed() {
                    Err(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                }
                let vtype = state
                    .ext_state
                    .vtype()
                    .ok_or(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                let group_regs = vtype.vlmul().register_count();
                zve64x_arith_helpers::check_vreg_group_alignment(state, vd, group_regs)?;
                zve64x_arith_helpers::check_vreg_group_alignment(state, vs2, group_regs)?;
                if !vm && vd.bits() == 0 {
                    Err(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                }
                let sew = vtype.vsew();
                let vl = state.ext_state.vl();
                let vstart = u32::from(state.ext_state.vstart());
                let scalar = state.regs.read(rs1).as_u64();
                // SAFETY: same preconditions as vmaxu.vv; assumed to meet
                // execute_arith_op's safety contract.
                unsafe {
                    zve64x_arith_helpers::execute_arith_op(
                        state,
                        vd,
                        vs2,
                        zve64x_arith_helpers::OpSrc::Scalar(scalar),
                        vm,
                        vl,
                        vstart,
                        sew,
                        |a, b, sew| {
                            if zve64x_arith_helpers::sign_extend(a, sew)
                                >= zve64x_arith_helpers::sign_extend(b, sew)
                            {
                                a
                            } else {
                                b
                            }
                        },
                    );
                }
            }
            // vmseq.vv: mask-writing compare, bit i of vd = (vs2[i] == vs1[i])
            // after truncating both operands to SEW bits.
            Self::VmseqVv { vd, vs2, vs1, vm } => {
                if !state.ext_state.vector_instructions_allowed() {
                    Err(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                }
                let vtype = state
                    .ext_state
                    .vtype()
                    .ok_or(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                // Note: unlike the arithmetic arms, vd is written as a single
                // mask register, so it has no group-alignment check and may be
                // v0 even when masked — matches the mask-destination overlap
                // exception for compares (verify against the RVV spec).
                let group_regs = vtype.vlmul().register_count();
                zve64x_arith_helpers::check_vreg_group_alignment(state, vs2, group_regs)?;
                zve64x_arith_helpers::check_vreg_group_alignment(state, vs1, group_regs)?;
                let sew = vtype.vsew();
                let vl = state.ext_state.vl();
                let vstart = u32::from(state.ext_state.vstart());
                // SAFETY: source register groups were alignment-checked above
                // and vl/vstart/sew come from the live CSR state; assumed to
                // meet execute_compare_op's safety contract.
                unsafe {
                    zve64x_arith_helpers::execute_compare_op(
                        state,
                        vd,
                        vs2,
                        zve64x_arith_helpers::OpSrc::Vreg(vs1.bits()),
                        vm,
                        vl,
                        vstart,
                        sew,
                        |a, b, sew| {
                            (a & zve64x_arith_helpers::sew_mask(sew))
                                == (b & zve64x_arith_helpers::sew_mask(sew))
                        },
                    );
                }
            }
            // vmseq.vx: mask-writing compare of vs2 elements against the rs1
            // scalar.
            Self::VmseqVx { vd, vs2, rs1, vm } => {
                if !state.ext_state.vector_instructions_allowed() {
                    Err(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                }
                let vtype = state
                    .ext_state
                    .vtype()
                    .ok_or(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                let group_regs = vtype.vlmul().register_count();
                zve64x_arith_helpers::check_vreg_group_alignment(state, vs2, group_regs)?;
                let sew = vtype.vsew();
                let vl = state.ext_state.vl();
                let vstart = u32::from(state.ext_state.vstart());
                let scalar = state.regs.read(rs1).as_u64();
                // SAFETY: same preconditions as vmseq.vv; assumed to meet
                // execute_compare_op's safety contract.
                unsafe {
                    zve64x_arith_helpers::execute_compare_op(
                        state,
                        vd,
                        vs2,
                        zve64x_arith_helpers::OpSrc::Scalar(scalar),
                        vm,
                        vl,
                        vstart,
                        sew,
                        |a, b, sew| {
                            (a & zve64x_arith_helpers::sew_mask(sew))
                                == (b & zve64x_arith_helpers::sew_mask(sew))
                        },
                    );
                }
            }
            // vmseq.vi: mask-writing compare of vs2 elements against the
            // sign-extended immediate (reinterpreted as u64; the closure
            // re-truncates to SEW).
            Self::VmseqVi { vd, vs2, imm, vm } => {
                if !state.ext_state.vector_instructions_allowed() {
                    Err(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                }
                let vtype = state
                    .ext_state
                    .vtype()
                    .ok_or(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                let group_regs = vtype.vlmul().register_count();
                zve64x_arith_helpers::check_vreg_group_alignment(state, vs2, group_regs)?;
                let sew = vtype.vsew();
                let vl = state.ext_state.vl();
                let vstart = u32::from(state.ext_state.vstart());
                let scalar = i64::from(imm).cast_unsigned();
                // SAFETY: same preconditions as vmseq.vv; assumed to meet
                // execute_compare_op's safety contract.
                unsafe {
                    zve64x_arith_helpers::execute_compare_op(
                        state,
                        vd,
                        vs2,
                        zve64x_arith_helpers::OpSrc::Scalar(scalar),
                        vm,
                        vl,
                        vstart,
                        sew,
                        |a, b, sew| {
                            (a & zve64x_arith_helpers::sew_mask(sew))
                                == (b & zve64x_arith_helpers::sew_mask(sew))
                        },
                    );
                }
            }
            // vmsne.vv: mask-writing compare, bit i of vd = (vs2[i] != vs1[i])
            // on SEW-truncated operands. vd is a single mask register, so it
            // has no group-alignment or v0-overlap check (compare-destination
            // exception — verify against the RVV spec).
            Self::VmsneVv { vd, vs2, vs1, vm } => {
                if !state.ext_state.vector_instructions_allowed() {
                    Err(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                }
                let vtype = state
                    .ext_state
                    .vtype()
                    .ok_or(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                let group_regs = vtype.vlmul().register_count();
                zve64x_arith_helpers::check_vreg_group_alignment(state, vs2, group_regs)?;
                zve64x_arith_helpers::check_vreg_group_alignment(state, vs1, group_regs)?;
                let sew = vtype.vsew();
                let vl = state.ext_state.vl();
                let vstart = u32::from(state.ext_state.vstart());
                // SAFETY: source register groups were alignment-checked above
                // and vl/vstart/sew come from the live CSR state; assumed to
                // meet execute_compare_op's safety contract.
                unsafe {
                    zve64x_arith_helpers::execute_compare_op(
                        state,
                        vd,
                        vs2,
                        zve64x_arith_helpers::OpSrc::Vreg(vs1.bits()),
                        vm,
                        vl,
                        vstart,
                        sew,
                        |a, b, sew| {
                            (a & zve64x_arith_helpers::sew_mask(sew))
                                != (b & zve64x_arith_helpers::sew_mask(sew))
                        },
                    );
                }
            }
            // vmsne.vx: inequality compare of vs2 elements against the rs1
            // scalar.
            Self::VmsneVx { vd, vs2, rs1, vm } => {
                if !state.ext_state.vector_instructions_allowed() {
                    Err(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                }
                let vtype = state
                    .ext_state
                    .vtype()
                    .ok_or(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                let group_regs = vtype.vlmul().register_count();
                zve64x_arith_helpers::check_vreg_group_alignment(state, vs2, group_regs)?;
                let sew = vtype.vsew();
                let vl = state.ext_state.vl();
                let vstart = u32::from(state.ext_state.vstart());
                let scalar = state.regs.read(rs1).as_u64();
                // SAFETY: same preconditions as vmsne.vv; assumed to meet
                // execute_compare_op's safety contract.
                unsafe {
                    zve64x_arith_helpers::execute_compare_op(
                        state,
                        vd,
                        vs2,
                        zve64x_arith_helpers::OpSrc::Scalar(scalar),
                        vm,
                        vl,
                        vstart,
                        sew,
                        |a, b, sew| {
                            (a & zve64x_arith_helpers::sew_mask(sew))
                                != (b & zve64x_arith_helpers::sew_mask(sew))
                        },
                    );
                }
            }
            // vmsne.vi: inequality compare against the sign-extended immediate.
            Self::VmsneVi { vd, vs2, imm, vm } => {
                if !state.ext_state.vector_instructions_allowed() {
                    Err(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                }
                let vtype = state
                    .ext_state
                    .vtype()
                    .ok_or(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                let group_regs = vtype.vlmul().register_count();
                zve64x_arith_helpers::check_vreg_group_alignment(state, vs2, group_regs)?;
                let sew = vtype.vsew();
                let vl = state.ext_state.vl();
                let vstart = u32::from(state.ext_state.vstart());
                let scalar = i64::from(imm).cast_unsigned();
                // SAFETY: same preconditions as vmsne.vv; assumed to meet
                // execute_compare_op's safety contract.
                unsafe {
                    zve64x_arith_helpers::execute_compare_op(
                        state,
                        vd,
                        vs2,
                        zve64x_arith_helpers::OpSrc::Scalar(scalar),
                        vm,
                        vl,
                        vstart,
                        sew,
                        |a, b, sew| {
                            (a & zve64x_arith_helpers::sew_mask(sew))
                                != (b & zve64x_arith_helpers::sew_mask(sew))
                        },
                    );
                }
            }
            // vmsltu.vv: mask-writing unsigned less-than compare on
            // SEW-truncated operands. vd is a single mask register, so no
            // group-alignment or v0-overlap check applies to it.
            Self::VmsltuVv { vd, vs2, vs1, vm } => {
                if !state.ext_state.vector_instructions_allowed() {
                    Err(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                }
                let vtype = state
                    .ext_state
                    .vtype()
                    .ok_or(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                let group_regs = vtype.vlmul().register_count();
                zve64x_arith_helpers::check_vreg_group_alignment(state, vs2, group_regs)?;
                zve64x_arith_helpers::check_vreg_group_alignment(state, vs1, group_regs)?;
                let sew = vtype.vsew();
                let vl = state.ext_state.vl();
                let vstart = u32::from(state.ext_state.vstart());
                // SAFETY: source register groups were alignment-checked above
                // and vl/vstart/sew come from the live CSR state; assumed to
                // meet execute_compare_op's safety contract.
                unsafe {
                    zve64x_arith_helpers::execute_compare_op(
                        state,
                        vd,
                        vs2,
                        zve64x_arith_helpers::OpSrc::Vreg(vs1.bits()),
                        vm,
                        vl,
                        vstart,
                        sew,
                        |a, b, sew| {
                            (a & zve64x_arith_helpers::sew_mask(sew))
                                < (b & zve64x_arith_helpers::sew_mask(sew))
                        },
                    );
                }
            }
            // vmsltu.vx: unsigned less-than compare of vs2 elements against the
            // rs1 scalar.
            Self::VmsltuVx { vd, vs2, rs1, vm } => {
                if !state.ext_state.vector_instructions_allowed() {
                    Err(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                }
                let vtype = state
                    .ext_state
                    .vtype()
                    .ok_or(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                let group_regs = vtype.vlmul().register_count();
                zve64x_arith_helpers::check_vreg_group_alignment(state, vs2, group_regs)?;
                let sew = vtype.vsew();
                let vl = state.ext_state.vl();
                let vstart = u32::from(state.ext_state.vstart());
                let scalar = state.regs.read(rs1).as_u64();
                // SAFETY: same preconditions as vmsltu.vv; assumed to meet
                // execute_compare_op's safety contract.
                unsafe {
                    zve64x_arith_helpers::execute_compare_op(
                        state,
                        vd,
                        vs2,
                        zve64x_arith_helpers::OpSrc::Scalar(scalar),
                        vm,
                        vl,
                        vstart,
                        sew,
                        |a, b, sew| {
                            (a & zve64x_arith_helpers::sew_mask(sew))
                                < (b & zve64x_arith_helpers::sew_mask(sew))
                        },
                    );
                }
            }
            // vmslt.vv: mask-writing signed less-than compare; operands are
            // sign-extended from SEW before comparison. vd is a single mask
            // register, so no group-alignment or v0-overlap check on it.
            Self::VmsltVv { vd, vs2, vs1, vm } => {
                if !state.ext_state.vector_instructions_allowed() {
                    Err(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                }
                let vtype = state
                    .ext_state
                    .vtype()
                    .ok_or(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                let group_regs = vtype.vlmul().register_count();
                zve64x_arith_helpers::check_vreg_group_alignment(state, vs2, group_regs)?;
                zve64x_arith_helpers::check_vreg_group_alignment(state, vs1, group_regs)?;
                let sew = vtype.vsew();
                let vl = state.ext_state.vl();
                let vstart = u32::from(state.ext_state.vstart());
                // SAFETY: source register groups were alignment-checked above
                // and vl/vstart/sew come from the live CSR state; assumed to
                // meet execute_compare_op's safety contract.
                unsafe {
                    zve64x_arith_helpers::execute_compare_op(
                        state,
                        vd,
                        vs2,
                        zve64x_arith_helpers::OpSrc::Vreg(vs1.bits()),
                        vm,
                        vl,
                        vstart,
                        sew,
                        |a, b, sew| {
                            zve64x_arith_helpers::sign_extend(a, sew)
                                < zve64x_arith_helpers::sign_extend(b, sew)
                        },
                    );
                }
            }
            // vmslt.vx: signed less-than compare of vs2 elements against the
            // rs1 scalar.
            Self::VmsltVx { vd, vs2, rs1, vm } => {
                if !state.ext_state.vector_instructions_allowed() {
                    Err(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                }
                let vtype = state
                    .ext_state
                    .vtype()
                    .ok_or(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                let group_regs = vtype.vlmul().register_count();
                zve64x_arith_helpers::check_vreg_group_alignment(state, vs2, group_regs)?;
                let sew = vtype.vsew();
                let vl = state.ext_state.vl();
                let vstart = u32::from(state.ext_state.vstart());
                let scalar = state.regs.read(rs1).as_u64();
                // SAFETY: same preconditions as vmslt.vv; assumed to meet
                // execute_compare_op's safety contract.
                unsafe {
                    zve64x_arith_helpers::execute_compare_op(
                        state,
                        vd,
                        vs2,
                        zve64x_arith_helpers::OpSrc::Scalar(scalar),
                        vm,
                        vl,
                        vstart,
                        sew,
                        |a, b, sew| {
                            zve64x_arith_helpers::sign_extend(a, sew)
                                < zve64x_arith_helpers::sign_extend(b, sew)
                        },
                    );
                }
            }
            // vmsleu.vv: mask-writing unsigned less-than-or-equal compare on
            // SEW-truncated operands. vd is a single mask register, so no
            // group-alignment or v0-overlap check applies to it.
            Self::VmsleuVv { vd, vs2, vs1, vm } => {
                if !state.ext_state.vector_instructions_allowed() {
                    Err(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                }
                let vtype = state
                    .ext_state
                    .vtype()
                    .ok_or(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                let group_regs = vtype.vlmul().register_count();
                zve64x_arith_helpers::check_vreg_group_alignment(state, vs2, group_regs)?;
                zve64x_arith_helpers::check_vreg_group_alignment(state, vs1, group_regs)?;
                let sew = vtype.vsew();
                let vl = state.ext_state.vl();
                let vstart = u32::from(state.ext_state.vstart());
                // SAFETY: source register groups were alignment-checked above
                // and vl/vstart/sew come from the live CSR state; assumed to
                // meet execute_compare_op's safety contract.
                unsafe {
                    zve64x_arith_helpers::execute_compare_op(
                        state,
                        vd,
                        vs2,
                        zve64x_arith_helpers::OpSrc::Vreg(vs1.bits()),
                        vm,
                        vl,
                        vstart,
                        sew,
                        |a, b, sew| {
                            (a & zve64x_arith_helpers::sew_mask(sew))
                                <= (b & zve64x_arith_helpers::sew_mask(sew))
                        },
                    );
                }
            }
            // vmsleu.vx: unsigned <= compare of vs2 elements against the rs1
            // scalar.
            Self::VmsleuVx { vd, vs2, rs1, vm } => {
                if !state.ext_state.vector_instructions_allowed() {
                    Err(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                }
                let vtype = state
                    .ext_state
                    .vtype()
                    .ok_or(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                let group_regs = vtype.vlmul().register_count();
                zve64x_arith_helpers::check_vreg_group_alignment(state, vs2, group_regs)?;
                let sew = vtype.vsew();
                let vl = state.ext_state.vl();
                let vstart = u32::from(state.ext_state.vstart());
                let scalar = state.regs.read(rs1).as_u64();
                // SAFETY: same preconditions as vmsleu.vv; assumed to meet
                // execute_compare_op's safety contract.
                unsafe {
                    zve64x_arith_helpers::execute_compare_op(
                        state,
                        vd,
                        vs2,
                        zve64x_arith_helpers::OpSrc::Scalar(scalar),
                        vm,
                        vl,
                        vstart,
                        sew,
                        |a, b, sew| {
                            (a & zve64x_arith_helpers::sew_mask(sew))
                                <= (b & zve64x_arith_helpers::sew_mask(sew))
                        },
                    );
                }
            }
            // vmsleu.vi: unsigned <= compare against the sign-extended
            // immediate reinterpreted as u64 (the closure re-truncates to SEW).
            Self::VmsleuVi { vd, vs2, imm, vm } => {
                if !state.ext_state.vector_instructions_allowed() {
                    Err(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                }
                let vtype = state
                    .ext_state
                    .vtype()
                    .ok_or(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                let group_regs = vtype.vlmul().register_count();
                zve64x_arith_helpers::check_vreg_group_alignment(state, vs2, group_regs)?;
                let sew = vtype.vsew();
                let vl = state.ext_state.vl();
                let vstart = u32::from(state.ext_state.vstart());
                let scalar = i64::from(imm).cast_unsigned();
                // SAFETY: same preconditions as vmsleu.vv; assumed to meet
                // execute_compare_op's safety contract.
                unsafe {
                    zve64x_arith_helpers::execute_compare_op(
                        state,
                        vd,
                        vs2,
                        zve64x_arith_helpers::OpSrc::Scalar(scalar),
                        vm,
                        vl,
                        vstart,
                        sew,
                        |a, b, sew| {
                            (a & zve64x_arith_helpers::sew_mask(sew))
                                <= (b & zve64x_arith_helpers::sew_mask(sew))
                        },
                    );
                }
            }
            // vmsle.vv: mask-writing signed less-than-or-equal compare;
            // operands sign-extended from SEW before comparison. vd is a single
            // mask register, so no group-alignment or v0-overlap check on it.
            Self::VmsleVv { vd, vs2, vs1, vm } => {
                if !state.ext_state.vector_instructions_allowed() {
                    Err(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                }
                let vtype = state
                    .ext_state
                    .vtype()
                    .ok_or(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                let group_regs = vtype.vlmul().register_count();
                zve64x_arith_helpers::check_vreg_group_alignment(state, vs2, group_regs)?;
                zve64x_arith_helpers::check_vreg_group_alignment(state, vs1, group_regs)?;
                let sew = vtype.vsew();
                let vl = state.ext_state.vl();
                let vstart = u32::from(state.ext_state.vstart());
                // SAFETY: source register groups were alignment-checked above
                // and vl/vstart/sew come from the live CSR state; assumed to
                // meet execute_compare_op's safety contract.
                unsafe {
                    zve64x_arith_helpers::execute_compare_op(
                        state,
                        vd,
                        vs2,
                        zve64x_arith_helpers::OpSrc::Vreg(vs1.bits()),
                        vm,
                        vl,
                        vstart,
                        sew,
                        |a, b, sew| {
                            zve64x_arith_helpers::sign_extend(a, sew)
                                <= zve64x_arith_helpers::sign_extend(b, sew)
                        },
                    );
                }
            }
            // vmsle.vx: signed <= compare of vs2 elements against the rs1
            // scalar.
            Self::VmsleVx { vd, vs2, rs1, vm } => {
                if !state.ext_state.vector_instructions_allowed() {
                    Err(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                }
                let vtype = state
                    .ext_state
                    .vtype()
                    .ok_or(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                let group_regs = vtype.vlmul().register_count();
                zve64x_arith_helpers::check_vreg_group_alignment(state, vs2, group_regs)?;
                let sew = vtype.vsew();
                let vl = state.ext_state.vl();
                let vstart = u32::from(state.ext_state.vstart());
                let scalar = state.regs.read(rs1).as_u64();
                // SAFETY: same preconditions as vmsle.vv; assumed to meet
                // execute_compare_op's safety contract.
                unsafe {
                    zve64x_arith_helpers::execute_compare_op(
                        state,
                        vd,
                        vs2,
                        zve64x_arith_helpers::OpSrc::Scalar(scalar),
                        vm,
                        vl,
                        vstart,
                        sew,
                        |a, b, sew| {
                            zve64x_arith_helpers::sign_extend(a, sew)
                                <= zve64x_arith_helpers::sign_extend(b, sew)
                        },
                    );
                }
            }
            // vmsle.vi: signed <= compare against the sign-extended immediate.
            Self::VmsleVi { vd, vs2, imm, vm } => {
                if !state.ext_state.vector_instructions_allowed() {
                    Err(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                }
                let vtype = state
                    .ext_state
                    .vtype()
                    .ok_or(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                let group_regs = vtype.vlmul().register_count();
                zve64x_arith_helpers::check_vreg_group_alignment(state, vs2, group_regs)?;
                let sew = vtype.vsew();
                let vl = state.ext_state.vl();
                let vstart = u32::from(state.ext_state.vstart());
                let scalar = i64::from(imm).cast_unsigned();
                // SAFETY: same preconditions as vmsle.vv; assumed to meet
                // execute_compare_op's safety contract.
                unsafe {
                    zve64x_arith_helpers::execute_compare_op(
                        state,
                        vd,
                        vs2,
                        zve64x_arith_helpers::OpSrc::Scalar(scalar),
                        vm,
                        vl,
                        vstart,
                        sew,
                        |a, b, sew| {
                            zve64x_arith_helpers::sign_extend(a, sew)
                                <= zve64x_arith_helpers::sign_extend(b, sew)
                        },
                    );
                }
            }
            // vmsgtu.vx: mask-writing unsigned greater-than compare of vs2
            // elements against the rs1 scalar (no .vv form exists for vmsgt*).
            // vd is a single mask register, so no group-alignment or
            // v0-overlap check applies to it.
            Self::VmsgtuVx { vd, vs2, rs1, vm } => {
                if !state.ext_state.vector_instructions_allowed() {
                    Err(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                }
                let vtype = state
                    .ext_state
                    .vtype()
                    .ok_or(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                let group_regs = vtype.vlmul().register_count();
                zve64x_arith_helpers::check_vreg_group_alignment(state, vs2, group_regs)?;
                let sew = vtype.vsew();
                let vl = state.ext_state.vl();
                let vstart = u32::from(state.ext_state.vstart());
                let scalar = state.regs.read(rs1).as_u64();
                // SAFETY: the vs2 register group was alignment-checked above
                // and vl/vstart/sew come from the live CSR state; assumed to
                // meet execute_compare_op's safety contract.
                unsafe {
                    zve64x_arith_helpers::execute_compare_op(
                        state,
                        vd,
                        vs2,
                        zve64x_arith_helpers::OpSrc::Scalar(scalar),
                        vm,
                        vl,
                        vstart,
                        sew,
                        |a, b, sew| {
                            (a & zve64x_arith_helpers::sew_mask(sew))
                                > (b & zve64x_arith_helpers::sew_mask(sew))
                        },
                    );
                }
            }
            // vmsgtu.vi: unsigned > compare against the sign-extended immediate
            // reinterpreted as u64 (the closure re-truncates to SEW).
            Self::VmsgtuVi { vd, vs2, imm, vm } => {
                if !state.ext_state.vector_instructions_allowed() {
                    Err(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                }
                let vtype = state
                    .ext_state
                    .vtype()
                    .ok_or(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                let group_regs = vtype.vlmul().register_count();
                zve64x_arith_helpers::check_vreg_group_alignment(state, vs2, group_regs)?;
                let sew = vtype.vsew();
                let vl = state.ext_state.vl();
                let vstart = u32::from(state.ext_state.vstart());
                let scalar = i64::from(imm).cast_unsigned();
                // SAFETY: same preconditions as vmsgtu.vx; assumed to meet
                // execute_compare_op's safety contract.
                unsafe {
                    zve64x_arith_helpers::execute_compare_op(
                        state,
                        vd,
                        vs2,
                        zve64x_arith_helpers::OpSrc::Scalar(scalar),
                        vm,
                        vl,
                        vstart,
                        sew,
                        |a, b, sew| {
                            (a & zve64x_arith_helpers::sew_mask(sew))
                                > (b & zve64x_arith_helpers::sew_mask(sew))
                        },
                    );
                }
            }
            // vmsgt.vx: write a mask into vd where the vs2 element is
            // signed-greater-than the scalar in x[rs1] (operand order inside
            // the comparator is fixed by execute_compare_op — confirm against
            // its contract).
            Self::VmsgtVx { vd, vs2, rs1, vm } => {
                // Vector instructions must currently be permitted; otherwise
                // raise IllegalInstruction at this instruction's own PC.
                if !state.ext_state.vector_instructions_allowed() {
                    Err(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                }
                // No valid vtype configuration (presumably vill is set) is
                // likewise an illegal instruction at this PC.
                let vtype = state
                    .ext_state
                    .vtype()
                    .ok_or(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                // Only vs2 forms an LMUL register group here; vd is not checked
                // — presumably because compares write a single mask register.
                let group_regs = vtype.vlmul().register_count();
                zve64x_arith_helpers::check_vreg_group_alignment(state, vs2, group_regs)?;
                let sew = vtype.vsew();
                let vl = state.ext_state.vl();
                let vstart = u32::from(state.ext_state.vstart());
                // Full x-register value; sign_extend in the comparator narrows
                // it to SEW and re-extends before the signed compare.
                let scalar = state.regs.read(rs1).as_u64();
                // SAFETY: vs2's group alignment was validated above and
                // vl/vstart/sew come straight from ext_state; remaining
                // preconditions of execute_compare_op are assumed to hold —
                // confirm against the helper's documented contract.
                unsafe {
                    zve64x_arith_helpers::execute_compare_op(
                        state,
                        vd,
                        vs2,
                        zve64x_arith_helpers::OpSrc::Scalar(scalar),
                        vm,
                        vl,
                        vstart,
                        sew,
                        // Signed >: sign-extend both operands from SEW first.
                        |a, b, sew| {
                            zve64x_arith_helpers::sign_extend(a, sew)
                                > zve64x_arith_helpers::sign_extend(b, sew)
                        },
                    );
                }
            }
            // vmsgt.vi: write a mask into vd where the vs2 element is
            // signed-greater-than the sign-extended immediate (operand order
            // inside the comparator is fixed by execute_compare_op — confirm
            // against its contract).
            Self::VmsgtVi { vd, vs2, imm, vm } => {
                // Vector instructions must currently be permitted; otherwise
                // raise IllegalInstruction at this instruction's own PC.
                if !state.ext_state.vector_instructions_allowed() {
                    Err(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                }
                // No valid vtype configuration (presumably vill is set) is
                // likewise an illegal instruction at this PC.
                let vtype = state
                    .ext_state
                    .vtype()
                    .ok_or(ExecutionError::IllegalInstruction {
                        address: state
                            .instruction_fetcher
                            .old_pc(zve64x_helpers::INSTRUCTION_SIZE),
                    })?;
                // Only vs2 forms an LMUL register group here; vd is not checked
                // — presumably because compares write a single mask register.
                let group_regs = vtype.vlmul().register_count();
                zve64x_arith_helpers::check_vreg_group_alignment(state, vs2, group_regs)?;
                let sew = vtype.vsew();
                let vl = state.ext_state.vl();
                let vstart = u32::from(state.ext_state.vstart());
                // Sign-extend the immediate to 64 bits, then carry it as raw
                // bits; sign_extend in the comparator restores the signed value
                // at the current SEW.
                let scalar = i64::from(imm).cast_unsigned();
                // SAFETY: vs2's group alignment was validated above and
                // vl/vstart/sew come straight from ext_state; remaining
                // preconditions of execute_compare_op are assumed to hold —
                // confirm against the helper's documented contract.
                unsafe {
                    zve64x_arith_helpers::execute_compare_op(
                        state,
                        vd,
                        vs2,
                        zve64x_arith_helpers::OpSrc::Scalar(scalar),
                        vm,
                        vl,
                        vstart,
                        sew,
                        // Signed >: sign-extend both operands from SEW first.
                        |a, b, sew| {
                            zve64x_arith_helpers::sign_extend(a, sew)
                                > zve64x_arith_helpers::sign_extend(b, sew)
                        },
                    );
                }
            }
2422 }
2423
2424 Ok(ControlFlow::Continue(()))
2425 }
2426}