pub enum Rv64Zve64xInstruction<Reg> {
Vsetvli {
rd: Reg,
rs1: Reg,
vtypei: u16,
},
Vsetivli {
rd: Reg,
uimm: u8,
vtypei: u16,
},
Vsetvl {
rd: Reg,
rs1: Reg,
rs2: Reg,
},
Vle {
vd: VReg,
rs1: Reg,
vm: bool,
eew: Eew,
},
Vleff {
vd: VReg,
rs1: Reg,
vm: bool,
eew: Eew,
},
Vlm {
vd: VReg,
rs1: Reg,
},
Vlse {
vd: VReg,
rs1: Reg,
rs2: Reg,
vm: bool,
eew: Eew,
},
Vluxei {
vd: VReg,
rs1: Reg,
vs2: VReg,
vm: bool,
eew: Eew,
},
Vloxei {
vd: VReg,
rs1: Reg,
vs2: VReg,
vm: bool,
eew: Eew,
},
Vlr {
vd: VReg,
rs1: Reg,
nreg: u8,
eew: Eew,
},
Vlseg {
vd: VReg,
rs1: Reg,
vm: bool,
eew: Eew,
nf: u8,
},
Vlsegff {
vd: VReg,
rs1: Reg,
vm: bool,
eew: Eew,
nf: u8,
},
Vlsseg {
vd: VReg,
rs1: Reg,
rs2: Reg,
vm: bool,
eew: Eew,
nf: u8,
},
Vluxseg {
vd: VReg,
rs1: Reg,
vs2: VReg,
vm: bool,
eew: Eew,
nf: u8,
},
Vloxseg {
vd: VReg,
rs1: Reg,
vs2: VReg,
vm: bool,
eew: Eew,
nf: u8,
},
Vse {
vs3: VReg,
rs1: Reg,
vm: bool,
eew: Eew,
},
Vsm {
vs3: VReg,
rs1: Reg,
},
Vsse {
vs3: VReg,
rs1: Reg,
rs2: Reg,
vm: bool,
eew: Eew,
},
Vsuxei {
vs3: VReg,
rs1: Reg,
vs2: VReg,
vm: bool,
eew: Eew,
},
Vsoxei {
vs3: VReg,
rs1: Reg,
vs2: VReg,
vm: bool,
eew: Eew,
},
Vsr {
vs3: VReg,
rs1: Reg,
nreg: u8,
},
Vsseg {
vs3: VReg,
rs1: Reg,
vm: bool,
eew: Eew,
nf: u8,
},
Vssseg {
vs3: VReg,
rs1: Reg,
rs2: Reg,
vm: bool,
eew: Eew,
nf: u8,
},
Vsuxseg {
vs3: VReg,
rs1: Reg,
vs2: VReg,
vm: bool,
eew: Eew,
nf: u8,
},
Vsoxseg {
vs3: VReg,
rs1: Reg,
vs2: VReg,
vm: bool,
eew: Eew,
nf: u8,
},
VaddVv {
vd: VReg,
vs2: VReg,
vs1: VReg,
vm: bool,
},
VaddVx {
vd: VReg,
vs2: VReg,
rs1: Reg,
vm: bool,
},
VaddVi {
vd: VReg,
vs2: VReg,
imm: i8,
vm: bool,
},
VsubVv {
vd: VReg,
vs2: VReg,
vs1: VReg,
vm: bool,
},
VsubVx {
vd: VReg,
vs2: VReg,
rs1: Reg,
vm: bool,
},
VrsubVx {
vd: VReg,
vs2: VReg,
rs1: Reg,
vm: bool,
},
VrsubVi {
vd: VReg,
vs2: VReg,
imm: i8,
vm: bool,
},
VandVv {
vd: VReg,
vs2: VReg,
vs1: VReg,
vm: bool,
},
VandVx {
vd: VReg,
vs2: VReg,
rs1: Reg,
vm: bool,
},
VandVi {
vd: VReg,
vs2: VReg,
imm: i8,
vm: bool,
},
VorVv {
vd: VReg,
vs2: VReg,
vs1: VReg,
vm: bool,
},
VorVx {
vd: VReg,
vs2: VReg,
rs1: Reg,
vm: bool,
},
VorVi {
vd: VReg,
vs2: VReg,
imm: i8,
vm: bool,
},
VxorVv {
vd: VReg,
vs2: VReg,
vs1: VReg,
vm: bool,
},
VxorVx {
vd: VReg,
vs2: VReg,
rs1: Reg,
vm: bool,
},
VxorVi {
vd: VReg,
vs2: VReg,
imm: i8,
vm: bool,
},
VsllVv {
vd: VReg,
vs2: VReg,
vs1: VReg,
vm: bool,
},
VsllVx {
vd: VReg,
vs2: VReg,
rs1: Reg,
vm: bool,
},
VsllVi {
vd: VReg,
vs2: VReg,
uimm: u8,
vm: bool,
},
VsrlVv {
vd: VReg,
vs2: VReg,
vs1: VReg,
vm: bool,
},
VsrlVx {
vd: VReg,
vs2: VReg,
rs1: Reg,
vm: bool,
},
VsrlVi {
vd: VReg,
vs2: VReg,
uimm: u8,
vm: bool,
},
VsraVv {
vd: VReg,
vs2: VReg,
vs1: VReg,
vm: bool,
},
VsraVx {
vd: VReg,
vs2: VReg,
rs1: Reg,
vm: bool,
},
VsraVi {
vd: VReg,
vs2: VReg,
uimm: u8,
vm: bool,
},
VminuVv {
vd: VReg,
vs2: VReg,
vs1: VReg,
vm: bool,
},
VminuVx {
vd: VReg,
vs2: VReg,
rs1: Reg,
vm: bool,
},
VminVv {
vd: VReg,
vs2: VReg,
vs1: VReg,
vm: bool,
},
VminVx {
vd: VReg,
vs2: VReg,
rs1: Reg,
vm: bool,
},
VmaxuVv {
vd: VReg,
vs2: VReg,
vs1: VReg,
vm: bool,
},
VmaxuVx {
vd: VReg,
vs2: VReg,
rs1: Reg,
vm: bool,
},
VmaxVv {
vd: VReg,
vs2: VReg,
vs1: VReg,
vm: bool,
},
VmaxVx {
vd: VReg,
vs2: VReg,
rs1: Reg,
vm: bool,
},
VmseqVv {
vd: VReg,
vs2: VReg,
vs1: VReg,
vm: bool,
},
VmseqVx {
vd: VReg,
vs2: VReg,
rs1: Reg,
vm: bool,
},
VmseqVi {
vd: VReg,
vs2: VReg,
imm: i8,
vm: bool,
},
VmsneVv {
vd: VReg,
vs2: VReg,
vs1: VReg,
vm: bool,
},
VmsneVx {
vd: VReg,
vs2: VReg,
rs1: Reg,
vm: bool,
},
VmsneVi {
vd: VReg,
vs2: VReg,
imm: i8,
vm: bool,
},
VmsltuVv {
vd: VReg,
vs2: VReg,
vs1: VReg,
vm: bool,
},
VmsltuVx {
vd: VReg,
vs2: VReg,
rs1: Reg,
vm: bool,
},
VmsltVv {
vd: VReg,
vs2: VReg,
vs1: VReg,
vm: bool,
},
VmsltVx {
vd: VReg,
vs2: VReg,
rs1: Reg,
vm: bool,
},
VmsleuVv {
vd: VReg,
vs2: VReg,
vs1: VReg,
vm: bool,
},
VmsleuVx {
vd: VReg,
vs2: VReg,
rs1: Reg,
vm: bool,
},
VmsleuVi {
vd: VReg,
vs2: VReg,
imm: i8,
vm: bool,
},
VmsleVv {
vd: VReg,
vs2: VReg,
vs1: VReg,
vm: bool,
},
VmsleVx {
vd: VReg,
vs2: VReg,
rs1: Reg,
vm: bool,
},
VmsleVi {
vd: VReg,
vs2: VReg,
imm: i8,
vm: bool,
},
VmsgtuVx {
vd: VReg,
vs2: VReg,
rs1: Reg,
vm: bool,
},
VmsgtuVi {
vd: VReg,
vs2: VReg,
imm: i8,
vm: bool,
},
VmsgtVx {
vd: VReg,
vs2: VReg,
rs1: Reg,
vm: bool,
},
VmsgtVi {
vd: VReg,
vs2: VReg,
imm: i8,
vm: bool,
},
VmulVv {
vd: VReg,
vs2: VReg,
vs1: VReg,
vm: bool,
},
VmulVx {
vd: VReg,
vs2: VReg,
rs1: Reg,
vm: bool,
},
VmulhVv {
vd: VReg,
vs2: VReg,
vs1: VReg,
vm: bool,
},
VmulhVx {
vd: VReg,
vs2: VReg,
rs1: Reg,
vm: bool,
},
VmulhuVv {
vd: VReg,
vs2: VReg,
vs1: VReg,
vm: bool,
},
VmulhuVx {
vd: VReg,
vs2: VReg,
rs1: Reg,
vm: bool,
},
VmulhsuVv {
vd: VReg,
vs2: VReg,
vs1: VReg,
vm: bool,
},
VmulhsuVx {
vd: VReg,
vs2: VReg,
rs1: Reg,
vm: bool,
},
VdivuVv {
vd: VReg,
vs2: VReg,
vs1: VReg,
vm: bool,
},
VdivuVx {
vd: VReg,
vs2: VReg,
rs1: Reg,
vm: bool,
},
VdivVv {
vd: VReg,
vs2: VReg,
vs1: VReg,
vm: bool,
},
VdivVx {
vd: VReg,
vs2: VReg,
rs1: Reg,
vm: bool,
},
VremuVv {
vd: VReg,
vs2: VReg,
vs1: VReg,
vm: bool,
},
VremuVx {
vd: VReg,
vs2: VReg,
rs1: Reg,
vm: bool,
},
VremVv {
vd: VReg,
vs2: VReg,
vs1: VReg,
vm: bool,
},
VremVx {
vd: VReg,
vs2: VReg,
rs1: Reg,
vm: bool,
},
VwmulVv {
vd: VReg,
vs2: VReg,
vs1: VReg,
vm: bool,
},
VwmulVx {
vd: VReg,
vs2: VReg,
rs1: Reg,
vm: bool,
},
VwmuluVv {
vd: VReg,
vs2: VReg,
vs1: VReg,
vm: bool,
},
VwmuluVx {
vd: VReg,
vs2: VReg,
rs1: Reg,
vm: bool,
},
VwmulsuVv {
vd: VReg,
vs2: VReg,
vs1: VReg,
vm: bool,
},
VwmulsuVx {
vd: VReg,
vs2: VReg,
rs1: Reg,
vm: bool,
},
VmaccVv {
vd: VReg,
vs1: VReg,
vs2: VReg,
vm: bool,
},
VmaccVx {
vd: VReg,
rs1: Reg,
vs2: VReg,
vm: bool,
},
VnmsacVv {
vd: VReg,
vs1: VReg,
vs2: VReg,
vm: bool,
},
VnmsacVx {
vd: VReg,
rs1: Reg,
vs2: VReg,
vm: bool,
},
VmaddVv {
vd: VReg,
vs1: VReg,
vs2: VReg,
vm: bool,
},
VmaddVx {
vd: VReg,
rs1: Reg,
vs2: VReg,
vm: bool,
},
VnmsubVv {
vd: VReg,
vs1: VReg,
vs2: VReg,
vm: bool,
},
VnmsubVx {
vd: VReg,
rs1: Reg,
vs2: VReg,
vm: bool,
},
VwmaccuVv {
vd: VReg,
vs1: VReg,
vs2: VReg,
vm: bool,
},
VwmaccuVx {
vd: VReg,
rs1: Reg,
vs2: VReg,
vm: bool,
},
VwmaccVv {
vd: VReg,
vs1: VReg,
vs2: VReg,
vm: bool,
},
VwmaccVx {
vd: VReg,
rs1: Reg,
vs2: VReg,
vm: bool,
},
VwmaccsuVv {
vd: VReg,
vs1: VReg,
vs2: VReg,
vm: bool,
},
VwmaccsuVx {
vd: VReg,
rs1: Reg,
vs2: VReg,
vm: bool,
},
VwmaccusVx {
vd: VReg,
rs1: Reg,
vs2: VReg,
vm: bool,
},
VwadduVv {
vd: VReg,
vs2: VReg,
vs1: VReg,
vm: bool,
},
VwadduVx {
vd: VReg,
vs2: VReg,
rs1: Reg,
vm: bool,
},
VwaddVv {
vd: VReg,
vs2: VReg,
vs1: VReg,
vm: bool,
},
VwaddVx {
vd: VReg,
vs2: VReg,
rs1: Reg,
vm: bool,
},
VwsubuVv {
vd: VReg,
vs2: VReg,
vs1: VReg,
vm: bool,
},
VwsubuVx {
vd: VReg,
vs2: VReg,
rs1: Reg,
vm: bool,
},
VwsubVv {
vd: VReg,
vs2: VReg,
vs1: VReg,
vm: bool,
},
VwsubVx {
vd: VReg,
vs2: VReg,
rs1: Reg,
vm: bool,
},
VwadduWv {
vd: VReg,
vs2: VReg,
vs1: VReg,
vm: bool,
},
VwadduWx {
vd: VReg,
vs2: VReg,
rs1: Reg,
vm: bool,
},
VwaddWv {
vd: VReg,
vs2: VReg,
vs1: VReg,
vm: bool,
},
VwaddWx {
vd: VReg,
vs2: VReg,
rs1: Reg,
vm: bool,
},
VwsubuWv {
vd: VReg,
vs2: VReg,
vs1: VReg,
vm: bool,
},
VwsubuWx {
vd: VReg,
vs2: VReg,
rs1: Reg,
vm: bool,
},
VwsubWv {
vd: VReg,
vs2: VReg,
vs1: VReg,
vm: bool,
},
VwsubWx {
vd: VReg,
vs2: VReg,
rs1: Reg,
vm: bool,
},
VnsrlWv {
vd: VReg,
vs2: VReg,
vs1: VReg,
vm: bool,
},
VnsrlWx {
vd: VReg,
vs2: VReg,
rs1: Reg,
vm: bool,
},
VnsrlWi {
vd: VReg,
vs2: VReg,
uimm: u8,
vm: bool,
},
VnsraWv {
vd: VReg,
vs2: VReg,
vs1: VReg,
vm: bool,
},
VnsraWx {
vd: VReg,
vs2: VReg,
rs1: Reg,
vm: bool,
},
VnsraWi {
vd: VReg,
vs2: VReg,
uimm: u8,
vm: bool,
},
VzextVf2 {
vd: VReg,
vs2: VReg,
vm: bool,
},
VzextVf4 {
vd: VReg,
vs2: VReg,
vm: bool,
},
VzextVf8 {
vd: VReg,
vs2: VReg,
vm: bool,
},
VsextVf2 {
vd: VReg,
vs2: VReg,
vm: bool,
},
VsextVf4 {
vd: VReg,
vs2: VReg,
vm: bool,
},
VsextVf8 {
vd: VReg,
vs2: VReg,
vm: bool,
},
VsadduVv {
vd: VReg,
vs2: VReg,
vs1: VReg,
vm: bool,
},
VsadduVx {
vd: VReg,
vs2: VReg,
rs1: Reg,
vm: bool,
},
VsadduVi {
vd: VReg,
vs2: VReg,
imm: i8,
vm: bool,
},
VsaddVv {
vd: VReg,
vs2: VReg,
vs1: VReg,
vm: bool,
},
VsaddVx {
vd: VReg,
vs2: VReg,
rs1: Reg,
vm: bool,
},
VsaddVi {
vd: VReg,
vs2: VReg,
imm: i8,
vm: bool,
},
VssubuVv {
vd: VReg,
vs2: VReg,
vs1: VReg,
vm: bool,
},
VssubuVx {
vd: VReg,
vs2: VReg,
rs1: Reg,
vm: bool,
},
VssubVv {
vd: VReg,
vs2: VReg,
vs1: VReg,
vm: bool,
},
VssubVx {
vd: VReg,
vs2: VReg,
rs1: Reg,
vm: bool,
},
VaadduVv {
vd: VReg,
vs2: VReg,
vs1: VReg,
vm: bool,
},
VaadduVx {
vd: VReg,
vs2: VReg,
rs1: Reg,
vm: bool,
},
VaaddVv {
vd: VReg,
vs2: VReg,
vs1: VReg,
vm: bool,
},
VaaddVx {
vd: VReg,
vs2: VReg,
rs1: Reg,
vm: bool,
},
VasubuVv {
vd: VReg,
vs2: VReg,
vs1: VReg,
vm: bool,
},
VasubuVx {
vd: VReg,
vs2: VReg,
rs1: Reg,
vm: bool,
},
VasubVv {
vd: VReg,
vs2: VReg,
vs1: VReg,
vm: bool,
},
VasubVx {
vd: VReg,
vs2: VReg,
rs1: Reg,
vm: bool,
},
VsmulVv {
vd: VReg,
vs2: VReg,
vs1: VReg,
vm: bool,
},
VsmulVx {
vd: VReg,
vs2: VReg,
rs1: Reg,
vm: bool,
},
VssrlVv {
vd: VReg,
vs2: VReg,
vs1: VReg,
vm: bool,
},
VssrlVx {
vd: VReg,
vs2: VReg,
rs1: Reg,
vm: bool,
},
VssrlVi {
vd: VReg,
vs2: VReg,
imm: u8,
vm: bool,
},
VssraVv {
vd: VReg,
vs2: VReg,
vs1: VReg,
vm: bool,
},
VssraVx {
vd: VReg,
vs2: VReg,
rs1: Reg,
vm: bool,
},
VssraVi {
vd: VReg,
vs2: VReg,
imm: u8,
vm: bool,
},
VnclipuWv {
vd: VReg,
vs2: VReg,
vs1: VReg,
vm: bool,
},
VnclipuWx {
vd: VReg,
vs2: VReg,
rs1: Reg,
vm: bool,
},
VnclipuWi {
vd: VReg,
vs2: VReg,
imm: u8,
vm: bool,
},
VnclipWv {
vd: VReg,
vs2: VReg,
vs1: VReg,
vm: bool,
},
VnclipWx {
vd: VReg,
vs2: VReg,
rs1: Reg,
vm: bool,
},
VnclipWi {
vd: VReg,
vs2: VReg,
imm: u8,
vm: bool,
},
Vmandn {
vd: VReg,
vs2: VReg,
vs1: VReg,
},
Vmand {
vd: VReg,
vs2: VReg,
vs1: VReg,
},
Vmor {
vd: VReg,
vs2: VReg,
vs1: VReg,
},
Vmxor {
vd: VReg,
vs2: VReg,
vs1: VReg,
},
Vmorn {
vd: VReg,
vs2: VReg,
vs1: VReg,
},
Vmnand {
vd: VReg,
vs2: VReg,
vs1: VReg,
},
Vmnor {
vd: VReg,
vs2: VReg,
vs1: VReg,
},
Vmxnor {
vd: VReg,
vs2: VReg,
vs1: VReg,
},
Vcpop {
rd: Reg,
vs2: VReg,
vm: bool,
},
Vfirst {
rd: Reg,
vs2: VReg,
vm: bool,
},
Vmsbf {
vd: VReg,
vs2: VReg,
vm: bool,
},
Vmsof {
vd: VReg,
vs2: VReg,
vm: bool,
},
Vmsif {
vd: VReg,
vs2: VReg,
vm: bool,
},
Viota {
vd: VReg,
vs2: VReg,
vm: bool,
},
Vid {
vd: VReg,
vm: bool,
},
Vredsum {
vd: VReg,
vs2: VReg,
vs1: VReg,
vm: bool,
},
Vredand {
vd: VReg,
vs2: VReg,
vs1: VReg,
vm: bool,
},
Vredor {
vd: VReg,
vs2: VReg,
vs1: VReg,
vm: bool,
},
Vredxor {
vd: VReg,
vs2: VReg,
vs1: VReg,
vm: bool,
},
Vredminu {
vd: VReg,
vs2: VReg,
vs1: VReg,
vm: bool,
},
Vredmin {
vd: VReg,
vs2: VReg,
vs1: VReg,
vm: bool,
},
Vredmaxu {
vd: VReg,
vs2: VReg,
vs1: VReg,
vm: bool,
},
Vredmax {
vd: VReg,
vs2: VReg,
vs1: VReg,
vm: bool,
},
Vwredsumu {
vd: VReg,
vs2: VReg,
vs1: VReg,
vm: bool,
},
Vwredsum {
vd: VReg,
vs2: VReg,
vs1: VReg,
vm: bool,
},
VmvXS {
rd: Reg,
vs2: VReg,
},
VmvSX {
vd: VReg,
rs1: Reg,
},
VslideupVx {
vd: VReg,
vs2: VReg,
rs1: Reg,
vm: bool,
},
VslideupVi {
vd: VReg,
vs2: VReg,
uimm: u8,
vm: bool,
},
VslidedownVx {
vd: VReg,
vs2: VReg,
rs1: Reg,
vm: bool,
},
VslidedownVi {
vd: VReg,
vs2: VReg,
uimm: u8,
vm: bool,
},
Vslide1upVx {
vd: VReg,
vs2: VReg,
rs1: Reg,
vm: bool,
},
Vslide1downVx {
vd: VReg,
vs2: VReg,
rs1: Reg,
vm: bool,
},
VrgatherVv {
vd: VReg,
vs2: VReg,
vs1: VReg,
vm: bool,
},
VrgatherVx {
vd: VReg,
vs2: VReg,
rs1: Reg,
vm: bool,
},
VrgatherVi {
vd: VReg,
vs2: VReg,
uimm: u8,
vm: bool,
},
Vrgatherei16Vv {
vd: VReg,
vs2: VReg,
vs1: VReg,
vm: bool,
},
VcompressVm {
vd: VReg,
vs2: VReg,
vs1: VReg,
},
Vmv1rV {
vd: VReg,
vs2: VReg,
},
Vmv2rV {
vd: VReg,
vs2: VReg,
},
Vmv4rV {
vd: VReg,
vs2: VReg,
},
Vmv8rV {
vd: VReg,
vs2: VReg,
},
}

Variants
Vsetvli
Set vector length and type from GPR
vsetvli rd, rs1, vtypei
rd = new vl, rs1 = AVL, vtypei = new vtype setting (11-bit immediate)
Vsetivli
Set vector length and type from immediate AVL
vsetivli rd, uimm, vtypei
rd = new vl, uimm[4:0] = AVL, vtypei = new vtype setting (10-bit immediate)
Vsetvl
Set vector length and type from GPRs
vsetvl rd, rs1, rs2
rd = new vl, rs1 = AVL, rs2 = new vtype value
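
The vtypei immediate packs the vtype fields laid out by the RVV 1.0 spec. As a minimal sketch, assuming the standard layout (the helper and its return shape are illustrative, not part of this crate's API):

fn decode_vtypei(vtypei: u16) -> (u8, u32, bool, bool) {
    let vlmul = (vtypei & 0b111) as u8;       // vtype[2:0]: register group multiplier
    let vsew = ((vtypei >> 3) & 0b111) as u8; // vtype[5:3]: element width selector
    let sew = 8u32 << vsew;                   // SEW in bits: 8, 16, 32, or 64
    let vta = (vtypei >> 6) & 1 == 1;         // vtype[6]: tail agnostic
    let vma = (vtypei >> 7) & 1 == 1;         // vtype[7]: mask agnostic
    (vlmul, sew, vta, vma)
}

For Vsetvli the immediate is 11 bits (the bits above vma are reserved and must be zero); Vsetivli carries only 10.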
Vle
Unit-stride load: vle{eew}.v vd, (rs1), vm
mop=00, lumop=00000, nf=000
Vleff
Unit-stride fault-only-first load: vle{eew}ff.v vd, (rs1), vm
mop=00, lumop=10000, nf=000
Vlm
Unit-stride mask load: vlm.v vd, (rs1)
mop=00, lumop=01011, nf=000, eew=e8, vm=1
Vlse
Strided load: vlse{eew}.v vd, (rs1), rs2, vm
mop=10, nf=000
Vluxei
Indexed-unordered load: vluxei{eew}.v vd, (rs1), vs2, vm
mop=01, nf=000. eew is the index element width.
Vloxei
Indexed-ordered load: vloxei{eew}.v vd, (rs1), vs2, vm
mop=11, nf=000. eew is the index element width.
Vlr
Whole-register load: vl{nreg}re{eew}.v vd, (rs1)
mop=00, lumop=01000, vm=1. nreg must be 1, 2, 4, or 8.
Vlseg
Unit-stride segment load: vlseg{nf}e{eew}.v vd, (rs1), vm
mop=00, lumop=00000, nf>0
Vlsegff
Unit-stride fault-only-first segment load: vlseg{nf}e{eew}ff.v vd, (rs1), vm
mop=00, lumop=10000, nf>0
Vlsseg
Strided segment load: vlsseg{nf}e{eew}.v vd, (rs1), rs2, vm
mop=10, nf>0
Vluxseg
Indexed-unordered segment load: vluxseg{nf}ei{eew}.v vd, (rs1), vs2, vm
mop=01, nf>0
Vloxseg
Indexed-ordered segment load: vloxseg{nf}ei{eew}.v vd, (rs1), vs2, vm
mop=11, nf>0
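
As a hedged usage sketch, here is how two of the load variants above might be built with Reg = u8. The VReg constructors and Eew variant names (E16/E32, suggested by the e8 naming in these docs) are assumptions about this crate's API, as is vm: true meaning unmasked (vm=1):

fn example_loads() -> [Rv64Zve64xInstruction<u8>; 2] {
    [
        // vle32.v v8, (x10)
        Rv64Zve64xInstruction::Vle {
            vd: VReg::V8,  // assumed constructor for v8
            rs1: 10,       // x10 holds the base address
            vm: true,      // unmasked
            eew: Eew::E32, // assumed variant name for 32-bit elements
        },
        // vlseg2e16.v v0, (x11)
        Rv64Zve64xInstruction::Vlseg {
            vd: VReg::V0,
            rs1: 11,
            vm: true,
            eew: Eew::E16,
            nf: 2, // two fields per segment; count-vs-encoded-(nf-1) is a crate convention, count assumed
        },
    ]
}

The store variants below mirror this shape with vs3 (the data source) in place of vd.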
Vse
Unit-stride store: vse{eew}.v vs3, (rs1), vm
mop=00, sumop=00000, nf=000
Vsm
Unit-stride mask store: vsm.v vs3, (rs1)
mop=00, sumop=01011, nf=000, eew=e8, vm=1
Vsse
Strided store: vsse{eew}.v vs3, (rs1), rs2, vm
mop=10, nf=000
Vsuxei
Indexed-unordered store: vsuxei{eew}.v vs3, (rs1), vs2, vm
mop=01, nf=000. eew is the index element width.
Vsoxei
Indexed-ordered store: vsoxei{eew}.v vs3, (rs1), vs2, vm
mop=11, nf=000. eew is the index element width.
Vsr
Whole-register store: vs{nreg}r.v vs3, (rs1)
mop=00, sumop=01000, vm=1. nreg must be 1, 2, 4, or 8.
Vsseg
Unit-stride segment store: vsseg{nf}e{eew}.v vs3, (rs1), vm
mop=00, sumop=00000, nf>0
Vssseg
Strided segment store: vssseg{nf}e{eew}.v vs3, (rs1), rs2, vm
mop=10, nf>0
Vsuxseg
Indexed-unordered segment store: vsuxseg{nf}ei{eew}.v vs3, (rs1), vs2, vm
mop=01, nf>0
Vsoxseg
Indexed-ordered segment store: vsoxseg{nf}ei{eew}.v vs3, (rs1), vs2, vm
mop=11, nf>0
VaddVv
vadd.vv vd, vs2, vs1, vm
VaddVx
vadd.vx vd, vs2, rs1, vm
VaddVi
vadd.vi vd, vs2, imm, vm
VsubVv
vsub.vv vd, vs2, vs1, vm
VsubVx
vsub.vx vd, vs2, rs1, vm
VrsubVx
vrsub.vx vd, vs2, rs1, vm
VrsubVi
vrsub.vi vd, vs2, imm, vm
VandVv
vand.vv vd, vs2, vs1, vm
VandVx
vand.vx vd, vs2, rs1, vm
VandVi
vand.vi vd, vs2, imm, vm
VorVv
vor.vv vd, vs2, vs1, vm
VorVx
vor.vx vd, vs2, rs1, vm
VorVi
vor.vi vd, vs2, imm, vm
VxorVv
vxor.vv vd, vs2, vs1, vm
VxorVx
vxor.vx vd, vs2, rs1, vm
VxorVi
vxor.vi vd, vs2, imm, vm
VsllVv
vsll.vv vd, vs2, vs1, vm
VsllVx
vsll.vx vd, vs2, rs1, vm
VsllVi
vsll.vi vd, vs2, uimm, vm
VsrlVv
vsrl.vv vd, vs2, vs1, vm
VsrlVx
vsrl.vx vd, vs2, rs1, vm
VsrlVi
vsrl.vi vd, vs2, uimm, vm
VsraVv
vsra.vv vd, vs2, vs1, vm
VsraVx
vsra.vx vd, vs2, rs1, vm
VsraVi
vsra.vi vd, vs2, uimm, vm
VminuVv
vminu.vv vd, vs2, vs1, vm
VminuVx
vminu.vx vd, vs2, rs1, vm
VminVv
vmin.vv vd, vs2, vs1, vm
VminVx
vmin.vx vd, vs2, rs1, vm
VmaxuVv
vmaxu.vv vd, vs2, vs1, vm
VmaxuVx
vmaxu.vx vd, vs2, rs1, vm
VmaxVv
vmax.vv vd, vs2, vs1, vm
VmaxVx
vmax.vx vd, vs2, rs1, vm
VmseqVv
vmseq.vv vd, vs2, vs1, vm
VmseqVx
vmseq.vx vd, vs2, rs1, vm
VmseqVi
vmseq.vi vd, vs2, imm, vm
VmsneVv
vmsne.vv vd, vs2, vs1, vm
VmsneVx
vmsne.vx vd, vs2, rs1, vm
VmsneVi
vmsne.vi vd, vs2, imm, vm
VmsltuVv
vmsltu.vv vd, vs2, vs1, vm
VmsltuVx
vmsltu.vx vd, vs2, rs1, vm
VmsltVv
vmslt.vv vd, vs2, vs1, vm
VmsltVx
vmslt.vx vd, vs2, rs1, vm
VmsleuVv
vmsleu.vv vd, vs2, vs1, vm
VmsleuVx
vmsleu.vx vd, vs2, rs1, vm
VmsleuVi
vmsleu.vi vd, vs2, imm, vm
VmsleVv
vmsle.vv vd, vs2, vs1, vm
VmsleVx
vmsle.vx vd, vs2, rs1, vm
VmsleVi
vmsle.vi vd, vs2, imm, vm
VmsgtuVx
vmsgtu.vx vd, vs2, rs1, vm
VmsgtuVi
vmsgtu.vi vd, vs2, imm, vm
VmsgtVx
vmsgt.vx vd, vs2, rs1, vm
VmsgtVi
vmsgt.vi vd, vs2, imm, vm
VmulVv
vmul.vv vd, vs2, vs1, vm - signed multiply, low bits
VmulVx
vmul.vx vd, vs2, rs1, vm - signed multiply, low bits
VmulhVv
vmulh.vv vd, vs2, vs1, vm - signed×signed multiply, high bits
VmulhVx
vmulh.vx vd, vs2, rs1, vm - signed×signed multiply, high bits
VmulhuVv
vmulhu.vv vd, vs2, vs1, vm - unsigned×unsigned multiply, high bits
VmulhuVx
vmulhu.vx vd, vs2, rs1, vm - unsigned×unsigned multiply, high bits
VmulhsuVv
vmulhsu.vv vd, vs2, vs1, vm - signed×unsigned multiply, high bits
VmulhsuVx
vmulhsu.vx vd, vs2, rs1, vm - signed×unsigned multiply, high bits
VdivuVv
vdivu.vv vd, vs2, vs1, vm - unsigned divide
VdivuVx
vdivu.vx vd, vs2, rs1, vm - unsigned divide
VdivVv
vdiv.vv vd, vs2, vs1, vm - signed divide
VdivVx
vdiv.vx vd, vs2, rs1, vm - signed divide
VremuVv
vremu.vv vd, vs2, vs1, vm - unsigned remainder
VremuVx
vremu.vx vd, vs2, rs1, vm - unsigned remainder
VremVv
vrem.vv vd, vs2, vs1, vm - signed remainder
VremVx
vrem.vx vd, vs2, rs1, vm - signed remainder
VwmulVv
vwmul.vv vd, vs2, vs1, vm - signed widening multiply
VwmulVx
vwmul.vx vd, vs2, rs1, vm - signed widening multiply
VwmuluVv
vwmulu.vv vd, vs2, vs1, vm - unsigned widening multiply
VwmuluVx
vwmulu.vx vd, vs2, rs1, vm - unsigned widening multiply
VwmulsuVv
vwmulsu.vv vd, vs2, vs1, vm - signed×unsigned widening multiply
VwmulsuVx
vwmulsu.vx vd, vs2, rs1, vm - signed×unsigned widening multiply
VmaccVv
vmacc.vv vd, vs1, vs2, vm - vd = vd + vs1 * vs2
VmaccVx
vmacc.vx vd, rs1, vs2, vm - vd = vd + rs1 * vs2
VnmsacVv
vnmsac.vv vd, vs1, vs2, vm - vd = vd - vs1 * vs2
VnmsacVx
vnmsac.vx vd, rs1, vs2, vm - vd = vd - rs1 * vs2
VmaddVv
vmadd.vv vd, vs1, vs2, vm - vd = vs1 * vd + vs2
VmaddVx
vmadd.vx vd, rs1, vs2, vm - vd = rs1 * vd + vs2
VnmsubVv
vnmsub.vv vd, vs1, vs2, vm - vd = -(vs1 * vd) + vs2
VnmsubVx
vnmsub.vx vd, rs1, vs2, vm - vd = -(rs1 * vd) + vs2
VwmaccuVv
vwmaccu.vv vd, vs1, vs2, vm - unsigned widening multiply-add
VwmaccuVx
vwmaccu.vx vd, rs1, vs2, vm - unsigned widening multiply-add
VwmaccVv
vwmacc.vv vd, vs1, vs2, vm - signed widening multiply-add
VwmaccVx
vwmacc.vx vd, rs1, vs2, vm - signed widening multiply-add
VwmaccsuVv
vwmaccsu.vv vd, vs1, vs2, vm - signed×unsigned widening multiply-add
VwmaccsuVx
vwmaccsu.vx vd, rs1, vs2, vm - signed×unsigned widening multiply-add
VwmaccusVx
vwmaccus.vx vd, rs1, vs2, vm - unsigned×signed widening multiply-add (vx only)
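
A per-element scalar model makes the operand roles in the ternary family concrete; wrapping arithmetic mirrors keeping the low SEW bits (the widening forms differ only in producing a 2*SEW destination):

// s1 is the vs1 element (or the rs1 scalar in the .vx forms).
fn vmacc(vd: i64, s1: i64, vs2: i64) -> i64 { vd.wrapping_add(s1.wrapping_mul(vs2)) }
fn vnmsac(vd: i64, s1: i64, vs2: i64) -> i64 { vd.wrapping_sub(s1.wrapping_mul(vs2)) }
fn vmadd(vd: i64, s1: i64, vs2: i64) -> i64 { s1.wrapping_mul(vd).wrapping_add(vs2) }
fn vnmsub(vd: i64, s1: i64, vs2: i64) -> i64 { vs2.wrapping_sub(s1.wrapping_mul(vd)) }

The two pairs differ in which role vd plays: it is the addend for vmacc/vnmsac and a multiplicand for vmadd/vnmsub.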
VwadduVv
vwaddu.vv vd, vs2, vs1, vm - unsigned widening add (2*SEW = SEW + SEW)
VwadduVx
vwaddu.vx vd, vs2, rs1, vm - unsigned widening add (2*SEW = SEW + SEW)
VwaddVv
vwadd.vv vd, vs2, vs1, vm - signed widening add (2*SEW = SEW + SEW)
VwaddVx
vwadd.vx vd, vs2, rs1, vm - signed widening add (2*SEW = SEW + SEW)
VwsubuVv
vwsubu.vv vd, vs2, vs1, vm - unsigned widening subtract (2*SEW = SEW - SEW)
VwsubuVx
vwsubu.vx vd, vs2, rs1, vm - unsigned widening subtract (2*SEW = SEW - SEW)
VwsubVv
vwsub.vv vd, vs2, vs1, vm - signed widening subtract (2*SEW = SEW - SEW)
VwsubVx
vwsub.vx vd, vs2, rs1, vm - signed widening subtract (2*SEW = SEW - SEW)
VwadduWv
vwaddu.wv vd, vs2, vs1, vm - unsigned widening add, wide vs2 (2*SEW = 2*SEW + SEW)
VwadduWx
vwaddu.wx vd, vs2, rs1, vm - unsigned widening add, wide vs2 (2*SEW = 2*SEW + SEW)
VwaddWv
vwadd.wv vd, vs2, vs1, vm - signed widening add, wide vs2 (2*SEW = 2*SEW + SEW)
VwaddWx
vwadd.wx vd, vs2, rs1, vm - signed widening add, wide vs2 (2*SEW = 2*SEW + SEW)
VwsubuWv
vwsubu.wv vd, vs2, vs1, vm - unsigned widening subtract, wide vs2 (2*SEW = 2*SEW - SEW)
VwsubuWx
vwsubu.wx vd, vs2, rs1, vm - unsigned widening subtract, wide vs2 (2*SEW = 2*SEW - SEW)
VwsubWv
vwsub.wv vd, vs2, vs1, vm - signed widening subtract, wide vs2 (2*SEW = 2*SEW - SEW)
VwsubWx
vwsub.wx vd, vs2, rs1, vm - signed widening subtract, wide vs2 (2*SEW = 2*SEW - SEW)
VnsrlWv
vnsrl.wv vd, vs2, vs1, vm - narrowing logical shift right (SEW result from 2*SEW vs2)
VnsrlWx
vnsrl.wx vd, vs2, rs1, vm - narrowing logical shift right (SEW result from 2*SEW vs2)
VnsrlWi
vnsrl.wi vd, vs2, uimm, vm - narrowing logical shift right (SEW result from 2*SEW vs2)
VnsraWv
vnsra.wv vd, vs2, vs1, vm - narrowing arithmetic shift right (SEW result from 2*SEW vs2)
VnsraWx
vnsra.wx vd, vs2, rs1, vm - narrowing arithmetic shift right (SEW result from 2*SEW vs2)
VnsraWi
vnsra.wi vd, vs2, uimm, vm - narrowing arithmetic shift right (SEW result from 2*SEW vs2)
VzextVf2
vzext.vf2 vd, vs2, vm - zero-extend SEW/2 source to SEW destination
VzextVf4
vzext.vf4 vd, vs2, vm - zero-extend SEW/4 source to SEW destination
VzextVf8
vzext.vf8 vd, vs2, vm - zero-extend SEW/8 source to SEW destination
VsextVf2
vsext.vf2 vd, vs2, vm - sign-extend SEW/2 source to SEW destination
VsextVf4
vsext.vf4 vd, vs2, vm - sign-extend SEW/4 source to SEW destination
VsextVf8
vsext.vf8 vd, vs2, vm - sign-extend SEW/8 source to SEW destination
VsadduVv
vsaddu.vv vd, vs2, vs1, vm - Saturating unsigned add, vector-vector
VsadduVx
vsaddu.vx vd, vs2, rs1, vm - Saturating unsigned add, vector-scalar
VsadduVi
vsaddu.vi vd, vs2, imm, vm - Saturating unsigned add, vector-immediate
VsaddVv
vsadd.vv vd, vs2, vs1, vm - Saturating signed add, vector-vector
VsaddVx
vsadd.vx vd, vs2, rs1, vm - Saturating signed add, vector-scalar
VsaddVi
vsadd.vi vd, vs2, imm, vm - Saturating signed add, vector-immediate
VssubuVv
vssubu.vv vd, vs2, vs1, vm - Saturating unsigned subtract, vector-vector
VssubuVx
vssubu.vx vd, vs2, rs1, vm - Saturating unsigned subtract, vector-scalar
VssubVv
vssub.vv vd, vs2, vs1, vm - Saturating signed subtract, vector-vector
VssubVx
vssub.vx vd, vs2, rs1, vm - Saturating signed subtract, vector-scalar
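
Rust's saturating integer operations model the clipping behavior of this group (the vxsat flag is not modeled); a quick check at SEW=8:

fn saturation_examples() {
    assert_eq!(200u8.saturating_add(100), 255);     // vsaddu clips to 2^SEW - 1
    assert_eq!(100i8.saturating_add(100), 127);     // vsadd clips to 2^(SEW-1) - 1
    assert_eq!(50u8.saturating_sub(100), 0);        // vssubu clips to 0
    assert_eq!((-100i8).saturating_sub(100), -128); // vssub clips to -2^(SEW-1)
}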
VaadduVv
vaaddu.vv vd, vs2, vs1, vm - Averaging unsigned add, vector-vector
VaadduVx
vaaddu.vx vd, vs2, rs1, vm - Averaging unsigned add, vector-scalar
VaaddVv
vaadd.vv vd, vs2, vs1, vm - Averaging signed add, vector-vector
VaaddVx
vaadd.vx vd, vs2, rs1, vm - Averaging signed add, vector-scalar
VasubuVv
vasubu.vv vd, vs2, vs1, vm - Averaging unsigned subtract, vector-vector
VasubuVx
vasubu.vx vd, vs2, rs1, vm - Averaging unsigned subtract, vector-scalar
VasubVv
vasub.vv vd, vs2, vs1, vm - Averaging signed subtract, vector-vector
VasubVx
vasub.vx vd, vs2, rs1, vm - Averaging signed subtract, vector-scalar
VsmulVv
vsmul.vv vd, vs2, vs1, vm - Fractional multiply with rounding and saturation
VsmulVx
vsmul.vx vd, vs2, rs1, vm - Fractional multiply with rounding and saturation
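
The averaging ops, vsmul, and the scaling shifts that follow all apply a rounding right shift controlled by the vxrm CSR. A scalar sketch at SEW=8, assuming the round-to-nearest-up mode (vxrm=00):

fn vaaddu8(a: u8, b: u8) -> u8 {
    (((a as u16) + (b as u16) + 1) >> 1) as u8 // (a + b) >> 1 with rnu rounding
}

fn vsmul8(a: i8, b: i8) -> i8 {
    let prod = (a as i16) * (b as i16);
    let rounded = (prod + (1 << 6)) >> 7; // rounding shift by SEW-1
    rounded.clamp(i8::MIN as i16, i8::MAX as i16) as i8 // saturate; vxsat not modeled
}

Only vsmul(-128, -128) actually saturates here: the exact result 1.0 is not representable in the fixed-point range, so it clips to 127.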
VssrlVv
vssrl.vv vd, vs2, vs1, vm - Scaling shift right logical, vector-vector
VssrlVx
vssrl.vx vd, vs2, rs1, vm - Scaling shift right logical, vector-scalar
VssrlVi
vssrl.vi vd, vs2, imm, vm - Scaling shift right logical, vector-immediate
VssraVv
vssra.vv vd, vs2, vs1, vm - Scaling shift right arithmetic, vector-vector
VssraVx
vssra.vx vd, vs2, rs1, vm - Scaling shift right arithmetic, vector-scalar
VssraVi
vssra.vi vd, vs2, imm, vm - Scaling shift right arithmetic, vector-immediate
VnclipuWv
vnclipu.wv vd, vs2, vs1, vm - Narrowing unsigned clip, vector-vector
VnclipuWx
vnclipu.wx vd, vs2, rs1, vm - Narrowing unsigned clip, vector-scalar
VnclipuWi
vnclipu.wi vd, vs2, imm, vm - Narrowing unsigned clip, vector-immediate
VnclipWv
vnclip.wv vd, vs2, vs1, vm - Narrowing signed clip, vector-vector
VnclipWx
vnclip.wx vd, vs2, rs1, vm - Narrowing signed clip, vector-scalar
VnclipWi
vnclip.wi vd, vs2, imm, vm - Narrowing signed clip, vector-immediate
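
The narrowing clips combine that same rounding shift (applied to the 2*SEW-wide vs2 element) with saturation to SEW. A sketch of the unsigned form at SEW=8, again assuming vxrm=00 (rnu):

fn vnclipu8(src: u16, shamt: u16) -> u8 {
    let d = (shamt & 0xf) as u32; // only the low log2(2*SEW) = 4 bits shift
    let rounded = if d == 0 {
        src as u32
    } else {
        ((src as u32) + (1 << (d - 1))) >> d // rnu rounding
    };
    rounded.min(u8::MAX as u32) as u8 // saturate to 2^SEW - 1; vxsat not modeled
}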
Vmandn
vmandn.mm vd, vs2, vs1 - vd = vs2 AND NOT vs1
funct6=011000, OPMVV, vm=1
Vmand
vmand.mm vd, vs2, vs1 - vd = vs2 AND vs1
funct6=011001, OPMVV, vm=1
Vmor
vmor.mm vd, vs2, vs1 - vd = vs2 OR vs1
funct6=011010, OPMVV, vm=1
Vmxor
vmxor.mm vd, vs2, vs1 - vd = vs2 XOR vs1
funct6=011011, OPMVV, vm=1
Vmorn
vmorn.mm vd, vs2, vs1 - vd = vs2 OR NOT vs1
funct6=011100, OPMVV, vm=1
Vmnand
vmnand.mm vd, vs2, vs1 - vd = NOT(vs2 AND vs1)
funct6=011101, OPMVV, vm=1
Vmnor
vmnor.mm vd, vs2, vs1 - vd = NOT(vs2 OR vs1)
funct6=011110, OPMVV, vm=1
Vmxnor
vmxnor.mm vd, vs2, vs1 - vd = NOT(vs2 XOR vs1)
funct6=011111, OPMVV, vm=1
Vcpop
vcpop.m rd, vs2, vm - rd = population count of mask vs2
funct6=010000, OPMVV, vs1=10000
Vfirst
vfirst.m rd, vs2, vm - rd = index of first set bit in mask vs2, or -1 if none is set
funct6=010000, OPMVV, vs1=10001
Vmsbf
vmsbf.m vd, vs2, vm - set-before-first mask bit
funct6=010100, OPMVV, vs1=00001
Vmsof
vmsof.m vd, vs2, vm - set-only-first mask bit
funct6=010100, OPMVV, vs1=00010
Vmsif
vmsif.m vd, vs2, vm - set-including-first mask bit
funct6=010100, OPMVV, vs1=00011
Viota
viota.m vd, vs2, vm - iota: vd[i] = popcount of vs2[0..i) (set mask bits strictly below i)
funct6=010100, OPMVV, vs1=10000
Vid
vid.v vd, vm - vector element index: vd[i] = i
funct6=010100, OPMVV, vs1=10001, vs2=00000
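
The set-before/only/including-first trio and viota are easy to model on a flat u64 mask (vl and the vm mask are ignored for brevity):

fn vmsbf(m: u64) -> u64 { if m == 0 { !0 } else { (1u64 << m.trailing_zeros()) - 1 } }
fn vmsof(m: u64) -> u64 { if m == 0 { 0 } else { 1u64 << m.trailing_zeros() } }
fn vmsif(m: u64) -> u64 { vmsbf(m) | vmsof(m) }
fn viota(m: u64, i: u32) -> u64 {
    debug_assert!(i < 64);
    (m & ((1u64 << i) - 1)).count_ones() as u64 // set bits strictly below index i
}

For example, with m = 0b0100: vmsbf gives 0b0011, vmsof gives 0b0100, and vmsif gives 0b0111.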
Vredsum
Sum reduction: vredsum.vs vd, vs2, vs1, vm
Vredand
AND reduction: vredand.vs vd, vs2, vs1, vm
Vredor
OR reduction: vredor.vs vd, vs2, vs1, vm
Vredxor
XOR reduction: vredxor.vs vd, vs2, vs1, vm
Vredminu
Unsigned minimum reduction: vredminu.vs vd, vs2, vs1, vm
Vredmin
Signed minimum reduction: vredmin.vs vd, vs2, vs1, vm
Vredmaxu
Unsigned maximum reduction: vredmaxu.vs vd, vs2, vs1, vm
Vredmax
Signed maximum reduction: vredmax.vs vd, vs2, vs1, vm
Vwredsumu
Widening unsigned sum reduction: vwredsumu.vs vd, vs2, vs1, vm
Vwredsum
Widening signed sum reduction: vwredsum.vs vd, vs2, vs1, vm
VmvXS
vmv.x.s rd, vs2 - Copy scalar element 0 of vs2 to GPR rd
funct6=010000, OPMVV, vs1=00000, vm=1
VmvSX
vmv.s.x vd, rs1 - Copy scalar GPR rs1 to element 0 of vd
funct6=010000, OPMVX, vs2=00000, vm=1
VslideupVx
vslideup.vx vd, vs2, rs1, vm - Slide elements up by scalar amount
funct6=001110, OPIVX
VslideupVi
vslideup.vi vd, vs2, uimm, vm - Slide elements up by immediate amount
funct6=001110, OPIVI
VslidedownVx
vslidedown.vx vd, vs2, rs1, vm - Slide elements down by scalar amount
funct6=001111, OPIVX
VslidedownVi
vslidedown.vi vd, vs2, uimm, vm - Slide elements down by immediate amount
funct6=001111, OPIVI
Vslide1upVx
vslide1up.vx vd, vs2, rs1, vm - Slide up by 1 and insert scalar at element 0
funct6=001110, OPMVX
Vslide1downVx
vslide1down.vx vd, vs2, rs1, vm - Slide down by 1 and insert scalar at the last element (vl-1)
funct6=001111, OPMVX
VrgatherVv
vrgather.vv vd, vs2, vs1, vm - Gather elements from vs2 using indices in vs1
funct6=001100, OPIVV
VrgatherVx
vrgather.vx vd, vs2, rs1, vm - Gather elements from vs2 using scalar index
funct6=001100, OPIVX
VrgatherVi
vrgather.vi vd, vs2, uimm, vm - Gather elements from vs2 using immediate index
funct6=001100, OPIVI
Vrgatherei16Vv
vrgatherei16.vv vd, vs2, vs1, vm - Gather with 16-bit indices
funct6=001110, OPIVV
VcompressVm
vcompress.vm vd, vs2, vs1 - Compress active elements from vs2 under mask vs1
funct6=010111, OPMVV, vm=1 (always unmasked)
Vmv1rV
vmv1r.v vd, vs2 - Whole register move (1 register)
funct6=100111, OPIVI, simm5=00000, vm=1
Vmv2rV
vmv2r.v vd, vs2 - Whole register move (2 registers)
funct6=100111, OPIVI, simm5=00001, vm=1
Vmv4rV
vmv4r.v vd, vs2 - Whole register move (4 registers)
funct6=100111, OPIVI, simm5=00011, vm=1
Vmv8rV
vmv8r.v vd, vs2 - Whole register move (8 registers)
funct6=100111, OPIVI, simm5=00111, vm=1
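
The simm5 values listed for the whole-register moves follow simm5 = nreg - 1, so recovering the immediate from a decoded variant is a small match; a hedged sketch:

fn vmvr_simm5<Reg>(insn: &Rv64Zve64xInstruction<Reg>) -> Option<u8> {
    match insn {
        Rv64Zve64xInstruction::Vmv1rV { .. } => Some(0b00000),
        Rv64Zve64xInstruction::Vmv2rV { .. } => Some(0b00001),
        Rv64Zve64xInstruction::Vmv4rV { .. } => Some(0b00011),
        Rv64Zve64xInstruction::Vmv8rV { .. } => Some(0b00111),
        _ => None,
    }
}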
Trait Implementations

impl<Reg: Clone> Clone for Rv64Zve64xInstruction<Reg>

fn clone(&self) -> Rv64Zve64xInstruction<Reg>
fn clone_from(&mut self, source: &Self)
    Performs copy-assignment from source.