/*
 * sh-dis/c/impl.c
 * SH4 emulator instruction implementations in C.
 * This currently only implements the SH2 instructions.
 * (Zack Buhman, 2024-04-22 — initial implementation, 8a300ba4c6)
 */
#include "impl.h"
#include "operations.h"
#include "exception.h"
#include "state_helpers.h"
/* MOV #imm,Rn — load the sign-extended 8-bit immediate into Rn. */
void mov__immediate(struct architectural_state * state, struct memory_map * map, const uint32_t i, const uint32_t n)
{
int64_t value = sign_extend8(i);
REG(state, n) = _register(value);
state->is_delay_slot = false;
}
/* MOV.W @(disp,PC),Rn */
/* Loads a sign-extended word from (PC+4) + disp*2 into Rn.  PC-relative data
   access is illegal in a delay slot (the PC would be wrong), hence ILLSLOT. */
void mov_w__pc_relative_with_displacement(struct architectural_state * state, struct memory_map * map, const uint32_t d, const uint32_t n)
{
int64_t pc = sign_extend32(state->pc[0]);
int64_t disp = zero_extend8(d) << 1;
if (is_delay_slot(state)) return ILLSLOT(state);
int64_t address = zero_extend32(disp + (pc + 4));
int64_t op2 = sign_extend16(read_memory16(map, address));
REG(state, n) = _register(op2);
state->is_delay_slot = false;
}
/* MOV.L @(disp,PC),Rn */
/* As above but longword: base is (PC+4) masked down to a longword boundary,
   displacement is scaled by 4. */
void mov_l__pc_relative_with_displacement(struct architectural_state * state, struct memory_map * map, const uint32_t d, const uint32_t n)
{
int64_t pc = sign_extend32(state->pc[0]);
int64_t disp = zero_extend8(d) << 2;
if (is_delay_slot(state)) return ILLSLOT(state);
int64_t address = zero_extend32(disp + ((pc + 4) & (~0x3)));
int64_t op2 = sign_extend32(read_memory32(map, address));
REG(state, n) = _register(op2);
state->is_delay_slot = false;
}
/* MOV Rm,Rn — copy Rm to Rn unchanged. */
void mov__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n)
{
int64_t value = zero_extend32(REG(state, m));
REG(state, n) = _register(value);
state->is_delay_slot = false;
}
/* MOV.B Rm,@Rn */
/* Store the low byte of Rm to the address in Rn. */
void mov_b__store_register_direct_data_transfer(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n)
{
int64_t op1 = sign_extend32(REG(state, m));
int64_t op2 = sign_extend32(REG(state, n));
int64_t address = zero_extend32(op2);
write_memory8(map, address, op1);
state->is_delay_slot = false;
}
/* MOV.W Rm,@Rn */
/* Store the low word of Rm to the address in Rn. */
void mov_w__store_register_direct_data_transfer(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n)
{
int64_t op1 = sign_extend32(REG(state, m));
int64_t op2 = sign_extend32(REG(state, n));
int64_t address = zero_extend32(op2);
write_memory16(map, address, op1);
state->is_delay_slot = false;
}
/* MOV.L Rm,@Rn */
/* Store Rm as a longword to the address in Rn. */
void mov_l__store_register_direct_data_transfer(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n)
{
int64_t op1 = sign_extend32(REG(state, m));
int64_t op2 = sign_extend32(REG(state, n));
int64_t address = zero_extend32(op2);
write_memory32(map, address, op1);
state->is_delay_slot = false;
}
/* MOV.B @Rm,Rn */
/* Load a byte from the address in Rm, sign-extend it into Rn. */
void mov_b__load_register_direct_data_transfer(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n)
{
int64_t op1 = sign_extend32(REG(state, m));
int64_t address = zero_extend32(op1);
int64_t op2 = sign_extend8(read_memory8(map, address));
REG(state, n) = _register(op2);
state->is_delay_slot = false;
}
/* MOV.W @Rm,Rn */
/* Load a word from the address in Rm, sign-extend it into Rn. */
void mov_w__load_register_direct_data_transfer(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n)
{
int64_t op1 = sign_extend32(REG(state, m));
int64_t address = zero_extend32(op1);
int64_t op2 = sign_extend16(read_memory16(map, address));
REG(state, n) = _register(op2);
state->is_delay_slot = false;
}
/* MOV.L @Rm,Rn */
/* Load a longword from the address in Rm into Rn. */
void mov_l__load_register_direct_data_transfer(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n)
{
int64_t op1 = sign_extend32(REG(state, m));
int64_t address = zero_extend32(op1);
int64_t op2 = sign_extend32(read_memory32(map, address));
REG(state, n) = _register(op2);
state->is_delay_slot = false;
}
/* MOV.B Rm,@-Rn */
/* Pre-decrement store: Rn is decremented by the access size, then the store
   happens at the decremented address.  Rn is written back only after the
   memory write, so a faulting write leaves Rn unchanged. */
void mov_b__store_direct_data_transfer_from_register(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n)
{
int64_t op1 = sign_extend32(REG(state, m));
int64_t op2 = sign_extend32(REG(state, n));
int64_t address = zero_extend32(op2 - 1);
write_memory8(map, address, op1);
op2 = address;
REG(state, n) = _register(op2);
state->is_delay_slot = false;
}
/* MOV.W Rm,@-Rn */
/* As MOV.B Rm,@-Rn but decrements by 2 and stores a word. */
void mov_w__store_direct_data_transfer_from_register(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n)
{
int64_t op1 = sign_extend32(REG(state, m));
int64_t op2 = sign_extend32(REG(state, n));
int64_t address = zero_extend32(op2 - 2);
write_memory16(map, address, op1);
op2 = address;
REG(state, n) = _register(op2);
state->is_delay_slot = false;
}
/* MOV.L Rm,@-Rn */
/* As MOV.B Rm,@-Rn but decrements by 4 and stores a longword. */
void mov_l__store_direct_data_transfer_from_register(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n)
{
int64_t op1 = sign_extend32(REG(state, m));
int64_t op2 = sign_extend32(REG(state, n));
int64_t address = zero_extend32(op2 - 4);
write_memory32(map, address, op1);
op2 = address;
REG(state, n) = _register(op2);
state->is_delay_slot = false;
}
/* MOV.B @Rm+,Rn */
/* Post-increment load.  When m == n the loaded value wins over the
   increment: Rn/Rm ends up holding the loaded data, not address+1. */
void mov_b__load_direct_data_transfer_from_register(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n)
{
int64_t m_field = zero_extend4(m);
int64_t n_field = zero_extend4(n);
int64_t op1 = sign_extend32(REG(state, m));
int64_t address = zero_extend32(op1);
int64_t op2 = sign_extend8(read_memory8(map, address));
if (m_field == n_field) op1 = op2;
else op1 = op1 + 1;
REG(state, m) = _register(op1);
REG(state, n) = _register(op2);
state->is_delay_slot = false;
}
/* MOV.W @Rm+,Rn */
/* As MOV.B @Rm+,Rn but word-sized; increments by 2. */
void mov_w__load_direct_data_transfer_from_register(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n)
{
int64_t m_field = zero_extend4(m);
int64_t n_field = zero_extend4(n);
int64_t op1 = sign_extend32(REG(state, m));
int64_t address = zero_extend32(op1);
int64_t op2 = sign_extend16(read_memory16(map, address));
if (m_field == n_field) op1 = op2;
else op1 = op1 + 2;
REG(state, m) = _register(op1);
REG(state, n) = _register(op2);
state->is_delay_slot = false;
}
/* MOV.L @Rm+,Rn */
/* As MOV.B @Rm+,Rn but longword-sized; increments by 4. */
void mov_l__load_direct_data_transfer_from_register(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n)
{
int64_t m_field = zero_extend4(m);
int64_t n_field = zero_extend4(n);
int64_t op1 = sign_extend32(REG(state, m));
int64_t address = zero_extend32(op1);
int64_t op2 = sign_extend32(read_memory32(map, address));
if (m_field == n_field) op1 = op2;
else op1 = op1 + 4;
REG(state, m) = _register(op1);
REG(state, n) = _register(op2);
state->is_delay_slot = false;
}
/* MOV.B R0,@(disp,Rn) */
/* Store low byte of R0 to Rn + 4-bit displacement (byte-scaled). */
void mov_b__store_register_indirect_with_displacement(struct architectural_state * state, struct memory_map * map, const uint32_t d, const uint32_t n)
{
int64_t r0 = sign_extend32(REG(state, 0));
int64_t disp = zero_extend4(d);
int64_t op2 = sign_extend32(REG(state, n));
int64_t address = zero_extend32(disp + op2);
write_memory8(map, address, r0);
state->is_delay_slot = false;
}
/* MOV.W R0,@(disp,Rn) */
/* Store low word of R0 to Rn + disp*2. */
void mov_w__store_register_indirect_with_displacement(struct architectural_state * state, struct memory_map * map, const uint32_t d, const uint32_t n)
{
int64_t r0 = sign_extend32(REG(state, 0));
int64_t disp = zero_extend4(d) << 1;
int64_t op2 = sign_extend32(REG(state, n));
int64_t address = zero_extend32(disp + op2);
write_memory16(map, address, r0);
state->is_delay_slot = false;
}
/* MOV.L Rm,@(disp,Rn) */
/* Longword form takes an arbitrary source register Rm, not just R0. */
void mov_l__store_register_indirect_with_displacement(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t d, const uint32_t n)
{
int64_t op1 = sign_extend32(REG(state, m));
int64_t disp = zero_extend4(d) << 2;
int64_t op3 = sign_extend32(REG(state, n));
int64_t address = zero_extend32(disp + op3);
write_memory32(map, address, op1);
state->is_delay_slot = false;
}
/* MOV.B @(disp,Rm),R0 */
/* Load sign-extended byte from Rm + disp into R0. */
void mov_b__load_register_indirect_with_displacement(struct architectural_state * state, struct memory_map * map, const uint32_t d, const uint32_t m)
{
int64_t disp = zero_extend4(d);
int64_t op2 = sign_extend32(REG(state, m));
int64_t address = zero_extend32(disp + op2);
int64_t r0 = sign_extend8(read_memory8(map, address));
REG(state, 0) = _register(r0);
state->is_delay_slot = false;
}
/* MOV.W @(disp,Rm),R0 */
/* Load sign-extended word from Rm + disp*2 into R0. */
void mov_w__load_register_indirect_with_displacement(struct architectural_state * state, struct memory_map * map, const uint32_t d, const uint32_t m)
{
int64_t disp = zero_extend4(d) << 1;
int64_t op2 = sign_extend32(REG(state, m));
int64_t address = zero_extend32(disp + op2);
int64_t r0 = sign_extend16(read_memory16(map, address));
REG(state, 0) = _register(r0);
state->is_delay_slot = false;
}
/* MOV.L @(disp,Rm),Rn */
/* Longword form loads into an arbitrary destination Rn, not just R0. */
void mov_l__load_register_indirect_with_displacement(struct architectural_state * state, struct memory_map * map, const uint32_t d, const uint32_t m, const uint32_t n)
{
int64_t disp = zero_extend4(d) << 2;
int64_t op2 = sign_extend32(REG(state, m));
int64_t address = zero_extend32(disp + op2);
int64_t op3 = sign_extend32(read_memory32(map, address));
REG(state, n) = _register(op3);
state->is_delay_slot = false;
}
/* MOV.B Rm,@(R0,Rn) */
/* Indexed store: effective address is R0 + Rn. */
void mov_b__store_indexed_register_indirect(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n)
{
int64_t r0 = sign_extend32(REG(state, 0));
int64_t op1 = sign_extend32(REG(state, m));
int64_t op2 = sign_extend32(REG(state, n));
int64_t address = zero_extend32(r0 + op2);
write_memory8(map, address, op1);
state->is_delay_slot = false;
}
/* MOV.W Rm,@(R0,Rn) */
/* Word-sized indexed store at R0 + Rn. */
void mov_w__store_indexed_register_indirect(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n)
{
int64_t r0 = sign_extend32(REG(state, 0));
int64_t op1 = sign_extend32(REG(state, m));
int64_t op2 = sign_extend32(REG(state, n));
int64_t address = zero_extend32(r0 + op2);
write_memory16(map, address, op1);
state->is_delay_slot = false;
}
/* MOV.L Rm,@(R0,Rn) */
/* Longword-sized indexed store at R0 + Rn. */
void mov_l__store_indexed_register_indirect(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n)
{
int64_t r0 = sign_extend32(REG(state, 0));
int64_t op1 = sign_extend32(REG(state, m));
int64_t op2 = sign_extend32(REG(state, n));
int64_t address = zero_extend32(r0 + op2);
write_memory32(map, address, op1);
state->is_delay_slot = false;
}
/* MOV.B @(R0,Rm),Rn */
/* Indexed load: sign-extended byte from R0 + Rm into Rn. */
void mov_b__load_indexed_register_indirect(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n)
{
int64_t r0 = sign_extend32(REG(state, 0));
int64_t op1 = sign_extend32(REG(state, m));
int64_t address = zero_extend32(r0 + op1);
int64_t op2 = sign_extend8(read_memory8(map, address));
REG(state, n) = _register(op2);
state->is_delay_slot = false;
}
/* MOV.W @(R0,Rm),Rn */
/* Indexed load: sign-extended word from R0 + Rm into Rn. */
void mov_w__load_indexed_register_indirect(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n)
{
int64_t r0 = sign_extend32(REG(state, 0));
int64_t op1 = sign_extend32(REG(state, m));
int64_t address = zero_extend32(r0 + op1);
int64_t op2 = sign_extend16(read_memory16(map, address));
REG(state, n) = _register(op2);
state->is_delay_slot = false;
}
/* MOV.L @(R0,Rm),Rn */
/* Indexed load: longword from R0 + Rm into Rn. */
void mov_l__load_indexed_register_indirect(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n)
{
int64_t r0 = sign_extend32(REG(state, 0));
int64_t op1 = sign_extend32(REG(state, m));
int64_t address = zero_extend32(r0 + op1);
int64_t op2 = sign_extend32(read_memory32(map, address));
REG(state, n) = _register(op2);
state->is_delay_slot = false;
}
/* MOV.B R0,@(disp,GBR) */
/* Store low byte of R0 at GBR + 8-bit displacement (byte-scaled). */
void mov_b__store_gbr_indirect_with_displacement(struct architectural_state * state, struct memory_map * map, const uint32_t d)
{
int64_t gbr = sign_extend32(state->gbr);
int64_t r0 = sign_extend32(REG(state, 0));
int64_t disp = zero_extend8(d);
int64_t address = zero_extend32(disp + gbr);
write_memory8(map, address, r0);
state->is_delay_slot = false;
}
/* MOV.W R0,@(disp,GBR) */
/* Store low word of R0 at GBR + disp*2. */
void mov_w__store_gbr_indirect_with_displacement(struct architectural_state * state, struct memory_map * map, const uint32_t d)
{
int64_t gbr = sign_extend32(state->gbr);
int64_t r0 = sign_extend32(REG(state, 0));
int64_t disp = zero_extend8(d) << 1;
int64_t address = zero_extend32(disp + gbr);
write_memory16(map, address, r0);
state->is_delay_slot = false;
}
/* MOV.L R0,@(disp,GBR) */
/* Store R0 as a longword at GBR + disp*4. */
void mov_l__store_gbr_indirect_with_displacement(struct architectural_state * state, struct memory_map * map, const uint32_t d)
{
int64_t gbr = sign_extend32(state->gbr);
int64_t r0 = sign_extend32(REG(state, 0));
int64_t disp = zero_extend8(d) << 2;
int64_t address = zero_extend32(disp + gbr);
write_memory32(map, address, r0);
state->is_delay_slot = false;
}
/* MOV.B @(disp,GBR),R0 */
/* Load sign-extended byte from GBR + disp into R0. */
void mov_b__load_gbr_indirect_with_displacement(struct architectural_state * state, struct memory_map * map, const uint32_t d)
{
int64_t gbr = sign_extend32(state->gbr);
int64_t disp = zero_extend8(d);
int64_t address = zero_extend32(disp + gbr);
int64_t r0 = sign_extend8(read_memory8(map, address));
REG(state, 0) = _register(r0);
state->is_delay_slot = false;
}
/* MOV.W @(disp,GBR),R0 */
/* Load sign-extended word from GBR + disp*2 into R0. */
void mov_w__load_gbr_indirect_with_displacement(struct architectural_state * state, struct memory_map * map, const uint32_t d)
{
int64_t gbr = sign_extend32(state->gbr);
int64_t disp = zero_extend8(d) << 1;
int64_t address = zero_extend32(disp + gbr);
int64_t r0 = sign_extend16(read_memory16(map, address));
REG(state, 0) = _register(r0);
state->is_delay_slot = false;
}
/* MOV.L @(disp,GBR),R0 */
/* Load longword from GBR + disp*4 into R0. */
void mov_l__load_gbr_indirect_with_displacement(struct architectural_state * state, struct memory_map * map, const uint32_t d)
{
int64_t gbr = sign_extend32(state->gbr);
int64_t disp = zero_extend8(d) << 2;
int64_t address = zero_extend32(disp + gbr);
int64_t r0 = sign_extend32(read_memory32(map, address));
REG(state, 0) = _register(r0);
state->is_delay_slot = false;
}
/* MOVA @(disp,PC),R0 */
/* Computes the PC-relative longword address disp*4 + ((PC+4) & ~3) into R0.
   Illegal in a delay slot. */
void mova__pc_relative_with_displacement(struct architectural_state * state, struct memory_map * map, const uint32_t d)
{
int64_t pc = sign_extend32(state->pc[0]);
int64_t disp = zero_extend8(d) << 2;
if (is_delay_slot(state)) return ILLSLOT(state);
/* NOTE(review): unlike mov_l__pc_relative_with_displacement, the sum is not
   passed through zero_extend32 here; presumably _register() truncates to
   32 bits anyway — confirm. */
int64_t r0 = disp + ((pc + 4) & (~0x3));
REG(state, 0) = _register(r0);
state->is_delay_slot = false;
}
/* MOVT Rn — copy the T flag (0 or 1) into Rn. */
void movt__destination_operand_only(struct architectural_state * state, struct memory_map * map, const uint32_t n)
{
int64_t value = zero_extend1(state->sr.bits.t);
REG(state, n) = _register(value);
state->is_delay_slot = false;
}
/* SWAP.B Rm,Rn */
/* Swap the two low bytes of Rm; the upper 16 bits pass through unchanged. */
void swap_b__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n)
{
int64_t op1 = zero_extend32(REG(state, m));
int64_t op2 = ((bit_extract(op1, 16, 16) << 16) | (bit_extract(op1, 0, 8) << 8)) | bit_extract(op1, 8, 8);
REG(state, n) = _register(op2);
state->is_delay_slot = false;
}
/* SWAP.W Rm,Rn */
/* Swap the upper and lower 16-bit halves of Rm into Rn. */
void swap_w__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n)
{
int64_t op1 = zero_extend32(REG(state, m));
int64_t op2 = (bit_extract(op1, 0, 16) << 16) | bit_extract(op1, 16, 16);
REG(state, n) = _register(op2);
state->is_delay_slot = false;
}
/* XTRCT Rm,Rn */
/* Extract the middle 32 bits of the 64-bit value Rm:Rn — low half of Rm
   becomes the high half of Rn, high half of Rn becomes the low half. */
void xtrct__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n)
{
int64_t op1 = zero_extend32(REG(state, m));
int64_t op2 = zero_extend32(REG(state, n));
op2 = bit_extract(op2, 16, 16) | (bit_extract(op1, 0, 16) << 16);
REG(state, n) = _register(op2);
state->is_delay_slot = false;
}
/* ADD Rm,Rn */
/* Rn = Rn + Rm (no flags affected). */
void add__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n)
{
int64_t op1 = sign_extend32(REG(state, m));
int64_t op2 = sign_extend32(REG(state, n));
op2 = op2 + op1;
REG(state, n) = _register(op2);
state->is_delay_slot = false;
}
/* ADD #imm,Rn */
/* Rn = Rn + sign-extended 8-bit immediate (no flags affected). */
void add__immediate(struct architectural_state * state, struct memory_map * map, const uint32_t i, const uint32_t n)
{
int64_t imm = sign_extend8(i);
int64_t op2 = sign_extend32(REG(state, n));
op2 = op2 + imm;
REG(state, n) = _register(op2);
state->is_delay_slot = false;
}
/* ADDC Rm,Rn */
/* Rn = Rn + Rm + T; T receives the unsigned carry out of bit 31.  Operands
   are zero-extended to 33+ bits so the carry lands in bit 32. */
void addc__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n)
{
int64_t t = zero_extend1(state->sr.bits.t);
int64_t op1 = zero_extend32(sign_extend32(REG(state, m)));
int64_t op2 = zero_extend32(sign_extend32(REG(state, n)));
op2 = (op2 + op1) + t;
t = bit_extract(op2, 32, 1);
REG(state, n) = _register(op2);
state->sr.bits.t = bit(t);
state->is_delay_slot = false;
}
/* ADDV Rm,Rn */
/* Rn = Rn + Rm; T set when the signed 32-bit sum overflows (result falls
   outside [-2^31, 2^31)). */
void addv__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n)
{
int64_t op1 = sign_extend32(REG(state, m));
int64_t op2 = sign_extend32(REG(state, n));
op2 = op2 + op1;
int64_t t = unary_int(((op2 < (-(1LL << 31))) || (op2 >= (1LL << 31))));
REG(state, n) = _register(op2);
state->sr.bits.t = bit(t);
state->is_delay_slot = false;
}
/* CMP/EQ #imm,R0 — T = (R0 == sign-extended 8-bit immediate). */
void cmp_eq__immediate(struct architectural_state * state, struct memory_map * map, const uint32_t i)
{
int64_t lhs = sign_extend32(REG(state, 0));
int64_t rhs = sign_extend8(i);
state->sr.bits.t = bit(unary_int(lhs == rhs));
state->is_delay_slot = false;
}
/* CMP/EQ Rm,Rn — T = (Rn == Rm). */
void cmp_eq__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n)
{
int64_t lhs = sign_extend32(REG(state, n));
int64_t rhs = sign_extend32(REG(state, m));
state->sr.bits.t = bit(unary_int(lhs == rhs));
state->is_delay_slot = false;
}
/* CMP/HS Rm,Rn */
/* Unsigned: T = (Rn >= Rm).  zero_extend32(sign_extend32(..)) yields the
   unsigned 32-bit value of the register. */
void cmp_hs__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n)
{
int64_t op1 = zero_extend32(sign_extend32(REG(state, m)));
int64_t op2 = zero_extend32(sign_extend32(REG(state, n)));
int64_t t = unary_int((op2 >= op1));
state->sr.bits.t = bit(t);
state->is_delay_slot = false;
}
/* CMP/GE Rm,Rn */
/* Signed: T = (Rn >= Rm). */
void cmp_ge__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n)
{
int64_t op1 = sign_extend32(REG(state, m));
int64_t op2 = sign_extend32(REG(state, n));
int64_t t = unary_int((op2 >= op1));
state->sr.bits.t = bit(t);
state->is_delay_slot = false;
}
/* CMP/HI Rm,Rn */
/* Unsigned: T = (Rn > Rm). */
void cmp_hi__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n)
{
int64_t op1 = zero_extend32(sign_extend32(REG(state, m)));
int64_t op2 = zero_extend32(sign_extend32(REG(state, n)));
int64_t t = unary_int((op2 > op1));
state->sr.bits.t = bit(t);
state->is_delay_slot = false;
}
/* CMP/GT Rm,Rn */
/* Signed: T = (Rn > Rm). */
void cmp_gt__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n)
{
int64_t op1 = sign_extend32(REG(state, m));
int64_t op2 = sign_extend32(REG(state, n));
int64_t t = unary_int((op2 > op1));
state->sr.bits.t = bit(t);
state->is_delay_slot = false;
}
/* CMP/PZ Rn */
/* T = (Rn >= 0), signed. */
void cmp_pz__destination_operand_only(struct architectural_state * state, struct memory_map * map, const uint32_t n)
{
int64_t op1 = sign_extend32(REG(state, n));
int64_t t = unary_int((op1 >= 0));
state->sr.bits.t = bit(t);
state->is_delay_slot = false;
}
/* CMP/PL Rn */
/* T = (Rn > 0), signed. */
void cmp_pl__destination_operand_only(struct architectural_state * state, struct memory_map * map, const uint32_t n)
{
int64_t op1 = sign_extend32(REG(state, n));
int64_t t = unary_int((op1 > 0));
state->sr.bits.t = bit(t);
state->is_delay_slot = false;
}
/* CMP/STR Rm,Rn */
/* T = 1 if ANY of the four bytes of Rm equals the corresponding byte of Rn
   (a byte of Rm ^ Rn being zero means that byte pair matched). */
void cmp_str__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n)
{
int64_t op1 = sign_extend32(REG(state, m));
int64_t op2 = sign_extend32(REG(state, n));
int64_t temp = op1 ^ op2;
int64_t t = unary_int((bit_extract(temp, 0, 8) == 0));
t = (unary_int((bit_extract(temp, 8, 8) == 0))) | t;
t = (unary_int((bit_extract(temp, 16, 8) == 0))) | t;
t = (unary_int((bit_extract(temp, 24, 8) == 0))) | t;
state->sr.bits.t = bit(t);
state->is_delay_slot = false;
}
/* DIV1 Rm,Rn */
/* One step of the non-restoring division sequence (used after DIV0S/DIV0U).
   Shifts the old T into Rn, then adds or subtracts the divisor depending on
   whether the previous Q equals M, and updates Q and T from the carry/borrow
   in bit 32.  T = 1 - (Q ^ M) reflects whether the partial quotient bit is 1. */
void div1__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n)
{
int64_t q = zero_extend1(state->sr.bits.q);
int64_t _m = zero_extend1(state->sr.bits.m);
int64_t t = zero_extend1(state->sr.bits.t);
int64_t op1 = zero_extend32(sign_extend32(REG(state, m)));
int64_t op2 = zero_extend32(sign_extend32(REG(state, n)));
int64_t oldq = q;
q = bit_extract(op2, 31, 1);
op2 = zero_extend32(op2 << 1) | t;
if (oldq == _m) op2 = op2 - op1;
else op2 = op2 + op1;
q = (q ^ _m) ^ bit_extract(op2, 32, 1);
t = 1 - (q ^ _m);
REG(state, n) = _register(op2);
state->sr.bits.q = bit(q);
state->sr.bits.t = bit(t);
state->is_delay_slot = false;
}
/* DIV0S Rm,Rn */
/* Signed-division setup: Q = sign of dividend Rn, M = sign of divisor Rm,
   T = Q ^ M (1 when the signs differ). */
void div0s__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n)
{
int64_t op1 = sign_extend32(REG(state, m));
int64_t op2 = sign_extend32(REG(state, n));
int64_t q = bit_extract(op2, 31, 1);
int64_t _m = bit_extract(op1, 31, 1);
int64_t t = _m ^ q;
state->sr.bits.q = bit(q);
state->sr.bits.m = bit(_m);
state->sr.bits.t = bit(t);
state->is_delay_slot = false;
}
/* DIV0U — unsigned-division setup: clear Q, M and T. */
void div0u__no_operand(struct architectural_state * state, struct memory_map * map)
{
state->sr.bits.q = bit(0);
state->sr.bits.m = bit(0);
state->sr.bits.t = bit(0);
state->is_delay_slot = false;
}
/* DMULS.L Rm,Rn */
/* Signed 32x32 -> 64-bit multiply; low half to MACL, high half to MACH. */
void dmuls_l__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n)
{
int64_t op1 = sign_extend32(REG(state, m));
int64_t op2 = sign_extend32(REG(state, n));
int64_t mac = op2 * op1;
int64_t macl = mac;
int64_t mach = mac >> 32;
state->macl = zero_extend32(macl);
state->mach = zero_extend32(mach);
state->is_delay_slot = false;
}
/* DMULU.L Rm,Rn */
/* Unsigned 32x32 -> 64-bit multiply; low half to MACL, high half to MACH. */
void dmulu_l__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n)
{
int64_t op1 = zero_extend32(sign_extend32(REG(state, m)));
int64_t op2 = zero_extend32(sign_extend32(REG(state, n)));
int64_t mac = op2 * op1;
int64_t macl = mac;
int64_t mach = mac >> 32;
state->macl = zero_extend32(macl);
state->mach = zero_extend32(mach);
state->is_delay_slot = false;
}
/* DT Rn */
/* Decrement-and-test: Rn -= 1; T = (Rn == 0) after the decrement. */
void dt__destination_operand_only(struct architectural_state * state, struct memory_map * map, const uint32_t n)
{
int64_t op1 = sign_extend32(REG(state, n));
op1 = op1 - 1;
int64_t t = unary_int((op1 == 0));
REG(state, n) = _register(op1);
state->sr.bits.t = bit(t);
state->is_delay_slot = false;
}
/* EXTS.B Rm,Rn */
/* Rn = low byte of Rm, sign-extended. */
void exts_b__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n)
{
int64_t op1 = sign_extend8(REG(state, m));
int64_t op2 = op1;
REG(state, n) = _register(op2);
state->is_delay_slot = false;
}
/* EXTS.W Rm,Rn */
/* Rn = low word of Rm, sign-extended. */
void exts_w__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n)
{
int64_t op1 = sign_extend16(REG(state, m));
int64_t op2 = op1;
REG(state, n) = _register(op2);
state->is_delay_slot = false;
}
/* EXTU.B Rm,Rn */
/* Rn = low byte of Rm, zero-extended. */
void extu_b__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n)
{
int64_t op1 = zero_extend8(REG(state, m));
int64_t op2 = op1;
REG(state, n) = _register(op2);
state->is_delay_slot = false;
}
/* EXTU.W Rm,Rn */
/* Rn = low word of Rm, zero-extended. */
void extu_w__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n)
{
int64_t op1 = zero_extend16(REG(state, m));
int64_t op2 = op1;
REG(state, n) = _register(op2);
state->is_delay_slot = false;
}
/* MAC.L @Rm+,@Rn+ */
/* Multiply-and-accumulate: MAC += (signed longword @Rm) * (signed longword
   @Rn), with both pointers post-incremented by 4.  With S = 1 the result is
   saturated to the 48-bit range [-2^47, 2^47).
   Fix: the saturation logic was a single unbraced line of triple-nested
   if/else ("goto fail"-style); it parsed correctly only via dangling-else
   binding.  Rewritten with explicit braces — behavior is unchanged. */
void mac_l__multiply_and_accumulate_operation(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n)
{
int64_t macl = zero_extend32(state->macl);
int64_t mach = zero_extend32(state->mach);
int64_t s = zero_extend1(state->sr.bits.s);
int64_t m_field = zero_extend4(m);
int64_t n_field = zero_extend4(n);
int64_t m_address = sign_extend32(REG(state, m));
int64_t n_address = sign_extend32(REG(state, n));
/* @Rn is read first; when m == n the second read must see the pointer
   already advanced once, hence the extra +4 on both addresses. */
int64_t value2 = sign_extend32(read_memory32(map, zero_extend32(n_address)));
n_address = n_address + 4;
if (n_field == m_field)
{
m_address = m_address + 4;
n_address = n_address + 4;
}
int64_t value1 = sign_extend32(read_memory32(map, zero_extend32(m_address)));
m_address = m_address + 4;
int64_t mul = value2 * value1;
int64_t mac = (mach << 32) + macl;
int64_t result = mac + mul;
if (s == 1)
{
/* S = 1: saturate to 48 bits.  If the 64-bit accumulate itself overflowed
   (sign of result differs from the signs of both addends), clamp to the
   48-bit extreme matching the old accumulator's sign; otherwise saturate
   the in-range 64-bit sum. */
if (bit_extract(((result ^ mac) & (result ^ mul)), 63, 1) == 1)
{
if (bit_extract(mac, 63, 1) == 0)
{
result = (1LL << 47) - 1;
}
else
{
result = -(1LL << 47);
}
}
else
{
result = signed_saturate48(result);
}
}
macl = result;
mach = result >> 32;
REG(state, m) = _register(m_address);
REG(state, n) = _register(n_address);
state->macl = zero_extend32(macl);
state->mach = zero_extend32(mach);
state->is_delay_slot = false;
}
/* MAC.W @Rm+,@Rn+ */
/* Word multiply-and-accumulate with post-increment by 2 on both pointers.
   With S = 1 only MACL accumulates, saturating to 32 bits; on saturation
   bit 0 of MACH is set (the (1LL << 32) below).  With S = 0 the full 64-bit
   MACH:MACL accumulates. */
void mac_w__multiply_and_accumulate_operation(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n)
{
int64_t macl = zero_extend32(state->macl);
int64_t mach = zero_extend32(state->mach);
int64_t s = zero_extend1(state->sr.bits.s);
int64_t m_field = zero_extend4(m);
int64_t n_field = zero_extend4(n);
int64_t m_address = sign_extend32(REG(state, m));
int64_t n_address = sign_extend32(REG(state, n));
/* @Rn is read first; when m == n the second read must see the pointer
   already advanced once, hence the extra +2 on both addresses. */
int64_t value2 = sign_extend16(read_memory16(map, zero_extend32(n_address)));
n_address = n_address + 2;
if (n_field == m_field)
{
m_address = m_address + 2;
n_address = n_address + 2;
}
int64_t value1 = sign_extend16(read_memory16(map, zero_extend32(m_address)));
m_address = m_address + 2;
int64_t mul = value2 * value1;
int64_t result = 0;
if (s == 1)
{
macl = sign_extend32(macl) + mul;
int64_t temp = signed_saturate32(macl);
if (macl == temp) result = (mach << 32) | zero_extend32(macl);
else result = (1LL << 32) | zero_extend32(temp);
}
else result = ((mach << 32) + macl) + mul;
macl = result;
mach = result >> 32;
REG(state, m) = _register(m_address);
REG(state, n) = _register(n_address);
state->macl = zero_extend32(macl);
state->mach = zero_extend32(mach);
state->is_delay_slot = false;
}
/* MUL.L Rm,Rn */
/* 32x32 multiply; only the low 32 bits are kept, in MACL. */
void mul_l__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n)
{
int64_t op1 = sign_extend32(REG(state, m));
int64_t op2 = sign_extend32(REG(state, n));
int64_t macl = op1 * op2;
state->macl = zero_extend32(macl);
state->is_delay_slot = false;
}
/* MULS.W Rm,Rn */
/* Signed 16x16 -> 32-bit multiply of the low words into MACL. */
void muls_w__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n)
{
int64_t op1 = sign_extend16(sign_extend32(REG(state, m)));
int64_t op2 = sign_extend16(sign_extend32(REG(state, n)));
int64_t macl = op1 * op2;
state->macl = zero_extend32(macl);
state->is_delay_slot = false;
}
/* MULU.W Rm,Rn */
/* Unsigned 16x16 -> 32-bit multiply of the low words into MACL. */
void mulu_w__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n)
{
int64_t op1 = zero_extend16(sign_extend32(REG(state, m)));
int64_t op2 = zero_extend16(sign_extend32(REG(state, n)));
int64_t macl = op1 * op2;
state->macl = zero_extend32(macl);
state->is_delay_slot = false;
}
/* NEG Rm,Rn */
/* Rn = 0 - Rm. */
void neg__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n)
{
int64_t op1 = sign_extend32(REG(state, m));
int64_t op2 = -op1;
REG(state, n) = _register(op2);
state->is_delay_slot = false;
}
/* NEGC Rm,Rn */
/* Rn = 0 - Rm - T; T receives the borrow (bit 32 of the signed difference
   of zero-extended operands is 1 exactly when the result underflowed). */
void negc__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n)
{
int64_t t = zero_extend1(state->sr.bits.t);
int64_t op1 = zero_extend32(REG(state, m));
int64_t op2 = (-op1) - t;
t = bit_extract(op2, 32, 1);
REG(state, n) = _register(op2);
state->sr.bits.t = bit(t);
state->is_delay_slot = false;
}
/* SUB Rm,Rn */
/* Rn = Rn - Rm (no flags affected). */
void sub__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n)
{
int64_t op1 = sign_extend32(REG(state, m));
int64_t op2 = sign_extend32(REG(state, n));
op2 = op2 - op1;
REG(state, n) = _register(op2);
state->is_delay_slot = false;
}
/* SUBC Rm,Rn */
/* Rn = Rn - Rm - T; T receives the borrow (bit 32 of the difference of the
   zero-extended operands). */
void subc__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n)
{
int64_t t = zero_extend1(state->sr.bits.t);
int64_t op1 = zero_extend32(sign_extend32(REG(state, m)));
int64_t op2 = zero_extend32(sign_extend32(REG(state, n)));
op2 = (op2 - op1) - t;
t = bit_extract(op2, 32, 1);
REG(state, n) = _register(op2);
state->sr.bits.t = bit(t);
state->is_delay_slot = false;
}
/* SUBV Rm,Rn */
/* Rn = Rn - Rm; T set when the signed 32-bit difference overflows. */
void subv__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n)
{
int64_t op1 = sign_extend32(REG(state, m));
int64_t op2 = sign_extend32(REG(state, n));
op2 = op2 - op1;
int64_t t = unary_int(((op2 < (-(1LL << 31))) || (op2 >= (1LL << 31))));
REG(state, n) = _register(op2);
state->sr.bits.t = bit(t);
state->is_delay_slot = false;
}
/* AND Rm,Rn */
/* Rn = Rn & Rm. */
void and__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n)
{
int64_t op1 = zero_extend32(REG(state, m));
int64_t op2 = zero_extend32(REG(state, n));
op2 = op2 & op1;
REG(state, n) = _register(op2);
state->is_delay_slot = false;
}
/* AND #imm,R0 */
/* R0 = R0 & zero-extended 8-bit immediate (clears the upper 24 bits unless
   the immediate is 0xFF-extended — the immediate is NOT sign-extended). */
void and__immediate(struct architectural_state * state, struct memory_map * map, const uint32_t i)
{
int64_t r0 = zero_extend32(REG(state, 0));
int64_t imm = zero_extend8(i);
r0 = r0 & imm;
REG(state, 0) = _register(r0);
state->is_delay_slot = false;
}
/* AND.B #imm,@(R0,GBR) */
/* Read-modify-write: byte at R0 + GBR is ANDed with the immediate. */
void and_b__store_indexed_gbr_indirect(struct architectural_state * state, struct memory_map * map, const uint32_t i)
{
int64_t r0 = sign_extend32(REG(state, 0));
int64_t gbr = sign_extend32(state->gbr);
int64_t imm = zero_extend8(i);
int64_t address = zero_extend32(r0 + gbr);
int64_t value = zero_extend8(read_memory8(map, address));
value = value & imm;
write_memory8(map, address, value);
state->is_delay_slot = false;
}
/* NOT Rm,Rn */
/* Rn = ~Rm (bitwise complement). */
void not__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n)
{
int64_t op1 = zero_extend32(REG(state, m));
int64_t op2 = ~op1;
REG(state, n) = _register(op2);
state->is_delay_slot = false;
}
/* OR Rm,Rn */
/* Rn = Rn | Rm. */
void or__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n)
{
int64_t op1 = zero_extend32(REG(state, m));
int64_t op2 = zero_extend32(REG(state, n));
op2 = op2 | op1;
REG(state, n) = _register(op2);
state->is_delay_slot = false;
}
/* OR #imm,R0 */
/* R0 = R0 | zero-extended 8-bit immediate. */
void or__immediate(struct architectural_state * state, struct memory_map * map, const uint32_t i)
{
int64_t r0 = zero_extend32(REG(state, 0));
int64_t imm = zero_extend8(i);
r0 = r0 | imm;
REG(state, 0) = _register(r0);
state->is_delay_slot = false;
}
/* OR.B #imm,@(R0,GBR) */
/* Read-modify-write: byte at R0 + GBR is ORed with the immediate. */
void or_b__store_indexed_gbr_indirect(struct architectural_state * state, struct memory_map * map, const uint32_t i)
{
int64_t r0 = sign_extend32(REG(state, 0));
int64_t gbr = sign_extend32(state->gbr);
int64_t imm = zero_extend8(i);
int64_t address = zero_extend32(r0 + gbr);
int64_t value = zero_extend8(read_memory8(map, address));
value = value | imm;
write_memory8(map, address, value);
state->is_delay_slot = false;
}
/* TAS.B @Rn */
void tas_b__destination_operand_only(struct architectural_state * state, struct memory_map * map, const uint32_t n)
{
  /* Test-and-set: T = (byte at @Rn == 0), then bit 7 of that byte is set
     and the byte written back.  NOTE(review): ocbp presumably purges the
     operand cache block so the read/write hit external memory -- confirm
     against ocbp's definition. */
  int64_t op1 = sign_extend32(REG(state, n));
  int64_t address = zero_extend32(op1);
  ocbp(state, address);
  int64_t value = zero_extend8(read_memory8(map, address));
  int64_t t = unary_int((value == 0));
  value = value | (1LL << 7);
  write_memory8(map, address, value);
  state->sr.bits.t = bit(t);
  state->is_delay_slot = false;
}
/* TST Rm,Rn */
void tst__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n)
{
  /* T = 1 when (Rm & Rn) == 0; neither register is modified. */
  int64_t lhs = sign_extend32(REG(state, m));
  int64_t rhs = sign_extend32(REG(state, n));
  state->sr.bits.t = bit(unary_int((lhs & rhs) == 0));
  state->is_delay_slot = false;
}
/* TST #imm,R0 */
void tst__immediate(struct architectural_state * state, struct memory_map * map, const uint32_t i)
{
  /* T = 1 when (R0 & imm) == 0; R0 is not modified. */
  int64_t acc = sign_extend32(REG(state, 0));
  state->sr.bits.t = bit(unary_int((acc & zero_extend8(i)) == 0));
  state->is_delay_slot = false;
}
/* TST.B #imm,@(R0,GBR) */
void tst_b__store_indexed_gbr_indirect(struct architectural_state * state, struct memory_map * map, const uint32_t i)
{
  /* T = 1 when (byte at @(R0+GBR) & imm) == 0; memory is NOT modified. */
  int64_t r0 = sign_extend32(REG(state, 0));
  int64_t gbr = sign_extend32(state->gbr);
  int64_t imm = zero_extend8(i);
  int64_t address = zero_extend32(r0 + gbr);
  int64_t value = zero_extend8(read_memory8(map, address));
  /* Wrapped in unary_int for consistency with tst__immediate and
     tst__source_and_destination_operands (was a bare comparison). */
  int64_t t = unary_int(((value & imm) == 0));
  state->sr.bits.t = bit(t);
  state->is_delay_slot = false;
}
/* XOR Rm,Rn */
void xor__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n)
{
  /* Rn = Rn ^ Rm; T unaffected. */
  int64_t src = zero_extend32(REG(state, m));
  int64_t dst = zero_extend32(REG(state, n)) ^ src;
  REG(state, n) = _register(dst);
  state->is_delay_slot = false;
}
/* XOR #imm,R0 */
void xor__immediate(struct architectural_state * state, struct memory_map * map, const uint32_t i)
{
  /* R0 = R0 ^ zero-extended 8-bit immediate. */
  int64_t acc = zero_extend32(REG(state, 0)) ^ zero_extend8(i);
  REG(state, 0) = _register(acc);
  state->is_delay_slot = false;
}
/* XOR.B #imm,@(R0,GBR) */
void xor_b__store_indexed_gbr_indirect(struct architectural_state * state, struct memory_map * map, const uint32_t i)
{
  /* Read-modify-write: XOR the immediate into the byte at R0+GBR. */
  int64_t index = sign_extend32(REG(state, 0));
  int64_t base = sign_extend32(state->gbr);
  int64_t address = zero_extend32(index + base);
  int64_t byte = zero_extend8(read_memory8(map, address)) ^ zero_extend8(i);
  write_memory8(map, address, byte);
  state->is_delay_slot = false;
}
/* ROTL Rn */
void rotl__destination_operand_only(struct architectural_state * state, struct memory_map * map, const uint32_t n)
{
  /* Rotate Rn left by one; the bit leaving bit 31 re-enters bit 0 and
     is also copied to T. */
  int64_t value = zero_extend32(REG(state, n));
  int64_t carry = bit_extract(value, 31, 1);
  REG(state, n) = _register((value << 1) | carry);
  state->sr.bits.t = bit(carry);
  state->is_delay_slot = false;
}
/* ROTR Rn */
void rotr__destination_operand_only(struct architectural_state * state, struct memory_map * map, const uint32_t n)
{
  /* Rotate Rn right by one; the bit leaving bit 0 re-enters bit 31 and
     is also copied to T. */
  int64_t value = zero_extend32(REG(state, n));
  int64_t carry = bit_extract(value, 0, 1);
  REG(state, n) = _register((value >> 1) | (carry << 31));
  state->sr.bits.t = bit(carry);
  state->is_delay_slot = false;
}
/* ROTCL Rn */
void rotcl__destination_operand_only(struct architectural_state * state, struct memory_map * map, const uint32_t n)
{
  /* 33-bit rotate left through T: the old T enters bit 0, and the old
     bit 31 (bit 32 of the widened intermediate) becomes the new T. */
  int64_t t = zero_extend1(state->sr.bits.t);
  int64_t op1 = zero_extend32(REG(state, n));
  op1 = (op1 << 1) | t;
  t = bit_extract(op1, 32, 1);
  REG(state, n) = _register(op1);
  state->sr.bits.t = bit(t);
  state->is_delay_slot = false;
}
/* ROTCR Rn */
void rotcr__destination_operand_only(struct architectural_state * state, struct memory_map * map, const uint32_t n)
{
  /* 33-bit rotate right through T: the old T enters bit 31, and the old
     bit 0 becomes the new T. */
  int64_t t = zero_extend1(state->sr.bits.t);
  int64_t op1 = zero_extend32(REG(state, n));
  int64_t oldt = t;
  t = bit_extract(op1, 0, 1);
  op1 = (op1 >> 1) | (oldt << 31);
  REG(state, n) = _register(op1);
  state->sr.bits.t = bit(t);
  state->is_delay_slot = false;
}
/* SHAL Rn */
void shal__destination_operand_only(struct architectural_state * state, struct memory_map * map, const uint32_t n)
{
  /* Arithmetic shift left by one; the bit shifted out of bit 31 goes to T. */
  int64_t value = sign_extend32(REG(state, n));
  int64_t carry = bit_extract(value, 31, 1);
  REG(state, n) = _register(value << 1);
  state->sr.bits.t = bit(carry);
  state->is_delay_slot = false;
}
/* SHAR Rn */
void shar__destination_operand_only(struct architectural_state * state, struct memory_map * map, const uint32_t n)
{
  /* Arithmetic shift right by one; the bit shifted out of bit 0 goes to T.
     NOTE(review): relies on >> of a negative int64_t being an arithmetic
     shift, which is implementation-defined in C (true on GCC/Clang). */
  int64_t op1 = sign_extend32(REG(state, n));
  int64_t t = bit_extract(op1, 0, 1);
  op1 = op1 >> 1;
  REG(state, n) = _register(op1);
  state->sr.bits.t = bit(t);
  state->is_delay_slot = false;
}
/* SHLL Rn */
void shll__destination_operand_only(struct architectural_state * state, struct memory_map * map, const uint32_t n)
{
  /* Logical shift left by one; the bit shifted out of bit 31 goes to T. */
  int64_t value = zero_extend32(REG(state, n));
  int64_t carry = bit_extract(value, 31, 1);
  REG(state, n) = _register(value << 1);
  state->sr.bits.t = bit(carry);
  state->is_delay_slot = false;
}
/* SHLR Rn */
void shlr__destination_operand_only(struct architectural_state * state, struct memory_map * map, const uint32_t n)
{
  /* Logical shift right by one; the bit shifted out of bit 0 goes to T. */
  int64_t value = zero_extend32(REG(state, n));
  int64_t carry = bit_extract(value, 0, 1);
  REG(state, n) = _register(value >> 1);
  state->sr.bits.t = bit(carry);
  state->is_delay_slot = false;
}
/* SHLL2 Rn */
void shll2__destination_operand_only(struct architectural_state * state, struct memory_map * map, const uint32_t n)
{
  /* Logical shift left by 2; T unaffected. */
  REG(state, n) = _register(zero_extend32(REG(state, n)) << 2);
  state->is_delay_slot = false;
}
/* SHLR2 Rn */
void shlr2__destination_operand_only(struct architectural_state * state, struct memory_map * map, const uint32_t n)
{
  /* Logical shift right by 2; T unaffected. */
  REG(state, n) = _register(zero_extend32(REG(state, n)) >> 2);
  state->is_delay_slot = false;
}
/* SHLL8 Rn */
void shll8__destination_operand_only(struct architectural_state * state, struct memory_map * map, const uint32_t n)
{
  /* Logical shift left by 8; T unaffected. */
  REG(state, n) = _register(zero_extend32(REG(state, n)) << 8);
  state->is_delay_slot = false;
}
/* SHLR8 Rn */
void shlr8__destination_operand_only(struct architectural_state * state, struct memory_map * map, const uint32_t n)
{
  /* Logical shift right by 8; T unaffected. */
  REG(state, n) = _register(zero_extend32(REG(state, n)) >> 8);
  state->is_delay_slot = false;
}
/* SHLL16 Rn */
void shll16__destination_operand_only(struct architectural_state * state, struct memory_map * map, const uint32_t n)
{
  /* Logical shift left by 16; T unaffected. */
  REG(state, n) = _register(zero_extend32(REG(state, n)) << 16);
  state->is_delay_slot = false;
}
/* SHLR16 Rn */
void shlr16__destination_operand_only(struct architectural_state * state, struct memory_map * map, const uint32_t n)
{
  /* Logical shift right by 16; T unaffected. */
  REG(state, n) = _register(zero_extend32(REG(state, n)) >> 16);
  state->is_delay_slot = false;
}
/* BF label */
void bf__pc_relative(struct architectural_state * state, struct memory_map * map, const uint32_t d)
{
  /* Branch if False (no delay slot): when T == 0, jump to PC+4+disp*2.
     pc[0] is the current instruction address; pc[1] and pc[2] are the
     next two fetch addresses, and a taken branch redirects both.
     Illegal in a delay slot. */
  int64_t t = zero_extend1(state->sr.bits.t);
  int64_t pc = sign_extend32(state->pc[0]);
  int64_t newpc = sign_extend32(state->pc[1]);
  int64_t delayedpc = sign_extend32(state->pc[2]);
  int64_t label = sign_extend8(d) << 1;
  if (is_delay_slot(state)) return ILLSLOT(state);
  if (t == 0)
  {
    int64_t temp = zero_extend32(pc + 4 + label);
    newpc = temp;
    delayedpc = temp + 2;
  }
  state->pc[1] = _register(newpc);
  state->pc[2] = _register(delayedpc);
  state->is_delay_slot = false;
}
/* BF/S label */
void bf_s__pc_relative(struct architectural_state * state, struct memory_map * map, const uint32_t d)
{
  /* Branch if False with delay slot: when T == 0, only pc[2] (the address
     fetched after the next instruction) is redirected to PC+4+disp*2, so
     the following instruction still executes.  is_delay_slot = true marks
     that following instruction as the delay slot. */
  int64_t t = zero_extend1(state->sr.bits.t);
  int64_t pc = sign_extend32(state->pc[0]);
  int64_t delayedpc = sign_extend32(state->pc[2]);
  int64_t label = sign_extend8(d) << 1;
  if (is_delay_slot(state)) return ILLSLOT(state);
  if (t == 0)
  {
    int64_t temp = zero_extend32(pc + 4 + label);
    delayedpc = temp;
  }
  state->pc[2] = _register(delayedpc);
  state->is_delay_slot = true;
}
/* BT label */
void bt__pc_relative(struct architectural_state * state, struct memory_map * map, const uint32_t d)
{
  /* Branch if True (no delay slot): when T == 1, jump to PC+4+disp*2.
     A taken branch redirects both upcoming fetch addresses (pc[1], pc[2]).
     Illegal in a delay slot. */
  int64_t t = zero_extend1(state->sr.bits.t);
  int64_t pc = sign_extend32(state->pc[0]);
  int64_t newpc = sign_extend32(state->pc[1]);
  int64_t delayedpc = sign_extend32(state->pc[2]);
  int64_t label = sign_extend8(d) << 1;
  if (is_delay_slot(state)) return ILLSLOT(state);
  if (t == 1)
  {
    int64_t temp = zero_extend32(pc + 4 + label);
    newpc = temp;
    delayedpc = temp + 2;
  }
  state->pc[1] = _register(newpc);
  state->pc[2] = _register(delayedpc);
  state->is_delay_slot = false;
}
/* BT/S label */
void bt_s__pc_relative(struct architectural_state * state, struct memory_map * map, const uint32_t d)
{
  /* Branch if True with delay slot: when T == 1, only pc[2] is redirected
     to PC+4+disp*2; the following (delay-slot) instruction still executes. */
  int64_t t = zero_extend1(state->sr.bits.t);
  int64_t pc = sign_extend32(state->pc[0]);
  int64_t delayedpc = sign_extend32(state->pc[2]);
  int64_t label = sign_extend8(d) << 1;
  if (is_delay_slot(state)) return ILLSLOT(state);
  if (t == 1)
  {
    int64_t temp = zero_extend32(pc + 4 + label);
    delayedpc = temp;
  }
  state->pc[2] = _register(delayedpc);
  state->is_delay_slot = true;
}
/* BRA label */
void bra__pc_relative(struct architectural_state * state, struct memory_map * map, const uint32_t d)
{
  /* Unconditional delayed branch to PC+4+disp*2, with a 12-bit signed
     displacement.  Only pc[2] is redirected; the next instruction is the
     delay slot.  Illegal inside another delay slot. */
  int64_t pc = sign_extend32(state->pc[0]);
  int64_t label = sign_extend12(d) << 1;
  if (is_delay_slot(state)) return ILLSLOT(state);
  int64_t temp = zero_extend32(pc + 4 + label);
  int64_t delayedpc = temp;
  state->pc[2] = _register(delayedpc);
  state->is_delay_slot = true;
}
/* BRAF Rn */
void braf__destination_operand_only(struct architectural_state * state, struct memory_map * map, const uint32_t n)
{
  /* Unconditional delayed branch to PC+4+Rn (register-relative far branch).
     Bit 0 of the target is cleared; the next instruction is the delay slot. */
  int64_t pc = sign_extend32(state->pc[0]);
  int64_t op1 = sign_extend32(REG(state, n));
  if (is_delay_slot(state)) return ILLSLOT(state);
  int64_t target = zero_extend32(pc + 4 + op1);
  int64_t delayedpc = target & (~0x1);
  state->pc[2] = _register(delayedpc);
  state->is_delay_slot = true;
}
/* BSR label */
void bsr__pc_relative(struct architectural_state * state, struct memory_map * map, const uint32_t d)
{
  /* Branch to subroutine at PC+4+disp*2 (12-bit signed displacement), with
     delay slot.  The return address PC+4 is written to the delayed PR slot
     (pr[2]) so it becomes visible after the delay slot executes. */
  int64_t pc = sign_extend32(state->pc[0]);
  int64_t label = sign_extend12(d) << 1;
  if (is_delay_slot(state)) return ILLSLOT(state);
  int64_t delayedpr = pc + 4;
  int64_t temp = zero_extend32(pc + 4 + label);
  int64_t delayedpc = temp;
  state->pr[2] = _register(delayedpr);
  state->pc[2] = _register(delayedpc);
  state->is_delay_slot = true;
}
/* BSRF Rn */
void bsrf__destination_operand_only(struct architectural_state * state, struct memory_map * map, const uint32_t n)
{
  /* Branch to subroutine at PC+4+Rn, with delay slot.  The return address
     PC+4 goes to the delayed PR slot; bit 0 of the target is cleared. */
  int64_t pc = sign_extend32(state->pc[0]);
  int64_t op1 = sign_extend32(REG(state, n));
  if (is_delay_slot(state)) return ILLSLOT(state);
  int64_t delayedpr = pc + 4;
  int64_t target = zero_extend32(pc + 4 + op1);
  int64_t delayedpc = target & (~0x1);
  state->pr[2] = _register(delayedpr);
  state->pc[2] = _register(delayedpc);
  state->is_delay_slot = true;
}
/* JMP @Rn */
void jmp__destination_operand_only(struct architectural_state * state, struct memory_map * map, const uint32_t n)
{
  /* Unconditional delayed jump to the absolute address in Rn, with
     bit 0 cleared.  Illegal in a delay slot. */
  int64_t op1 = sign_extend32(REG(state, n));
  if (is_delay_slot(state)) return ILLSLOT(state);
  int64_t target = op1;
  int64_t delayedpc = target & (~0x1);
  state->pc[2] = _register(delayedpc);
  state->is_delay_slot = true;
}
/* JSR @Rn */
void jsr__destination_operand_only(struct architectural_state * state, struct memory_map * map, const uint32_t n)
{
  /* Jump to subroutine at the absolute address in Rn, with delay slot.
     The return address PC+4 is written to the delayed PR slot (pr[2]). */
  int64_t pc = sign_extend32(state->pc[0]);
  int64_t op1 = sign_extend32(REG(state, n));
  if (is_delay_slot(state)) return ILLSLOT(state);
  int64_t delayedpr = pc + 4;
  int64_t target = op1;
  int64_t delayedpc = target & (~0x1);
  state->pr[2] = _register(delayedpr);
  state->pc[2] = _register(delayedpc);
  state->is_delay_slot = true;
}
/* RTS */
void rts__no_operand(struct architectural_state * state, struct memory_map * map)
{
  /* Return from subroutine: delayed jump to the current PR (pr[0]) with
     bit 0 cleared.  Illegal in a delay slot. */
  int64_t pr = sign_extend32(state->pr[0]);
  if (is_delay_slot(state)) return ILLSLOT(state);
  int64_t target = pr;
  int64_t delayedpc = target & (~0x1);
  state->pc[2] = _register(delayedpc);
  state->is_delay_slot = true;
}
/* CLRMAC */
void clrmac__no_operand(struct architectural_state * state, struct memory_map * map)
{
  /* Zero both halves of the multiply-accumulate register. */
  state->macl = zero_extend32(0);
  state->mach = zero_extend32(0);
  state->is_delay_slot = false;
}
/* CLRS */
void clrs__no_operand(struct architectural_state * state, struct memory_map * map)
{
  /* Clear the S flag in SR. */
  state->sr.bits.s = bit(0);
  state->is_delay_slot = false;
}
/* CLRT */
void clrt__no_operand(struct architectural_state * state, struct memory_map * map)
{
  /* Clear the T flag in SR. */
  state->sr.bits.t = bit(0);
  state->is_delay_slot = false;
}
/* LDC Rm,SR */
void ldc__transfer_to_sr(struct architectural_state * state, struct memory_map * map, const uint32_t m)
{
  /* Privileged: copy Rm into SR; reserved-instruction exception in user mode. */
  if (zero_extend1(state->sr.bits.md) == 0) return RESINST(state);
  state->sr.value = _register(sign_extend32(REG(state, m)));
  state->is_delay_slot = false;
}
/* LDC Rm,GBR */
void ldc__transfer_to_gbr(struct architectural_state * state, struct memory_map * map, const uint32_t m)
{
  /* Copy Rm into GBR (unprivileged). */
  state->gbr = _register(sign_extend32(REG(state, m)));
  state->is_delay_slot = false;
}
/* LDC Rm,VBR */
void ldc__transfer_to_vbr(struct architectural_state * state, struct memory_map * map, const uint32_t m)
{
  /* Privileged: copy Rm into VBR; reserved-instruction exception in user mode. */
  if (zero_extend1(state->sr.bits.md) == 0) return RESINST(state);
  state->vbr = _register(sign_extend32(REG(state, m)));
  state->is_delay_slot = false;
}
/* LDC.L @Rm+,SR */
void ldc_l__load_to_sr(struct architectural_state * state, struct memory_map * map, const uint32_t m)
{
  /* Privileged: pop SR from @Rm, post-incrementing Rm by 4. */
  if (zero_extend1(state->sr.bits.md) == 0) return RESINST(state);
  int64_t pointer = sign_extend32(REG(state, m));
  int64_t loaded = sign_extend32(read_memory32(map, zero_extend32(pointer)));
  REG(state, m) = _register(pointer + 4);
  state->sr.value = _register(loaded);
  state->is_delay_slot = false;
}
/* LDC.L @Rm+,GBR */
void ldc_l__load_to_gbr(struct architectural_state * state, struct memory_map * map, const uint32_t m)
{
  /* Pop GBR from @Rm, post-incrementing Rm by 4. */
  int64_t pointer = sign_extend32(REG(state, m));
  int64_t loaded = sign_extend32(read_memory32(map, zero_extend32(pointer)));
  REG(state, m) = _register(pointer + 4);
  state->gbr = _register(loaded);
  state->is_delay_slot = false;
}
/* LDC.L @Rm+,VBR */
void ldc_l__load_to_vbr(struct architectural_state * state, struct memory_map * map, const uint32_t m)
{
  /* Privileged: pop VBR from @Rm, post-incrementing Rm by 4. */
  if (zero_extend1(state->sr.bits.md) == 0) return RESINST(state);
  int64_t pointer = sign_extend32(REG(state, m));
  int64_t loaded = sign_extend32(read_memory32(map, zero_extend32(pointer)));
  REG(state, m) = _register(pointer + 4);
  state->vbr = _register(loaded);
  state->is_delay_slot = false;
}
/* LDS Rm,MACH */
void lds__transfer_to_mach(struct architectural_state * state, struct memory_map * map, const uint32_t m)
{
  /* Copy Rm into MACH. */
  state->mach = zero_extend32(sign_extend32(REG(state, m)));
  state->is_delay_slot = false;
}
/* LDS Rm,MACL */
void lds__transfer_to_macl(struct architectural_state * state, struct memory_map * map, const uint32_t m)
{
  /* Copy Rm into MACL. */
  state->macl = zero_extend32(sign_extend32(REG(state, m)));
  state->is_delay_slot = false;
}
/* LDS Rm,PR */
void lds__transfer_to_pr(struct architectural_state * state, struct memory_map * map, const uint32_t m)
{
  /* Copy Rm into PR: both the next (pr[1]) and delayed (pr[2]) pipeline
     slots receive the same value. */
  int64_t value = sign_extend32(REG(state, m));
  state->pr[1] = _register(value);
  state->pr[2] = _register(value);
  state->is_delay_slot = false;
}
/* LDS.L @Rm+,MACH */
void lds_l__load_to_mach(struct architectural_state * state, struct memory_map * map, const uint32_t m)
{
  /* Pop MACH from @Rm, post-incrementing Rm by 4. */
  int64_t pointer = sign_extend32(REG(state, m));
  int64_t loaded = sign_extend32(read_memory32(map, zero_extend32(pointer)));
  REG(state, m) = _register(pointer + 4);
  state->mach = zero_extend32(loaded);
  state->is_delay_slot = false;
}
/* LDS.L @Rm+,MACL */
void lds_l__load_to_macl(struct architectural_state * state, struct memory_map * map, const uint32_t m)
{
  /* Pop MACL from @Rm, post-incrementing Rm by 4. */
  int64_t pointer = sign_extend32(REG(state, m));
  int64_t loaded = sign_extend32(read_memory32(map, zero_extend32(pointer)));
  REG(state, m) = _register(pointer + 4);
  state->macl = zero_extend32(loaded);
  state->is_delay_slot = false;
}
/* LDS.L @Rm+,PR */
void lds_l__load_to_pr(struct architectural_state * state, struct memory_map * map, const uint32_t m)
{
  /* Pop PR from @Rm, post-incrementing Rm by 4; both pipeline slots of PR
     (pr[1], pr[2]) receive the loaded value. */
  int64_t pointer = sign_extend32(REG(state, m));
  int64_t loaded = sign_extend32(read_memory32(map, zero_extend32(pointer)));
  REG(state, m) = _register(pointer + 4);
  state->pr[1] = _register(loaded);
  state->pr[2] = _register(loaded);
  state->is_delay_slot = false;
}
/* NOP */
void nop__no_operand(struct architectural_state * state, struct memory_map * map)
{
  /* No operation; only clears the delay-slot flag. */
  state->is_delay_slot = false;
}
/* RTE */
void rte__no_operand(struct architectural_state * state, struct memory_map * map)
{
  /* Privileged delayed return from exception: restores SR from SSR and
     branches with bit 0 of the target cleared.
     NOTE(review): the branch target is taken from pc[0] (the current PC)
     rather than a saved exception PC -- SH4's RTE resumes at SPC.  Verify
     whether this is intentional for the SH2 subset or a bug. */
  int64_t md = zero_extend1(state->sr.bits.md);
  if (md == 0) return RESINST(state);
  int64_t ssr = sign_extend32(state->ssr);
  int64_t pc = sign_extend32(state->pc[0]);
  if (is_delay_slot(state)) return ILLSLOT(state);
  int64_t target = pc;
  int64_t delayedpc = target & (~0x1);
  state->pc[2] = _register(delayedpc);
  state->sr.value = _register(ssr);
  state->is_delay_slot = true;
}
/* SETS */
void sets__no_operand(struct architectural_state * state, struct memory_map * map)
{
  /* Set the S flag in SR. */
  state->sr.bits.s = bit(1);
  state->is_delay_slot = false;
}
/* SETT */
void sett__no_operand(struct architectural_state * state, struct memory_map * map)
{
  /* Set the T flag in SR. */
  state->sr.bits.t = bit(1);
  state->is_delay_slot = false;
}
/* SLEEP */
void sleep__no_operand(struct architectural_state * state, struct memory_map * map)
{
  /* Privileged: enter the low-power sleep state via the emulator's own
     sleep(state) helper (note: shadows/collides with POSIX sleep(3) if
     <unistd.h> is ever included here). */
  int64_t md = zero_extend1(state->sr.bits.md);
  if (md == 0) return RESINST(state);
  sleep(state);
  state->is_delay_slot = false;
}
/* STC SR,Rn */
void stc__transfer_from_sr(struct architectural_state * state, struct memory_map * map, const uint32_t n)
{
  /* Privileged: copy SR into Rn; reserved-instruction exception in user mode. */
  if (zero_extend1(state->sr.bits.md) == 0) return RESINST(state);
  REG(state, n) = _register(sign_extend32(state->sr.value));
  state->is_delay_slot = false;
}
/* STC GBR,Rn */
void stc__transfer_from_gbr(struct architectural_state * state, struct memory_map * map, const uint32_t n)
{
  /* Copy GBR into Rn (unprivileged). */
  REG(state, n) = _register(sign_extend32(state->gbr));
  state->is_delay_slot = false;
}
/* STC VBR,Rn */
void stc__transfer_from_vbr(struct architectural_state * state, struct memory_map * map, const uint32_t n)
{
  /* Privileged: copy VBR into Rn; reserved-instruction exception in user mode. */
  if (zero_extend1(state->sr.bits.md) == 0) return RESINST(state);
  REG(state, n) = _register(sign_extend32(state->vbr));
  state->is_delay_slot = false;
}
/* STC.L SR,@-Rn */
void stc_l__store_from_sr(struct architectural_state * state, struct memory_map * map, const uint32_t n)
{
  /* Privileged: push SR to memory with pre-decrement of Rn. */
  if (zero_extend1(state->sr.bits.md) == 0) return RESINST(state);
  int64_t value = sign_extend32(state->sr.value);
  int64_t address = zero_extend32(sign_extend32(REG(state, n)) - 4);
  write_memory32(map, address, value);
  REG(state, n) = _register(address);
  state->is_delay_slot = false;
}
/* STC.L GBR,@-Rn */
void stc_l__store_from_gbr(struct architectural_state * state, struct memory_map * map, const uint32_t n)
{
  /* Push GBR to memory with pre-decrement of Rn. */
  int64_t value = sign_extend32(state->gbr);
  int64_t address = zero_extend32(sign_extend32(REG(state, n)) - 4);
  write_memory32(map, address, value);
  REG(state, n) = _register(address);
  state->is_delay_slot = false;
}
/* STC.L VBR,@-Rn */
void stc_l__store_from_vbr(struct architectural_state * state, struct memory_map * map, const uint32_t n)
{
  /* Privileged: push VBR to memory with pre-decrement of Rn. */
  if (zero_extend1(state->sr.bits.md) == 0) return RESINST(state);
  int64_t value = sign_extend32(state->vbr);
  int64_t address = zero_extend32(sign_extend32(REG(state, n)) - 4);
  write_memory32(map, address, value);
  REG(state, n) = _register(address);
  state->is_delay_slot = false;
}
/* STS MACH,Rn */
void sts__transfer_from_mach(struct architectural_state * state, struct memory_map * map, const uint32_t n)
{
  /* Copy MACH into Rn. */
  REG(state, n) = _register(sign_extend32(state->mach));
  state->is_delay_slot = false;
}
/* STS MACL,Rn */
void sts__transfer_from_macl(struct architectural_state * state, struct memory_map * map, const uint32_t n)
{
  /* Copy MACL into Rn. */
  REG(state, n) = _register(sign_extend32(state->macl));
  state->is_delay_slot = false;
}
/* STS PR,Rn */
void sts__transfer_from_pr(struct architectural_state * state, struct memory_map * map, const uint32_t n)
{
  /* Copy PR (the next-slot value, pr[1]) into Rn. */
  REG(state, n) = _register(sign_extend32(state->pr[1]));
  state->is_delay_slot = false;
}
/* STS.L MACH,@-Rn */
void sts_l__store_from_mach(struct architectural_state * state, struct memory_map * map, const uint32_t n)
{
  /* Push MACH to memory with pre-decrement of Rn. */
  int64_t value = sign_extend32(state->mach);
  int64_t address = zero_extend32(sign_extend32(REG(state, n)) - 4);
  write_memory32(map, address, value);
  REG(state, n) = _register(address);
  state->is_delay_slot = false;
}
/* STS.L MACL,@-Rn */
void sts_l__store_from_macl(struct architectural_state * state, struct memory_map * map, const uint32_t n)
{
  /* Push MACL to memory with pre-decrement of Rn. */
  int64_t value = sign_extend32(state->macl);
  int64_t address = zero_extend32(sign_extend32(REG(state, n)) - 4);
  write_memory32(map, address, value);
  REG(state, n) = _register(address);
  state->is_delay_slot = false;
}
/* STS.L PR,@-Rn */
void sts_l__store_from_pr(struct architectural_state * state, struct memory_map * map, const uint32_t n)
{
  /* Push PR (pr[1]) to memory with pre-decrement of Rn. */
  int64_t value = sign_extend32(state->pr[1]);
  int64_t address = zero_extend32(sign_extend32(REG(state, n)) - 4);
  write_memory32(map, address, value);
  REG(state, n) = _register(address);
  state->is_delay_slot = false;
}
/* TRAPA #imm */
void trapa__immediate(struct architectural_state * state, struct memory_map * map, const uint32_t i)
{
  /* Unconditional trap: raises a TRAP exception carrying the 8-bit
     immediate as the trap code.  Illegal in a delay slot.
     (The unreachable is_delay_slot store that followed the return
     has been removed.) */
  int64_t imm = zero_extend8(i);
  if (is_delay_slot(state)) return ILLSLOT(state);
  return TRAP(state, imm);
}