sh-dis/c/impl.c
Zack Buhman ea3c389944 add non-FPU/UBC/MMU/cache SH4 instructions
Previously, AST transformations were performed informally as ad-hoc
modifications to the generated C source code. In this commit, the
same transformations are performed by rewriting the AST prior to code
generation.

The most significant new transformer is transform_assignment_list.
This transforms assignments such as:

  a, b, c = f(b, c, d)

To:

  a = f(&b, &c, d)

The former syntax is used frequently in the manual's description of
FPU-related instructions.
2024-04-22 21:34:43 +08:00

2540 lines
68 KiB
C

#include "impl.h"
#include "operations.h"
#include "exception.h"
#include "state_helpers.h"
/* MOV #imm,Rn */
void mov__immediate(struct architectural_state * state, struct memory_map * map, const uint32_t i, const uint32_t n)
{
/* Sign-extend the 8-bit immediate and write it to Rn. */
const int64_t value = sign_extend8(i);
REG(state, n) = _register(value);
state->is_delay_slot = false;
}
/* MOV.W @(disp,PC),Rn */
/* Load a sign-extended 16-bit word from a PC-relative address into Rn.
   Illegal in a delay slot: raises an illegal-slot exception instead. */
void mov_w__pc_relative_with_displacement(struct architectural_state * state, struct memory_map * map, const uint32_t d, const uint32_t n)
{
int64_t pc, disp, address, op2;
pc = sign_extend32(state->pc[0]);
disp = zero_extend8(d) << 1;
if (is_delay_slot(state))
{
/* ISO C (C11 6.8.6.4) forbids `return expr;` in a void function;
   invoke the exception helper and return separately. */
ILLSLOT(state);
return;
}
/* Effective address is PC of this instruction + 4, plus the scaled displacement. */
address = zero_extend32(disp + (pc + 4));
op2 = sign_extend16(read_memory16(map, address));
REG(state, n) = _register(op2);
state->is_delay_slot = false;
}
/* MOV.L @(disp,PC),Rn */
/* Load a 32-bit longword from a PC-relative address into Rn.
   Illegal in a delay slot: raises an illegal-slot exception instead. */
void mov_l__pc_relative_with_displacement(struct architectural_state * state, struct memory_map * map, const uint32_t d, const uint32_t n)
{
int64_t pc, disp, address, op2;
pc = sign_extend32(state->pc[0]);
disp = zero_extend8(d) << 2;
if (is_delay_slot(state))
{
/* ISO C (C11 6.8.6.4) forbids `return expr;` in a void function;
   invoke the exception helper and return separately. */
ILLSLOT(state);
return;
}
/* Base is (PC + 4) forced to longword alignment. */
address = zero_extend32(disp + ((pc + 4) & (~0x3)));
op2 = sign_extend32(read_memory32(map, address));
REG(state, n) = _register(op2);
state->is_delay_slot = false;
}
/* MOV Rm,Rn */
void mov__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n)
{
/* Register-to-register copy: Rn = Rm. */
const int64_t value = zero_extend32(REG(state, m));
REG(state, n) = _register(value);
state->is_delay_slot = false;
}
/* MOV.B Rm,@Rn */
void mov_b__store_register_direct_data_transfer(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n)
{
/* Store the low byte of Rm to the address held in Rn. */
const int64_t data = sign_extend32(REG(state, m));
const int64_t address = zero_extend32(sign_extend32(REG(state, n)));
write_memory8(map, address, data);
state->is_delay_slot = false;
}
/* MOV.W Rm,@Rn */
void mov_w__store_register_direct_data_transfer(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n)
{
/* Store the low 16 bits of Rm to the address held in Rn. */
const int64_t data = sign_extend32(REG(state, m));
const int64_t address = zero_extend32(sign_extend32(REG(state, n)));
write_memory16(map, address, data);
state->is_delay_slot = false;
}
/* MOV.L Rm,@Rn */
void mov_l__store_register_direct_data_transfer(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n)
{
/* Store the 32 bits of Rm to the address held in Rn. */
const int64_t data = sign_extend32(REG(state, m));
const int64_t address = zero_extend32(sign_extend32(REG(state, n)));
write_memory32(map, address, data);
state->is_delay_slot = false;
}
/* MOV.B @Rm,Rn */
void mov_b__load_register_direct_data_transfer(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n)
{
/* Load a sign-extended byte from the address held in Rm into Rn. */
const int64_t address = zero_extend32(sign_extend32(REG(state, m)));
const int64_t loaded = sign_extend8(read_memory8(map, address));
REG(state, n) = _register(loaded);
state->is_delay_slot = false;
}
/* MOV.W @Rm,Rn */
void mov_w__load_register_direct_data_transfer(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n)
{
/* Load a sign-extended 16-bit word from the address held in Rm into Rn. */
const int64_t address = zero_extend32(sign_extend32(REG(state, m)));
const int64_t loaded = sign_extend16(read_memory16(map, address));
REG(state, n) = _register(loaded);
state->is_delay_slot = false;
}
/* MOV.L @Rm,Rn */
void mov_l__load_register_direct_data_transfer(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n)
{
/* Load a 32-bit longword from the address held in Rm into Rn. */
const int64_t address = zero_extend32(sign_extend32(REG(state, m)));
const int64_t loaded = sign_extend32(read_memory32(map, address));
REG(state, n) = _register(loaded);
state->is_delay_slot = false;
}
/* MOV.B Rm,@-Rn */
void mov_b__store_direct_data_transfer_from_register(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n)
{
/* Pre-decrement store: Rn -= 1, then store the low byte of Rm to @Rn. */
const int64_t data = sign_extend32(REG(state, m));
const int64_t address = zero_extend32(sign_extend32(REG(state, n)) - 1);
write_memory8(map, address, data);
REG(state, n) = _register(address);
state->is_delay_slot = false;
}
/* MOV.W Rm,@-Rn */
void mov_w__store_direct_data_transfer_from_register(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n)
{
/* Pre-decrement store: Rn -= 2, then store the low word of Rm to @Rn. */
const int64_t data = sign_extend32(REG(state, m));
const int64_t address = zero_extend32(sign_extend32(REG(state, n)) - 2);
write_memory16(map, address, data);
REG(state, n) = _register(address);
state->is_delay_slot = false;
}
/* MOV.L Rm,@-Rn */
void mov_l__store_direct_data_transfer_from_register(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n)
{
/* Pre-decrement store: Rn -= 4, then store Rm to @Rn. */
const int64_t data = sign_extend32(REG(state, m));
const int64_t address = zero_extend32(sign_extend32(REG(state, n)) - 4);
write_memory32(map, address, data);
REG(state, n) = _register(address);
state->is_delay_slot = false;
}
/* MOV.B @Rm+,Rn */
void mov_b__load_direct_data_transfer_from_register(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n)
{
/* Post-increment load: Rn = sign-extended byte at @Rm, then Rm += 1.
   When m == n the loaded value wins over the increment. */
int64_t pointer = sign_extend32(REG(state, m));
const int64_t loaded = sign_extend8(read_memory8(map, zero_extend32(pointer)));
if (zero_extend4(m) == zero_extend4(n)) pointer = loaded;
else pointer = pointer + 1;
REG(state, m) = _register(pointer);
REG(state, n) = _register(loaded);
state->is_delay_slot = false;
}
/* MOV.W @Rm+,Rn */
void mov_w__load_direct_data_transfer_from_register(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n)
{
/* Post-increment load: Rn = sign-extended word at @Rm, then Rm += 2.
   When m == n the loaded value wins over the increment. */
int64_t pointer = sign_extend32(REG(state, m));
const int64_t loaded = sign_extend16(read_memory16(map, zero_extend32(pointer)));
if (zero_extend4(m) == zero_extend4(n)) pointer = loaded;
else pointer = pointer + 2;
REG(state, m) = _register(pointer);
REG(state, n) = _register(loaded);
state->is_delay_slot = false;
}
/* MOV.L @Rm+,Rn */
void mov_l__load_direct_data_transfer_from_register(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n)
{
/* Post-increment load: Rn = longword at @Rm, then Rm += 4.
   When m == n the loaded value wins over the increment. */
int64_t pointer = sign_extend32(REG(state, m));
const int64_t loaded = sign_extend32(read_memory32(map, zero_extend32(pointer)));
if (zero_extend4(m) == zero_extend4(n)) pointer = loaded;
else pointer = pointer + 4;
REG(state, m) = _register(pointer);
REG(state, n) = _register(loaded);
state->is_delay_slot = false;
}
/* MOV.B R0,@(disp,Rn) */
void mov_b__store_register_indirect_with_displacement(struct architectural_state * state, struct memory_map * map, const uint32_t d, const uint32_t n)
{
/* Store the low byte of R0 to @(disp, Rn); byte access, so disp is unscaled. */
const int64_t data = sign_extend32(REG(state, 0));
const int64_t address = zero_extend32(zero_extend4(d) + sign_extend32(REG(state, n)));
write_memory8(map, address, data);
state->is_delay_slot = false;
}
/* MOV.W R0,@(disp,Rn) */
void mov_w__store_register_indirect_with_displacement(struct architectural_state * state, struct memory_map * map, const uint32_t d, const uint32_t n)
{
/* Store the low word of R0 to @(disp*2, Rn). */
const int64_t data = sign_extend32(REG(state, 0));
const int64_t address = zero_extend32((zero_extend4(d) << 1) + sign_extend32(REG(state, n)));
write_memory16(map, address, data);
state->is_delay_slot = false;
}
/* MOV.L Rm,@(disp,Rn) */
void mov_l__store_register_indirect_with_displacement(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t d, const uint32_t n)
{
/* Store Rm to @(disp*4, Rn). */
const int64_t data = sign_extend32(REG(state, m));
const int64_t address = zero_extend32((zero_extend4(d) << 2) + sign_extend32(REG(state, n)));
write_memory32(map, address, data);
state->is_delay_slot = false;
}
/* MOV.B @(disp,Rm),R0 */
void mov_b__load_register_indirect_with_displacement(struct architectural_state * state, struct memory_map * map, const uint32_t d, const uint32_t m)
{
/* Load a sign-extended byte from @(disp, Rm) into R0. */
const int64_t address = zero_extend32(zero_extend4(d) + sign_extend32(REG(state, m)));
const int64_t loaded = sign_extend8(read_memory8(map, address));
REG(state, 0) = _register(loaded);
state->is_delay_slot = false;
}
/* MOV.W @(disp,Rm),R0 */
void mov_w__load_register_indirect_with_displacement(struct architectural_state * state, struct memory_map * map, const uint32_t d, const uint32_t m)
{
/* Load a sign-extended word from @(disp*2, Rm) into R0. */
const int64_t address = zero_extend32((zero_extend4(d) << 1) + sign_extend32(REG(state, m)));
const int64_t loaded = sign_extend16(read_memory16(map, address));
REG(state, 0) = _register(loaded);
state->is_delay_slot = false;
}
/* MOV.L @(disp,Rm),Rn */
void mov_l__load_register_indirect_with_displacement(struct architectural_state * state, struct memory_map * map, const uint32_t d, const uint32_t m, const uint32_t n)
{
/* Load a longword from @(disp*4, Rm) into Rn. */
const int64_t address = zero_extend32((zero_extend4(d) << 2) + sign_extend32(REG(state, m)));
const int64_t loaded = sign_extend32(read_memory32(map, address));
REG(state, n) = _register(loaded);
state->is_delay_slot = false;
}
/* MOV.B Rm,@(R0,Rn) */
void mov_b__store_indexed_register_indirect(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n)
{
/* Store the low byte of Rm to @(R0 + Rn). */
const int64_t data = sign_extend32(REG(state, m));
const int64_t address = zero_extend32(sign_extend32(REG(state, 0)) + sign_extend32(REG(state, n)));
write_memory8(map, address, data);
state->is_delay_slot = false;
}
/* MOV.W Rm,@(R0,Rn) */
void mov_w__store_indexed_register_indirect(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n)
{
/* Store the low word of Rm to @(R0 + Rn). */
const int64_t data = sign_extend32(REG(state, m));
const int64_t address = zero_extend32(sign_extend32(REG(state, 0)) + sign_extend32(REG(state, n)));
write_memory16(map, address, data);
state->is_delay_slot = false;
}
/* MOV.L Rm,@(R0,Rn) */
void mov_l__store_indexed_register_indirect(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n)
{
/* Store Rm to @(R0 + Rn). */
const int64_t data = sign_extend32(REG(state, m));
const int64_t address = zero_extend32(sign_extend32(REG(state, 0)) + sign_extend32(REG(state, n)));
write_memory32(map, address, data);
state->is_delay_slot = false;
}
/* MOV.B @(R0,Rm),Rn */
void mov_b__load_indexed_register_indirect(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n)
{
/* Load a sign-extended byte from @(R0 + Rm) into Rn. */
const int64_t address = zero_extend32(sign_extend32(REG(state, 0)) + sign_extend32(REG(state, m)));
const int64_t loaded = sign_extend8(read_memory8(map, address));
REG(state, n) = _register(loaded);
state->is_delay_slot = false;
}
/* MOV.W @(R0,Rm),Rn */
void mov_w__load_indexed_register_indirect(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n)
{
/* Load a sign-extended word from @(R0 + Rm) into Rn. */
const int64_t address = zero_extend32(sign_extend32(REG(state, 0)) + sign_extend32(REG(state, m)));
const int64_t loaded = sign_extend16(read_memory16(map, address));
REG(state, n) = _register(loaded);
state->is_delay_slot = false;
}
/* MOV.L @(R0,Rm),Rn */
void mov_l__load_indexed_register_indirect(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n)
{
/* Load a longword from @(R0 + Rm) into Rn. */
const int64_t address = zero_extend32(sign_extend32(REG(state, 0)) + sign_extend32(REG(state, m)));
const int64_t loaded = sign_extend32(read_memory32(map, address));
REG(state, n) = _register(loaded);
state->is_delay_slot = false;
}
/* MOV.B R0,@(disp,GBR) */
void mov_b__store_gbr_indirect_with_displacement(struct architectural_state * state, struct memory_map * map, const uint32_t d)
{
/* Store the low byte of R0 to @(disp, GBR). */
const int64_t data = sign_extend32(REG(state, 0));
const int64_t address = zero_extend32(zero_extend8(d) + sign_extend32(state->gbr));
write_memory8(map, address, data);
state->is_delay_slot = false;
}
/* MOV.W R0,@(disp,GBR) */
void mov_w__store_gbr_indirect_with_displacement(struct architectural_state * state, struct memory_map * map, const uint32_t d)
{
/* Store the low word of R0 to @(disp*2, GBR). */
const int64_t data = sign_extend32(REG(state, 0));
const int64_t address = zero_extend32((zero_extend8(d) << 1) + sign_extend32(state->gbr));
write_memory16(map, address, data);
state->is_delay_slot = false;
}
/* MOV.L R0,@(disp,GBR) */
void mov_l__store_gbr_indirect_with_displacement(struct architectural_state * state, struct memory_map * map, const uint32_t d)
{
/* Store R0 to @(disp*4, GBR). */
const int64_t data = sign_extend32(REG(state, 0));
const int64_t address = zero_extend32((zero_extend8(d) << 2) + sign_extend32(state->gbr));
write_memory32(map, address, data);
state->is_delay_slot = false;
}
/* MOV.B @(disp,GBR),R0 */
void mov_b__load_gbr_indirect_with_displacement(struct architectural_state * state, struct memory_map * map, const uint32_t d)
{
/* Load a sign-extended byte from @(disp, GBR) into R0. */
const int64_t address = zero_extend32(zero_extend8(d) + sign_extend32(state->gbr));
const int64_t loaded = sign_extend8(read_memory8(map, address));
REG(state, 0) = _register(loaded);
state->is_delay_slot = false;
}
/* MOV.W @(disp,GBR),R0 */
void mov_w__load_gbr_indirect_with_displacement(struct architectural_state * state, struct memory_map * map, const uint32_t d)
{
/* Load a sign-extended word from @(disp*2, GBR) into R0. */
const int64_t address = zero_extend32((zero_extend8(d) << 1) + sign_extend32(state->gbr));
const int64_t loaded = sign_extend16(read_memory16(map, address));
REG(state, 0) = _register(loaded);
state->is_delay_slot = false;
}
/* MOV.L @(disp,GBR),R0 */
void mov_l__load_gbr_indirect_with_displacement(struct architectural_state * state, struct memory_map * map, const uint32_t d)
{
/* Load a longword from @(disp*4, GBR) into R0. */
const int64_t address = zero_extend32((zero_extend8(d) << 2) + sign_extend32(state->gbr));
const int64_t loaded = sign_extend32(read_memory32(map, address));
REG(state, 0) = _register(loaded);
state->is_delay_slot = false;
}
/* MOVA @(disp,PC),R0 */
/* Compute a PC-relative effective address (no memory access) into R0.
   Illegal in a delay slot: raises an illegal-slot exception instead. */
void mova__pc_relative_with_displacement(struct architectural_state * state, struct memory_map * map, const uint32_t d)
{
int64_t pc, disp, r0;
pc = sign_extend32(state->pc[0]);
disp = zero_extend8(d) << 2;
if (is_delay_slot(state))
{
/* ISO C (C11 6.8.6.4) forbids `return expr;` in a void function;
   invoke the exception helper and return separately. */
ILLSLOT(state);
return;
}
/* Base is (PC + 4) forced to longword alignment. */
r0 = disp + ((pc + 4) & (~0x3));
REG(state, 0) = _register(r0);
state->is_delay_slot = false;
}
/* MOVT Rn */
void movt__destination_operand_only(struct architectural_state * state, struct memory_map * map, const uint32_t n)
{
/* Copy the T flag (0 or 1) into Rn. */
const int64_t t_flag = zero_extend1(state->sr.bits.t);
REG(state, n) = _register(t_flag);
state->is_delay_slot = false;
}
/* SWAP.B Rm,Rn */
void swap_b__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n)
{
/* Swap the two low bytes of Rm; the upper 16 bits pass through unchanged. */
const int64_t src = zero_extend32(REG(state, m));
const int64_t upper = bit_extract(src, 16, 16) << 16;
const int64_t swapped = upper | (bit_extract(src, 0, 8) << 8) | bit_extract(src, 8, 8);
REG(state, n) = _register(swapped);
state->is_delay_slot = false;
}
/* SWAP.W Rm,Rn */
void swap_w__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n)
{
/* Exchange the upper and lower 16-bit halves of Rm into Rn. */
const int64_t src = zero_extend32(REG(state, m));
const int64_t swapped = (bit_extract(src, 0, 16) << 16) | bit_extract(src, 16, 16);
REG(state, n) = _register(swapped);
state->is_delay_slot = false;
}
/* XTRCT Rm,Rn */
void xtrct__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n)
{
/* Middle 32 bits of the Rm:Rn pair: low half of Rm becomes the high
   half of Rn, high half of Rn becomes the low half. */
const int64_t src = zero_extend32(REG(state, m));
const int64_t dst = zero_extend32(REG(state, n));
const int64_t merged = (bit_extract(src, 0, 16) << 16) | bit_extract(dst, 16, 16);
REG(state, n) = _register(merged);
state->is_delay_slot = false;
}
/* ADD Rm,Rn */
void add__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n)
{
/* Rn = Rn + Rm (no flags affected). */
const int64_t sum = sign_extend32(REG(state, n)) + sign_extend32(REG(state, m));
REG(state, n) = _register(sum);
state->is_delay_slot = false;
}
/* ADD #imm,Rn */
void add__immediate(struct architectural_state * state, struct memory_map * map, const uint32_t i, const uint32_t n)
{
/* Rn = Rn + sign-extended 8-bit immediate (no flags affected). */
const int64_t sum = sign_extend32(REG(state, n)) + sign_extend8(i);
REG(state, n) = _register(sum);
state->is_delay_slot = false;
}
/* ADDC Rm,Rn */
void addc__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n)
{
/* Rn = Rn + Rm + T; T receives the carry out of bit 31.
   Operands are widened to unsigned 32-bit so bit 32 of the 64-bit
   sum is exactly the carry. */
const int64_t carry_in = zero_extend1(state->sr.bits.t);
const int64_t addend = zero_extend32(sign_extend32(REG(state, m)));
int64_t sum = zero_extend32(sign_extend32(REG(state, n)));
sum = sum + addend + carry_in;
const int64_t carry_out = bit_extract(sum, 32, 1);
REG(state, n) = _register(sum);
state->sr.bits.t = bit(carry_out);
state->is_delay_slot = false;
}
/* ADDV Rm,Rn */
void addv__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n)
{
/* Rn = Rn + Rm; T = 1 when the signed 32-bit result overflowed. */
const int64_t sum = sign_extend32(REG(state, n)) + sign_extend32(REG(state, m));
const int64_t overflow = unary_int(((sum < (-(1LL << 31))) || (sum >= (1LL << 31))));
REG(state, n) = _register(sum);
state->sr.bits.t = bit(overflow);
state->is_delay_slot = false;
}
/* CMP/EQ #imm,R0 */
void cmp_eq__immediate(struct architectural_state * state, struct memory_map * map, const uint32_t i)
{
/* T = (R0 == sign-extended immediate). */
const int64_t lhs = sign_extend32(REG(state, 0));
const int64_t rhs = sign_extend8(i);
state->sr.bits.t = bit(unary_int((lhs == rhs)));
state->is_delay_slot = false;
}
/* CMP/EQ Rm,Rn */
void cmp_eq__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n)
{
/* T = (Rn == Rm). */
const int64_t lhs = sign_extend32(REG(state, n));
const int64_t rhs = sign_extend32(REG(state, m));
state->sr.bits.t = bit(unary_int((lhs == rhs)));
state->is_delay_slot = false;
}
/* CMP/HS Rm,Rn */
void cmp_hs__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n)
{
/* T = (Rn >= Rm) as unsigned 32-bit values. */
const int64_t lhs = zero_extend32(sign_extend32(REG(state, n)));
const int64_t rhs = zero_extend32(sign_extend32(REG(state, m)));
state->sr.bits.t = bit(unary_int((lhs >= rhs)));
state->is_delay_slot = false;
}
/* CMP/GE Rm,Rn */
void cmp_ge__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n)
{
/* T = (Rn >= Rm) as signed 32-bit values. */
const int64_t lhs = sign_extend32(REG(state, n));
const int64_t rhs = sign_extend32(REG(state, m));
state->sr.bits.t = bit(unary_int((lhs >= rhs)));
state->is_delay_slot = false;
}
/* CMP/HI Rm,Rn */
void cmp_hi__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n)
{
/* T = (Rn > Rm) as unsigned 32-bit values. */
const int64_t lhs = zero_extend32(sign_extend32(REG(state, n)));
const int64_t rhs = zero_extend32(sign_extend32(REG(state, m)));
state->sr.bits.t = bit(unary_int((lhs > rhs)));
state->is_delay_slot = false;
}
/* CMP/GT Rm,Rn */
void cmp_gt__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n)
{
/* T = (Rn > Rm) as signed 32-bit values. */
const int64_t lhs = sign_extend32(REG(state, n));
const int64_t rhs = sign_extend32(REG(state, m));
state->sr.bits.t = bit(unary_int((lhs > rhs)));
state->is_delay_slot = false;
}
/* CMP/PZ Rn */
void cmp_pz__destination_operand_only(struct architectural_state * state, struct memory_map * map, const uint32_t n)
{
/* T = (Rn >= 0). */
const int64_t value = sign_extend32(REG(state, n));
state->sr.bits.t = bit(unary_int((value >= 0)));
state->is_delay_slot = false;
}
/* CMP/PL Rn */
void cmp_pl__destination_operand_only(struct architectural_state * state, struct memory_map * map, const uint32_t n)
{
/* T = (Rn > 0). */
const int64_t value = sign_extend32(REG(state, n));
state->sr.bits.t = bit(unary_int((value > 0)));
state->is_delay_slot = false;
}
/* CMP/STR Rm,Rn */
void cmp_str__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n)
{
/* T = 1 when any of the four bytes of Rm and Rn are equal
   (a byte of the XOR is zero exactly when the bytes match). */
const int64_t diff = sign_extend32(REG(state, m)) ^ sign_extend32(REG(state, n));
int64_t any_equal = unary_int((bit_extract(diff, 0, 8) == 0));
any_equal = (unary_int((bit_extract(diff, 8, 8) == 0))) | any_equal;
any_equal = (unary_int((bit_extract(diff, 16, 8) == 0))) | any_equal;
any_equal = (unary_int((bit_extract(diff, 24, 8) == 0))) | any_equal;
state->sr.bits.t = bit(any_equal);
state->is_delay_slot = false;
}
/* DIV1 Rm,Rn */
/* One step of a 1-bit non-restoring division; Q, M and T in SR carry
   the intermediate state between steps (DIV0S/DIV0U initialize them,
   and Rn accumulates the shifted dividend/remainder). */
void div1__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n)
{
int64_t q, _m, t, op1, op2, oldq;
q = zero_extend1(state->sr.bits.q);
_m = zero_extend1(state->sr.bits.m);
t = zero_extend1(state->sr.bits.t);
/* Widen to unsigned 32-bit so bit 32 of the 64-bit result captures
   the carry/borrow of the add/subtract below. */
op1 = zero_extend32(sign_extend32(REG(state, m)));
op2 = zero_extend32(sign_extend32(REG(state, n)));
oldq = q;
/* New Q is the bit shifted out of the top of Rn. */
q = bit_extract(op2, 31, 1);
/* Shift T into Rn from the right. */
op2 = zero_extend32(op2 << 1) | t;
/* Subtract or add the divisor depending on whether the previous Q
   matched the divisor sign bit M (non-restoring step). */
if (oldq == _m) op2 = op2 - op1;
else op2 = op2 + op1;
/* Fold the carry/borrow out of bit 32 into Q; T becomes the quotient
   bit produced by this step. */
q = (q ^ _m) ^ bit_extract(op2, 32, 1);
t = 1 - (q ^ _m);
REG(state, n) = _register(op2);
state->sr.bits.q = bit(q);
state->sr.bits.t = bit(t);
state->is_delay_slot = false;
}
/* DIV0S Rm,Rn */
void div0s__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n)
{
/* Signed-division setup: Q = sign of dividend Rn, M = sign of divisor Rm,
   T = 1 when the signs differ. */
const int64_t dividend = sign_extend32(REG(state, n));
const int64_t divisor = sign_extend32(REG(state, m));
const int64_t q_bit = bit_extract(dividend, 31, 1);
const int64_t m_bit = bit_extract(divisor, 31, 1);
state->sr.bits.q = bit(q_bit);
state->sr.bits.m = bit(m_bit);
state->sr.bits.t = bit(m_bit ^ q_bit);
state->is_delay_slot = false;
}
/* DIV0U */
void div0u__no_operand(struct architectural_state * state, struct memory_map * map)
{
/* Unsigned-division setup: clear Q, M and T. */
state->sr.bits.q = bit(0);
state->sr.bits.m = bit(0);
state->sr.bits.t = bit(0);
state->is_delay_slot = false;
}
/* DMULS.L Rm,Rn */
void dmuls_l__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n)
{
/* Signed 32x32 -> 64 multiply; MACH:MACL receives the full product. */
const int64_t product = sign_extend32(REG(state, n)) * sign_extend32(REG(state, m));
state->macl = zero_extend32(product);
state->mach = zero_extend32(product >> 32);
state->is_delay_slot = false;
}
/* DMULU.L Rm,Rn */
void dmulu_l__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n)
{
/* Unsigned 32x32 -> 64 multiply; MACH:MACL receives the full product. */
const int64_t product = zero_extend32(sign_extend32(REG(state, n))) * zero_extend32(sign_extend32(REG(state, m)));
state->macl = zero_extend32(product);
state->mach = zero_extend32(product >> 32);
state->is_delay_slot = false;
}
/* DT Rn */
void dt__destination_operand_only(struct architectural_state * state, struct memory_map * map, const uint32_t n)
{
/* Decrement-and-test: Rn -= 1; T = 1 when the result reaches zero. */
const int64_t counter = sign_extend32(REG(state, n)) - 1;
REG(state, n) = _register(counter);
state->sr.bits.t = bit(unary_int((counter == 0)));
state->is_delay_slot = false;
}
/* EXTS.B Rm,Rn */
void exts_b__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n)
{
/* Rn = sign-extended low byte of Rm. */
const int64_t extended = sign_extend8(REG(state, m));
REG(state, n) = _register(extended);
state->is_delay_slot = false;
}
/* EXTS.W Rm,Rn */
void exts_w__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n)
{
/* Rn = sign-extended low word of Rm. */
const int64_t extended = sign_extend16(REG(state, m));
REG(state, n) = _register(extended);
state->is_delay_slot = false;
}
/* EXTU.B Rm,Rn */
void extu_b__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n)
{
/* Rn = zero-extended low byte of Rm. */
const int64_t extended = zero_extend8(REG(state, m));
REG(state, n) = _register(extended);
state->is_delay_slot = false;
}
/* EXTU.W Rm,Rn */
void extu_w__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n)
{
/* Rn = zero-extended low word of Rm. */
const int64_t extended = zero_extend16(REG(state, m));
REG(state, n) = _register(extended);
state->is_delay_slot = false;
}
/* MAC.L @Rm+,@Rn+ */
/* Multiply the 32-bit values at @Rm and @Rn, add the 64-bit product to
   MACH:MACL, and post-increment both pointers.  When SR.S == 1 the
   accumulation saturates to the signed 48-bit range. */
void mac_l__multiply_and_accumulate_operation(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n)
{
int64_t macl, mach, s, m_field, n_field, m_address, n_address, value2, value1, mul, mac, result;
macl = zero_extend32(state->macl);
mach = zero_extend32(state->mach);
s = zero_extend1(state->sr.bits.s);
m_field = zero_extend4(m);
n_field = zero_extend4(n);
m_address = sign_extend32(REG(state, m));
n_address = sign_extend32(REG(state, n));
value2 = sign_extend32(read_memory32(map, zero_extend32(n_address)));
n_address = n_address + 4;
if (n_field == m_field)
{
/* Rm == Rn: the second read sees the once-incremented pointer and
   the shared register is incremented twice in total. */
m_address = m_address + 4;
n_address = n_address + 4;
}
value1 = sign_extend32(read_memory32(map, zero_extend32(m_address)));
m_address = m_address + 4;
mul = value2 * value1;
mac = (mach << 32) + macl;
result = mac + mul;
if (s == 1)
{
/* Saturating mode.  Braces added to the original's unbraced
   dangling-else chain; the nesting matches the original parse. */
if (bit_extract(((result ^ mac) & (result ^ mul)), 63, 1) == 1)
{
/* 64-bit overflow in mac + mul: clamp per the accumulator sign. */
if (bit_extract(mac, 63, 1) == 0) result = (1LL << 47) - 1;
else result = -(1LL << 47);
}
else result = signed_saturate48(result);
}
macl = result;
mach = result >> 32;
REG(state, m) = _register(m_address);
REG(state, n) = _register(n_address);
state->macl = zero_extend32(macl);
state->mach = zero_extend32(mach);
state->is_delay_slot = false;
}
/* MAC.W @Rm+,@Rn+ */
/* Multiply the 16-bit values at @Rm and @Rn, accumulate into the MAC
   registers, and post-increment both pointers.  When SR.S == 1 only
   MACL accumulates, saturating to 32 bits (MACH bit 0 flags overflow). */
void mac_w__multiply_and_accumulate_operation(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n)
{
int64_t macl, mach, s, m_field, n_field, m_address, n_address, value2, value1, mul, result, temp;
macl = zero_extend32(state->macl);
mach = zero_extend32(state->mach);
s = zero_extend1(state->sr.bits.s);
m_field = zero_extend4(m);
n_field = zero_extend4(n);
m_address = sign_extend32(REG(state, m));
n_address = sign_extend32(REG(state, n));
value2 = sign_extend16(read_memory16(map, zero_extend32(n_address)));
n_address = n_address + 2;
if (n_field == m_field)
{
/* Rm == Rn: the second read sees the once-incremented pointer and
   the shared register is incremented twice in total. */
m_address = m_address + 2;
n_address = n_address + 2;
}
value1 = sign_extend16(read_memory16(map, zero_extend32(m_address)));
m_address = m_address + 2;
mul = value2 * value1;
result = 0;
if (s == 1)
{
/* Saturating mode: accumulate into MACL only, clamping to the
   signed 32-bit range; on overflow set MACH bit 0 as a sticky flag. */
macl = sign_extend32(macl) + mul;
temp = signed_saturate32(macl);
if (macl == temp) result = (mach << 32) | zero_extend32(macl);
else result = (1LL << 32) | zero_extend32(temp);
}
else result = ((mach << 32) + macl) + mul;
macl = result;
mach = result >> 32;
REG(state, m) = _register(m_address);
REG(state, n) = _register(n_address);
state->macl = zero_extend32(macl);
state->mach = zero_extend32(mach);
state->is_delay_slot = false;
}
/* MUL.L Rm,Rn */
void mul_l__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n)
{
/* MACL = low 32 bits of Rm * Rn (MACH untouched). */
const int64_t product = sign_extend32(REG(state, m)) * sign_extend32(REG(state, n));
state->macl = zero_extend32(product);
state->is_delay_slot = false;
}
/* MULS.W Rm,Rn */
void muls_w__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n)
{
/* MACL = signed 16x16 multiply of the low words of Rm and Rn. */
const int64_t lhs = sign_extend16(sign_extend32(REG(state, m)));
const int64_t rhs = sign_extend16(sign_extend32(REG(state, n)));
state->macl = zero_extend32(lhs * rhs);
state->is_delay_slot = false;
}
/* MULU.W Rm,Rn */
void mulu_w__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n)
{
/* MACL = unsigned 16x16 multiply of the low words of Rm and Rn. */
const int64_t lhs = zero_extend16(sign_extend32(REG(state, m)));
const int64_t rhs = zero_extend16(sign_extend32(REG(state, n)));
state->macl = zero_extend32(lhs * rhs);
state->is_delay_slot = false;
}
/* NEG Rm,Rn */
void neg__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n)
{
/* Rn = 0 - Rm. */
const int64_t negated = -sign_extend32(REG(state, m));
REG(state, n) = _register(negated);
state->is_delay_slot = false;
}
/* NEGC Rm,Rn */
void negc__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n)
{
/* Rn = 0 - Rm - T; T receives the borrow (bit 32 of the 64-bit result,
   which is set whenever the result is negative). */
const int64_t borrow_in = zero_extend1(state->sr.bits.t);
const int64_t src = zero_extend32(REG(state, m));
const int64_t result = (-src) - borrow_in;
const int64_t borrow_out = bit_extract(result, 32, 1);
REG(state, n) = _register(result);
state->sr.bits.t = bit(borrow_out);
state->is_delay_slot = false;
}
/* SUB Rm,Rn */
void sub__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n)
{
/* Rn = Rn - Rm (no flags affected). */
const int64_t difference = sign_extend32(REG(state, n)) - sign_extend32(REG(state, m));
REG(state, n) = _register(difference);
state->is_delay_slot = false;
}
/* SUBC Rm,Rn */
void subc__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n)
{
/* Rn = Rn - Rm - T; T receives the borrow out of bit 31.
   Operands are widened to unsigned 32-bit so bit 32 of the 64-bit
   difference is exactly the borrow. */
const int64_t borrow_in = zero_extend1(state->sr.bits.t);
const int64_t subtrahend = zero_extend32(sign_extend32(REG(state, m)));
int64_t difference = zero_extend32(sign_extend32(REG(state, n)));
difference = (difference - subtrahend) - borrow_in;
const int64_t borrow_out = bit_extract(difference, 32, 1);
REG(state, n) = _register(difference);
state->sr.bits.t = bit(borrow_out);
state->is_delay_slot = false;
}
/* SUBV Rm,Rn */
void subv__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n)
{
/* Rn = Rn - Rm; T = 1 when the signed 32-bit result overflowed. */
const int64_t difference = sign_extend32(REG(state, n)) - sign_extend32(REG(state, m));
const int64_t overflow = unary_int(((difference < (-(1LL << 31))) || (difference >= (1LL << 31))));
REG(state, n) = _register(difference);
state->sr.bits.t = bit(overflow);
state->is_delay_slot = false;
}
/* AND Rm,Rn */
void and__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n)
{
/* Rn = Rn & Rm. */
const int64_t result = zero_extend32(REG(state, n)) & zero_extend32(REG(state, m));
REG(state, n) = _register(result);
state->is_delay_slot = false;
}
/* AND #imm,R0 */
void and__immediate(struct architectural_state * state, struct memory_map * map, const uint32_t i)
{
/* R0 = R0 & zero-extended 8-bit immediate. */
const int64_t result = zero_extend32(REG(state, 0)) & zero_extend8(i);
REG(state, 0) = _register(result);
state->is_delay_slot = false;
}
/* AND.B #imm,@(R0,GBR) */
void and_b__store_indexed_gbr_indirect(struct architectural_state * state, struct memory_map * map, const uint32_t i)
{
/* Read-modify-write: AND the immediate into the byte at @(R0 + GBR). */
const int64_t address = zero_extend32(sign_extend32(REG(state, 0)) + sign_extend32(state->gbr));
int64_t byte = zero_extend8(read_memory8(map, address));
byte = byte & zero_extend8(i);
write_memory8(map, address, byte);
state->is_delay_slot = false;
}
/* NOT Rm,Rn */
void not__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n)
{
/* Rn = bitwise complement of Rm. */
const int64_t inverted = ~zero_extend32(REG(state, m));
REG(state, n) = _register(inverted);
state->is_delay_slot = false;
}
/* OR Rm,Rn */
void or__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n)
{
/* Rn = Rn | Rm. */
const int64_t result = zero_extend32(REG(state, n)) | zero_extend32(REG(state, m));
REG(state, n) = _register(result);
state->is_delay_slot = false;
}
/* OR #imm,R0 */
void or__immediate(struct architectural_state * state, struct memory_map * map, const uint32_t i)
{
int64_t r0, imm;
r0 = zero_extend32(REG(state, 0));
imm = zero_extend8(i);
r0 = r0 | imm;
REG(state, 0) = _register(r0);
state->is_delay_slot = false;
}
/* OR.B #imm,@(R0,GBR) */
void or_b__store_indexed_gbr_indirect(struct architectural_state * state, struct memory_map * map, const uint32_t i)
{
int64_t r0, gbr, imm, address, value;
r0 = sign_extend32(REG(state, 0));
gbr = sign_extend32(state->gbr);
imm = zero_extend8(i);
address = zero_extend32(r0 + gbr);
value = zero_extend8(read_memory8(map, address));
value = value | imm;
write_memory8(map, address, value);
state->is_delay_slot = false;
}
/* TAS.B @Rn */
void tas_b__destination_operand_only(struct architectural_state * state, struct memory_map * map, const uint32_t n)
{
  int64_t op1, address, value, t;
  /* Test-and-set: T = (byte at @Rn == 0), then bit 7 of that byte is set.
     On hardware this is an atomic bus read-modify-write; here it is modelled
     as a plain read followed by a write. */
  op1 = sign_extend32(REG(state, n));
  address = zero_extend32(op1);
  /* Purge the operand-cache block first, as the SH-4 TAS.B definition
     requires, so the RMW goes to memory rather than a stale cache line. */
  ocbp(state, address);
  value = zero_extend8(read_memory8(map, address));
  t = unary_int((value == 0));
  value = value | (1LL << 7);
  write_memory8(map, address, value);
  state->sr.bits.t = bit(t);
  state->is_delay_slot = false;
}
/* TST Rm,Rn */
void tst__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n)
{
int64_t op1, op2, t;
op1 = sign_extend32(REG(state, m));
op2 = sign_extend32(REG(state, n));
t = unary_int(((op1 & op2) == 0));
state->sr.bits.t = bit(t);
state->is_delay_slot = false;
}
/* TST #imm,R0 */
void tst__immediate(struct architectural_state * state, struct memory_map * map, const uint32_t i)
{
int64_t r0, imm, t;
r0 = sign_extend32(REG(state, 0));
imm = zero_extend8(i);
t = unary_int(((r0 & imm) == 0));
state->sr.bits.t = bit(t);
state->is_delay_slot = false;
}
/* TST.B #imm,@(R0,GBR) */
void tst_b__store_indexed_gbr_indirect(struct architectural_state * state, struct memory_map * map, const uint32_t i)
{
  int64_t r0, gbr, imm, address, value, t;
  /* Reads the byte at R0+GBR, ANDs it with the zero-extended immediate, and
     sets T when the result is zero.  Memory is NOT modified. */
  r0 = sign_extend32(REG(state, 0));
  gbr = sign_extend32(state->gbr);
  imm = zero_extend8(i);
  address = zero_extend32(r0 + gbr);
  value = zero_extend8(read_memory8(map, address));
  /* Wrap the comparison in unary_int() like every sibling T computation
     (TST Rm,Rn / TST #imm,R0 / TAS.B) instead of relying on the raw C
     comparison result, keeping the generated code uniform. */
  t = unary_int(((value & imm) == 0));
  state->sr.bits.t = bit(t);
  state->is_delay_slot = false;
}
/* XOR Rm,Rn */
void xor__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n)
{
int64_t op1, op2;
op1 = zero_extend32(REG(state, m));
op2 = zero_extend32(REG(state, n));
op2 = op2 ^ op1;
REG(state, n) = _register(op2);
state->is_delay_slot = false;
}
/* XOR #imm,R0 */
void xor__immediate(struct architectural_state * state, struct memory_map * map, const uint32_t i)
{
int64_t r0, imm;
r0 = zero_extend32(REG(state, 0));
imm = zero_extend8(i);
r0 = r0 ^ imm;
REG(state, 0) = _register(r0);
state->is_delay_slot = false;
}
/* XOR.B #imm,@(R0,GBR) */
void xor_b__store_indexed_gbr_indirect(struct architectural_state * state, struct memory_map * map, const uint32_t i)
{
int64_t r0, gbr, imm, address, value;
r0 = sign_extend32(REG(state, 0));
gbr = sign_extend32(state->gbr);
imm = zero_extend8(i);
address = zero_extend32(r0 + gbr);
value = zero_extend8(read_memory8(map, address));
value = value ^ imm;
write_memory8(map, address, value);
state->is_delay_slot = false;
}
/* ROTL Rn */
void rotl__destination_operand_only(struct architectural_state * state, struct memory_map * map, const uint32_t n)
{
  int64_t op1, t;
  /* Rotate Rn left by one: the bit rotated out of bit 31 goes both into
     bit 0 and into T. */
  op1 = zero_extend32(REG(state, n));
  t = bit_extract(op1, 31, 1);
  /* The shifted-out copy left in bit 32 of the 64-bit temporary is
     presumably discarded by _register()'s truncation to 32 bits. */
  op1 = (op1 << 1) | t;
  REG(state, n) = _register(op1);
  state->sr.bits.t = bit(t);
  state->is_delay_slot = false;
}
/* ROTR Rn */
void rotr__destination_operand_only(struct architectural_state * state, struct memory_map * map, const uint32_t n)
{
int64_t op1, t;
op1 = zero_extend32(REG(state, n));
t = bit_extract(op1, 0, 1);
op1 = (op1 >> 1) | (t << 31);
REG(state, n) = _register(op1);
state->sr.bits.t = bit(t);
state->is_delay_slot = false;
}
/* ROTCL Rn */
void rotcl__destination_operand_only(struct architectural_state * state, struct memory_map * map, const uint32_t n)
{
  int64_t t, op1;
  /* Rotate Rn left by one through the T bit: the old T enters bit 0 and the
     old bit 31 becomes the new T. */
  t = zero_extend1(state->sr.bits.t);
  op1 = zero_extend32(REG(state, n));
  op1 = (op1 << 1) | t;
  /* op1 is 33 bits wide at this point; bit 32 holds the carried-out former
     bit 31, which becomes the new T before truncation to 32 bits. */
  t = bit_extract(op1, 32, 1);
  REG(state, n) = _register(op1);
  state->sr.bits.t = bit(t);
  state->is_delay_slot = false;
}
/* ROTCR Rn */
void rotcr__destination_operand_only(struct architectural_state * state, struct memory_map * map, const uint32_t n)
{
  int64_t t, op1, oldt;
  /* Rotate Rn right by one through the T bit: the old T enters bit 31 and
     the old bit 0 becomes the new T. */
  t = zero_extend1(state->sr.bits.t);
  op1 = zero_extend32(REG(state, n));
  /* Save the incoming T before it is overwritten with the outgoing bit 0. */
  oldt = t;
  t = bit_extract(op1, 0, 1);
  op1 = (op1 >> 1) | (oldt << 31);
  REG(state, n) = _register(op1);
  state->sr.bits.t = bit(t);
  state->is_delay_slot = false;
}
/* SHAD Rm,Rn */
void shad__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n)
{
  int64_t op1, op2, shift_amount;
  /* Arithmetic dynamic shift: Rm >= 0 shifts Rn left by Rm[4:0]; Rm < 0
     shifts Rn arithmetically right by 32 - Rm[4:0].  The special case
     Rm < 0 with Rm[4:0] == 0 is a full 32-bit right shift, which saturates
     to all sign bits: -1 for negative Rn, 0 otherwise. */
  op1 = sign_extend32(REG(state, m));
  op2 = sign_extend32(REG(state, n));
  shift_amount = zero_extend5(op1); /* low 5 bits of Rm */
  if (op1 >= 0) op2 = op2 << shift_amount;
  /* op2 is sign-extended, so >> on it is an arithmetic shift here */
  else if (shift_amount != 0) op2 = op2 >> (32 - shift_amount);
  else if (op2 < 0) op2 = -1;
  else op2 = 0;
  REG(state, n) = _register(op2);
  state->is_delay_slot = false;
}
/* SHAL Rn */
void shal__destination_operand_only(struct architectural_state * state, struct memory_map * map, const uint32_t n)
{
int64_t op1, t;
op1 = sign_extend32(REG(state, n));
t = bit_extract(op1, 31, 1);
op1 = op1 << 1;
REG(state, n) = _register(op1);
state->sr.bits.t = bit(t);
state->is_delay_slot = false;
}
/* SHAR Rn */
void shar__destination_operand_only(struct architectural_state * state, struct memory_map * map, const uint32_t n)
{
int64_t op1, t;
op1 = sign_extend32(REG(state, n));
t = bit_extract(op1, 0, 1);
op1 = op1 >> 1;
REG(state, n) = _register(op1);
state->sr.bits.t = bit(t);
state->is_delay_slot = false;
}
/* SHLD Rm,Rn */
void shld__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n)
{
  int64_t op1, op2, shift_amount;
  /* Logical dynamic shift: Rm >= 0 shifts Rn left by Rm[4:0]; Rm < 0 shifts
     Rn logically right by 32 - Rm[4:0]; the full 32-bit right shift
     (Rm < 0 with Rm[4:0] == 0) yields 0. */
  op1 = sign_extend32(REG(state, m));
  /* zero-extended so the >> below behaves as a logical shift */
  op2 = zero_extend32(REG(state, n));
  shift_amount = zero_extend5(op1);
  if (op1 >= 0) op2 = op2 << shift_amount;
  else if (shift_amount != 0) op2 = op2 >> (32 - shift_amount);
  else op2 = 0;
  REG(state, n) = _register(op2);
  state->is_delay_slot = false;
}
/* SHLL Rn */
void shll__destination_operand_only(struct architectural_state * state, struct memory_map * map, const uint32_t n)
{
int64_t op1, t;
op1 = zero_extend32(REG(state, n));
t = bit_extract(op1, 31, 1);
op1 = op1 << 1;
REG(state, n) = _register(op1);
state->sr.bits.t = bit(t);
state->is_delay_slot = false;
}
/* SHLR Rn */
void shlr__destination_operand_only(struct architectural_state * state, struct memory_map * map, const uint32_t n)
{
int64_t op1, t;
op1 = zero_extend32(REG(state, n));
t = bit_extract(op1, 0, 1);
op1 = op1 >> 1;
REG(state, n) = _register(op1);
state->sr.bits.t = bit(t);
state->is_delay_slot = false;
}
/* SHLL2 Rn */
void shll2__destination_operand_only(struct architectural_state * state, struct memory_map * map, const uint32_t n)
{
  /* Shift Rn logically left by two bit positions; T is unaffected. */
  int64_t value = zero_extend32(REG(state, n));
  value <<= 2;
  REG(state, n) = _register(value);
  state->is_delay_slot = false;
}
/* SHLR2 Rn */
void shlr2__destination_operand_only(struct architectural_state * state, struct memory_map * map, const uint32_t n)
{
int64_t op1;
op1 = zero_extend32(REG(state, n));
op1 = op1 >> 2;
REG(state, n) = _register(op1);
state->is_delay_slot = false;
}
/* SHLL8 Rn */
void shll8__destination_operand_only(struct architectural_state * state, struct memory_map * map, const uint32_t n)
{
int64_t op1;
op1 = zero_extend32(REG(state, n));
op1 = op1 << 8;
REG(state, n) = _register(op1);
state->is_delay_slot = false;
}
/* SHLR8 Rn */
void shlr8__destination_operand_only(struct architectural_state * state, struct memory_map * map, const uint32_t n)
{
int64_t op1;
op1 = zero_extend32(REG(state, n));
op1 = op1 >> 8;
REG(state, n) = _register(op1);
state->is_delay_slot = false;
}
/* SHLL16 Rn */
void shll16__destination_operand_only(struct architectural_state * state, struct memory_map * map, const uint32_t n)
{
int64_t op1;
op1 = zero_extend32(REG(state, n));
op1 = op1 << 16;
REG(state, n) = _register(op1);
state->is_delay_slot = false;
}
/* SHLR16 Rn */
void shlr16__destination_operand_only(struct architectural_state * state, struct memory_map * map, const uint32_t n)
{
int64_t op1;
op1 = zero_extend32(REG(state, n));
op1 = op1 >> 16;
REG(state, n) = _register(op1);
state->is_delay_slot = false;
}
/* BF label */
void bf__pc_relative(struct architectural_state * state, struct memory_map * map, const uint32_t d)
{
  int64_t t, pc, newpc, delayedpc, label, temp;
  /* Conditional branch without a delay slot: if T == 0, branch to
     PC + 4 + 2*disp.  NOTE(review): pc[0]/pc[1]/pc[2] appear to model the
     fetch pipeline (current PC, next PC, PC after that) -- inferred from
     use; confirm against the state_helpers definitions. */
  t = zero_extend1(state->sr.bits.t);
  pc = sign_extend32(state->pc[0]);
  newpc = sign_extend32(state->pc[1]);
  delayedpc = sign_extend32(state->pc[2]);
  /* 8-bit signed displacement, scaled to 16-bit instruction units */
  label = sign_extend8(d) << 1;
  /* a branch inside a delay slot is an illegal-slot exception */
  if (is_delay_slot(state)) return ILLSLOT(state);
  if (t == 0)
  {
    temp = zero_extend32(pc + 4 + label);
    newpc = temp;
    delayedpc = temp + 2;
  }
  state->pc[1] = _register(newpc);
  state->pc[2] = _register(delayedpc);
  state->is_delay_slot = false;
}
/* BF/S label */
void bf_s__pc_relative(struct architectural_state * state, struct memory_map * map, const uint32_t d)
{
int64_t t, pc, delayedpc, label, temp;
t = zero_extend1(state->sr.bits.t);
pc = sign_extend32(state->pc[0]);
delayedpc = sign_extend32(state->pc[2]);
label = sign_extend8(d) << 1;
if (is_delay_slot(state)) return ILLSLOT(state);
if (t == 0)
{
temp = zero_extend32(pc + 4 + label);
delayedpc = temp;
}
state->pc[2] = _register(delayedpc);
state->is_delay_slot = true;
}
/* BT label */
void bt__pc_relative(struct architectural_state * state, struct memory_map * map, const uint32_t d)
{
int64_t t, pc, newpc, delayedpc, label, temp;
t = zero_extend1(state->sr.bits.t);
pc = sign_extend32(state->pc[0]);
newpc = sign_extend32(state->pc[1]);
delayedpc = sign_extend32(state->pc[2]);
label = sign_extend8(d) << 1;
if (is_delay_slot(state)) return ILLSLOT(state);
if (t == 1)
{
temp = zero_extend32(pc + 4 + label);
newpc = temp;
delayedpc = temp + 2;
}
state->pc[1] = _register(newpc);
state->pc[2] = _register(delayedpc);
state->is_delay_slot = false;
}
/* BT/S label */
void bt_s__pc_relative(struct architectural_state * state, struct memory_map * map, const uint32_t d)
{
int64_t t, pc, delayedpc, label, temp;
t = zero_extend1(state->sr.bits.t);
pc = sign_extend32(state->pc[0]);
delayedpc = sign_extend32(state->pc[2]);
label = sign_extend8(d) << 1;
if (is_delay_slot(state)) return ILLSLOT(state);
if (t == 1)
{
temp = zero_extend32(pc + 4 + label);
delayedpc = temp;
}
state->pc[2] = _register(delayedpc);
state->is_delay_slot = true;
}
/* BRA label */
void bra__pc_relative(struct architectural_state * state, struct memory_map * map, const uint32_t d)
{
int64_t pc, label, temp, delayedpc;
pc = sign_extend32(state->pc[0]);
label = sign_extend12(d) << 1;
if (is_delay_slot(state)) return ILLSLOT(state);
temp = zero_extend32(pc + 4 + label);
delayedpc = temp;
state->pc[2] = _register(delayedpc);
state->is_delay_slot = true;
}
/* BRAF Rn */
void braf__destination_operand_only(struct architectural_state * state, struct memory_map * map, const uint32_t n)
{
int64_t pc, op1, target, delayedpc;
pc = sign_extend32(state->pc[0]);
op1 = sign_extend32(REG(state, n));
if (is_delay_slot(state)) return ILLSLOT(state);
target = zero_extend32(pc + 4 + op1);
delayedpc = target & (~0x1);
state->pc[2] = _register(delayedpc);
state->is_delay_slot = true;
}
/* BSR label */
void bsr__pc_relative(struct architectural_state * state, struct memory_map * map, const uint32_t d)
{
int64_t pc, label, delayedpr, temp, delayedpc;
pc = sign_extend32(state->pc[0]);
label = sign_extend12(d) << 1;
if (is_delay_slot(state)) return ILLSLOT(state);
delayedpr = pc + 4;
temp = zero_extend32(pc + 4 + label);
delayedpc = temp;
state->pr[2] = _register(delayedpr);
state->pc[2] = _register(delayedpc);
state->is_delay_slot = true;
}
/* BSRF Rn */
void bsrf__destination_operand_only(struct architectural_state * state, struct memory_map * map, const uint32_t n)
{
int64_t pc, op1, delayedpr, target, delayedpc;
pc = sign_extend32(state->pc[0]);
op1 = sign_extend32(REG(state, n));
if (is_delay_slot(state)) return ILLSLOT(state);
delayedpr = pc + 4;
target = zero_extend32(pc + 4 + op1);
delayedpc = target & (~0x1);
state->pr[2] = _register(delayedpr);
state->pc[2] = _register(delayedpc);
state->is_delay_slot = true;
}
/* JMP @Rn */
void jmp__destination_operand_only(struct architectural_state * state, struct memory_map * map, const uint32_t n)
{
int64_t op1, target, delayedpc;
op1 = sign_extend32(REG(state, n));
if (is_delay_slot(state)) return ILLSLOT(state);
target = op1;
delayedpc = target & (~0x1);
state->pc[2] = _register(delayedpc);
state->is_delay_slot = true;
}
/* JSR @Rn */
void jsr__destination_operand_only(struct architectural_state * state, struct memory_map * map, const uint32_t n)
{
int64_t pc, op1, delayedpr, target, delayedpc;
pc = sign_extend32(state->pc[0]);
op1 = sign_extend32(REG(state, n));
if (is_delay_slot(state)) return ILLSLOT(state);
delayedpr = pc + 4;
target = op1;
delayedpc = target & (~0x1);
state->pr[2] = _register(delayedpr);
state->pc[2] = _register(delayedpc);
state->is_delay_slot = true;
}
/* RTS */
void rts__no_operand(struct architectural_state * state, struct memory_map * map)
{
int64_t pr, target, delayedpc;
pr = sign_extend32(state->pr[0]);
if (is_delay_slot(state)) return ILLSLOT(state);
target = pr;
delayedpc = target & (~0x1);
state->pc[2] = _register(delayedpc);
state->is_delay_slot = true;
}
/* CLRMAC */
void clrmac__no_operand(struct architectural_state * state, struct memory_map * map)
{
  /* Clear both halves of the multiply-accumulate register (MACL and MACH). */
  state->macl = zero_extend32(0);
  state->mach = zero_extend32(0);
  state->is_delay_slot = false;
}
/* CLRS */
void clrs__no_operand(struct architectural_state * state, struct memory_map * map)
{
int64_t s;
s = 0;
state->sr.bits.s = bit(s);
state->is_delay_slot = false;
}
/* CLRT */
void clrt__no_operand(struct architectural_state * state, struct memory_map * map)
{
int64_t t;
t = 0;
state->sr.bits.t = bit(t);
state->is_delay_slot = false;
}
/* LDC Rm,SR */
void ldc__transfer_to_sr(struct architectural_state * state, struct memory_map * map, const uint32_t m)
{
int64_t md, op1, sr;
md = zero_extend1(state->sr.bits.md);
if (md == 0) return RESINST(state);
op1 = sign_extend32(REG(state, m));
sr = op1;
state->sr.value = _register(sr);
state->is_delay_slot = false;
}
/* LDC Rm,GBR */
void ldc__transfer_to_gbr(struct architectural_state * state, struct memory_map * map, const uint32_t m)
{
int64_t op1, gbr;
op1 = sign_extend32(REG(state, m));
gbr = op1;
state->gbr = _register(gbr);
state->is_delay_slot = false;
}
/* LDC Rm,VBR */
void ldc__transfer_to_vbr(struct architectural_state * state, struct memory_map * map, const uint32_t m)
{
int64_t md, op1, vbr;
md = zero_extend1(state->sr.bits.md);
if (md == 0) return RESINST(state);
op1 = sign_extend32(REG(state, m));
vbr = op1;
state->vbr = _register(vbr);
state->is_delay_slot = false;
}
/* LDC Rm,SSR */
void ldc__transfer_to_ssr(struct architectural_state * state, struct memory_map * map, const uint32_t m)
{
int64_t md, op1, ssr;
md = zero_extend1(state->sr.bits.md);
if (md == 0) return RESINST(state);
op1 = sign_extend32(REG(state, m));
ssr = op1;
state->ssr = _register(ssr);
state->is_delay_slot = false;
}
/* LDC Rm,SPC */
void ldc__transfer_to_spc(struct architectural_state * state, struct memory_map * map, const uint32_t m)
{
int64_t md, op1, spc;
md = zero_extend1(state->sr.bits.md);
if (md == 0) return RESINST(state);
op1 = sign_extend32(REG(state, m));
spc = op1;
state->spc = _register(spc);
state->is_delay_slot = false;
}
/* LDC Rm,DBR */
void ldc__transfer_to_dbr(struct architectural_state * state, struct memory_map * map, const uint32_t m)
{
int64_t md, op1, dbr;
md = zero_extend1(state->sr.bits.md);
if (md == 0) return RESINST(state);
op1 = sign_extend32(REG(state, m));
dbr = op1;
state->dbr = _register(dbr);
state->is_delay_slot = false;
}
/* LDC Rm,Rn_BANK */
void ldc__transfer_to_rn_bank(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n)
{
int64_t md, op1, rn_bank;
md = zero_extend1(state->sr.bits.md);
if (md == 0) return RESINST(state);
op1 = sign_extend32(REG(state, m));
rn_bank = op1;
REG_BANK(state, n) = _register(rn_bank);
state->is_delay_slot = false;
}
/* LDC.L @Rm+,SR */
void ldc_l__load_to_sr(struct architectural_state * state, struct memory_map * map, const uint32_t m)
{
int64_t md, op1, address, sr;
md = zero_extend1(state->sr.bits.md);
if (md == 0) return RESINST(state);
op1 = sign_extend32(REG(state, m));
address = zero_extend32(op1);
sr = sign_extend32(read_memory32(map, address));
op1 = op1 + 4;
REG(state, m) = _register(op1);
state->sr.value = _register(sr);
state->is_delay_slot = false;
}
/* LDC.L @Rm+,GBR */
void ldc_l__load_to_gbr(struct architectural_state * state, struct memory_map * map, const uint32_t m)
{
int64_t op1, address, gbr;
op1 = sign_extend32(REG(state, m));
address = zero_extend32(op1);
gbr = sign_extend32(read_memory32(map, address));
op1 = op1 + 4;
REG(state, m) = _register(op1);
state->gbr = _register(gbr);
state->is_delay_slot = false;
}
/* LDC.L @Rm+,VBR */
void ldc_l__load_to_vbr(struct architectural_state * state, struct memory_map * map, const uint32_t m)
{
int64_t md, op1, address, vbr;
md = zero_extend1(state->sr.bits.md);
if (md == 0) return RESINST(state);
op1 = sign_extend32(REG(state, m));
address = zero_extend32(op1);
vbr = sign_extend32(read_memory32(map, address));
op1 = op1 + 4;
REG(state, m) = _register(op1);
state->vbr = _register(vbr);
state->is_delay_slot = false;
}
/* LDC.L @Rm+,SSR */
void ldc_l__load_to_ssr(struct architectural_state * state, struct memory_map * map, const uint32_t m)
{
int64_t md, op1, address, ssr;
md = zero_extend1(state->sr.bits.md);
if (md == 0) return RESINST(state);
op1 = sign_extend32(REG(state, m));
address = zero_extend32(op1);
ssr = sign_extend32(read_memory32(map, address));
op1 = op1 + 4;
REG(state, m) = _register(op1);
state->ssr = _register(ssr);
state->is_delay_slot = false;
}
/* LDC.L @Rm+,SPC */
void ldc_l__load_to_spc(struct architectural_state * state, struct memory_map * map, const uint32_t m)
{
int64_t md, op1, address, spc;
md = zero_extend1(state->sr.bits.md);
if (md == 0) return RESINST(state);
op1 = sign_extend32(REG(state, m));
address = zero_extend32(op1);
spc = sign_extend32(read_memory32(map, address));
op1 = op1 + 4;
REG(state, m) = _register(op1);
state->spc = _register(spc);
state->is_delay_slot = false;
}
/* LDC.L @Rm+,DBR */
void ldc_l__load_to_dbr(struct architectural_state * state, struct memory_map * map, const uint32_t m)
{
int64_t md, op1, address, dbr;
md = zero_extend1(state->sr.bits.md);
if (md == 0) return RESINST(state);
op1 = sign_extend32(REG(state, m));
address = zero_extend32(op1);
dbr = sign_extend32(read_memory32(map, address));
op1 = op1 + 4;
REG(state, m) = _register(op1);
state->dbr = _register(dbr);
state->is_delay_slot = false;
}
/* LDC.L @Rm+,Rn_BANK */
void ldc_l__load_to_rn_bank(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n)
{
int64_t md, op1, address, rn_bank;
md = zero_extend1(state->sr.bits.md);
if (md == 0) return RESINST(state);
op1 = sign_extend32(REG(state, m));
address = zero_extend32(op1);
rn_bank = sign_extend32(read_memory32(map, address));
op1 = op1 + 4;
REG(state, m) = _register(op1);
REG_BANK(state, n) = _register(rn_bank);
state->is_delay_slot = false;
}
/* LDS Rm,MACH */
void lds__transfer_to_mach(struct architectural_state * state, struct memory_map * map, const uint32_t m)
{
int64_t op1, mach;
op1 = sign_extend32(REG(state, m));
mach = op1;
state->mach = zero_extend32(mach);
state->is_delay_slot = false;
}
/* LDS Rm,MACL */
void lds__transfer_to_macl(struct architectural_state * state, struct memory_map * map, const uint32_t m)
{
int64_t op1, macl;
op1 = sign_extend32(REG(state, m));
macl = op1;
state->macl = zero_extend32(macl);
state->is_delay_slot = false;
}
/* LDS Rm,PR */
void lds__transfer_to_pr(struct architectural_state * state, struct memory_map * map, const uint32_t m)
{
int64_t op1, newpr, delayedpr;
op1 = sign_extend32(REG(state, m));
newpr = op1;
delayedpr = newpr;
state->pr[1] = _register(newpr);
state->pr[2] = _register(delayedpr);
state->is_delay_slot = false;
}
/* LDS.L @Rm+,MACH */
void lds_l__load_to_mach(struct architectural_state * state, struct memory_map * map, const uint32_t m)
{
int64_t op1, address, mach;
op1 = sign_extend32(REG(state, m));
address = zero_extend32(op1);
mach = sign_extend32(read_memory32(map, address));
op1 = op1 + 4;
REG(state, m) = _register(op1);
state->mach = zero_extend32(mach);
state->is_delay_slot = false;
}
/* LDS.L @Rm+,MACL */
void lds_l__load_to_macl(struct architectural_state * state, struct memory_map * map, const uint32_t m)
{
int64_t op1, address, macl;
op1 = sign_extend32(REG(state, m));
address = zero_extend32(op1);
macl = sign_extend32(read_memory32(map, address));
op1 = op1 + 4;
REG(state, m) = _register(op1);
state->macl = zero_extend32(macl);
state->is_delay_slot = false;
}
/* LDS.L @Rm+,PR */
void lds_l__load_to_pr(struct architectural_state * state, struct memory_map * map, const uint32_t m)
{
int64_t op1, address, newpr, delayedpr;
op1 = sign_extend32(REG(state, m));
address = zero_extend32(op1);
newpr = sign_extend32(read_memory32(map, address));
delayedpr = newpr;
op1 = op1 + 4;
REG(state, m) = _register(op1);
state->pr[1] = _register(newpr);
state->pr[2] = _register(delayedpr);
state->is_delay_slot = false;
}
/* NOP */
void nop__no_operand(struct architectural_state * state, struct memory_map * map)
{
;
state->is_delay_slot = false;
}
/* RTE */
void rte__no_operand(struct architectural_state * state, struct memory_map * map)
{
  int64_t md, ssr, pc, target, delayedpc;
  /* Return from exception: delayed branch to SPC, restoring SR from SSR so
     the delay-slot instruction already executes under the restored SR.
     Privileged; illegal inside a delay slot. */
  md = zero_extend1(state->sr.bits.md);
  if (md == 0) return RESINST(state);
  ssr = sign_extend32(state->ssr);
  /* The SH-4 definition of RTE reads the SAVED program counter here
     ("pc <- SignExtend32(SPC)"), not the current PC.  Reading state->pc[0]
     would make RTE branch back onto itself instead of returning to the
     address saved at exception entry. */
  pc = sign_extend32(state->spc);
  if (is_delay_slot(state)) return ILLSLOT(state);
  target = pc;
  delayedpc = target & (~0x1);
  state->pc[2] = _register(delayedpc);
  state->sr.value = _register(ssr);
  state->is_delay_slot = true;
}
/* SETS */
void sets__no_operand(struct architectural_state * state, struct memory_map * map)
{
int64_t s;
s = 1;
state->sr.bits.s = bit(s);
state->is_delay_slot = false;
}
/* SETT */
void sett__no_operand(struct architectural_state * state, struct memory_map * map)
{
int64_t t;
t = 1;
state->sr.bits.t = bit(t);
state->is_delay_slot = false;
}
/* SLEEP */
void sleep__no_operand(struct architectural_state * state, struct memory_map * map)
{
int64_t md;
md = zero_extend1(state->sr.bits.md);
if (md == 0) return RESINST(state);
sleep(state);
state->is_delay_slot = false;
}
/* STC SR,Rn */
void stc__transfer_from_sr(struct architectural_state * state, struct memory_map * map, const uint32_t n)
{
int64_t md, sr, op1;
md = zero_extend1(state->sr.bits.md);
if (md == 0) return RESINST(state);
sr = sign_extend32(state->sr.value);
op1 = sr;
REG(state, n) = _register(op1);
state->is_delay_slot = false;
}
/* STC GBR,Rn */
void stc__transfer_from_gbr(struct architectural_state * state, struct memory_map * map, const uint32_t n)
{
int64_t gbr, op1;
gbr = sign_extend32(state->gbr);
op1 = gbr;
REG(state, n) = _register(op1);
state->is_delay_slot = false;
}
/* STC VBR,Rn */
void stc__transfer_from_vbr(struct architectural_state * state, struct memory_map * map, const uint32_t n)
{
int64_t md, vbr, op1;
md = zero_extend1(state->sr.bits.md);
if (md == 0) return RESINST(state);
vbr = sign_extend32(state->vbr);
op1 = vbr;
REG(state, n) = _register(op1);
state->is_delay_slot = false;
}
/* STC SSR,Rn */
void stc__transfer_from_ssr(struct architectural_state * state, struct memory_map * map, const uint32_t n)
{
int64_t md, ssr, op1;
md = zero_extend1(state->sr.bits.md);
if (md == 0) return RESINST(state);
ssr = sign_extend32(state->ssr);
op1 = ssr;
REG(state, n) = _register(op1);
state->is_delay_slot = false;
}
/* STC SPC,Rn */
void stc__transfer_from_spc(struct architectural_state * state, struct memory_map * map, const uint32_t n)
{
int64_t md, spc, op1;
md = zero_extend1(state->sr.bits.md);
if (md == 0) return RESINST(state);
spc = sign_extend32(state->spc);
op1 = spc;
REG(state, n) = _register(op1);
state->is_delay_slot = false;
}
/* STC SGR,Rn */
void stc__transfer_from_sgr(struct architectural_state * state, struct memory_map * map, const uint32_t n)
{
int64_t md, sgr, op1;
md = zero_extend1(state->sr.bits.md);
if (md == 0) return RESINST(state);
sgr = sign_extend32(state->sgr);
op1 = sgr;
REG(state, n) = _register(op1);
state->is_delay_slot = false;
}
/* STC DBR,Rn */
void stc__transfer_from_dbr(struct architectural_state * state, struct memory_map * map, const uint32_t n)
{
int64_t md, dbr, op1;
md = zero_extend1(state->sr.bits.md);
if (md == 0) return RESINST(state);
dbr = sign_extend32(state->dbr);
op1 = dbr;
REG(state, n) = _register(op1);
state->is_delay_slot = false;
}
/* STC Rm_BANK,Rn */
void stc__transfer_from_rm_bank(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n)
{
int64_t md, op1, op2;
md = zero_extend1(state->sr.bits.md);
if (md == 0) return RESINST(state);
op1 = sign_extend32(REG_BANK(state, m));
op2 = op1;
REG(state, n) = _register(op2);
state->is_delay_slot = false;
}
/* STC.L SR,@-Rn */
void stc_l__store_from_sr(struct architectural_state * state, struct memory_map * map, const uint32_t n)
{
int64_t md, sr, op1, address;
md = zero_extend1(state->sr.bits.md);
if (md == 0) return RESINST(state);
sr = sign_extend32(state->sr.value);
op1 = sign_extend32(REG(state, n));
address = zero_extend32(op1 - 4);
write_memory32(map, address, sr);
op1 = address;
REG(state, n) = _register(op1);
state->is_delay_slot = false;
}
/* STC.L GBR,@-Rn */
void stc_l__store_from_gbr(struct architectural_state * state, struct memory_map * map, const uint32_t n)
{
int64_t gbr, op1, address;
gbr = sign_extend32(state->gbr);
op1 = sign_extend32(REG(state, n));
address = zero_extend32(op1 - 4);
write_memory32(map, address, gbr);
op1 = address;
REG(state, n) = _register(op1);
state->is_delay_slot = false;
}
/* STC.L VBR,@-Rn */
void stc_l__store_from_vbr(struct architectural_state * state, struct memory_map * map, const uint32_t n)
{
int64_t md, vbr, op1, address;
md = zero_extend1(state->sr.bits.md);
if (md == 0) return RESINST(state);
vbr = sign_extend32(state->vbr);
op1 = sign_extend32(REG(state, n));
address = zero_extend32(op1 - 4);
write_memory32(map, address, vbr);
op1 = address;
REG(state, n) = _register(op1);
state->is_delay_slot = false;
}
/* STC.L SSR,@-Rn */
void stc_l__store_from_ssr(struct architectural_state * state, struct memory_map * map, const uint32_t n)
{
int64_t md, ssr, op1, address;
md = zero_extend1(state->sr.bits.md);
if (md == 0) return RESINST(state);
ssr = sign_extend32(state->ssr);
op1 = sign_extend32(REG(state, n));
address = zero_extend32(op1 - 4);
write_memory32(map, address, ssr);
op1 = address;
REG(state, n) = _register(op1);
state->is_delay_slot = false;
}
/* STC.L SPC,@-Rn */
void stc_l__store_from_spc(struct architectural_state * state, struct memory_map * map, const uint32_t n)
{
int64_t md, spc, op1, address;
md = zero_extend1(state->sr.bits.md);
if (md == 0) return RESINST(state);
spc = sign_extend32(state->spc);
op1 = sign_extend32(REG(state, n));
address = zero_extend32(op1 - 4);
write_memory32(map, address, spc);
op1 = address;
REG(state, n) = _register(op1);
state->is_delay_slot = false;
}
/* STC.L SGR,@-Rn */
void stc_l__store_from_sgr(struct architectural_state * state, struct memory_map * map, const uint32_t n)
{
int64_t md, sgr, op1, address;
md = zero_extend1(state->sr.bits.md);
if (md == 0) return RESINST(state);
sgr = sign_extend32(state->sgr);
op1 = sign_extend32(REG(state, n));
address = zero_extend32(op1 - 4);
write_memory32(map, address, sgr);
op1 = address;
REG(state, n) = _register(op1);
state->is_delay_slot = false;
}
/* STC.L DBR,@-Rn */
void stc_l__store_from_dbr(struct architectural_state * state, struct memory_map * map, const uint32_t n)
{
int64_t md, dbr, op1, address;
md = zero_extend1(state->sr.bits.md);
if (md == 0) return RESINST(state);
dbr = sign_extend32(state->dbr);
op1 = sign_extend32(REG(state, n));
address = zero_extend32(op1 - 4);
write_memory32(map, address, dbr);
op1 = address;
REG(state, n) = _register(op1);
state->is_delay_slot = false;
}
/* STC.L Rm_BANK,@-Rn */
void stc_l__store_from_rm_bank(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n)
{
int64_t md, op1, op2, address;
md = zero_extend1(state->sr.bits.md);
if (md == 0) return RESINST(state);
op1 = sign_extend32(REG_BANK(state, m));
op2 = sign_extend32(REG(state, n));
address = zero_extend32(op2 - 4);
write_memory32(map, address, op1);
op2 = address;
REG(state, n) = _register(op2);
state->is_delay_slot = false;
}
/* STS MACH,Rn */
void sts__transfer_from_mach(struct architectural_state * state, struct memory_map * map, const uint32_t n)
{
int64_t mach, op1;
mach = sign_extend32(state->mach);
op1 = mach;
REG(state, n) = _register(op1);
state->is_delay_slot = false;
}
/* STS MACL,Rn */
void sts__transfer_from_macl(struct architectural_state * state, struct memory_map * map, const uint32_t n)
{
int64_t macl, op1;
macl = sign_extend32(state->macl);
op1 = macl;
REG(state, n) = _register(op1);
state->is_delay_slot = false;
}
/* STS PR,Rn */
void sts__transfer_from_pr(struct architectural_state * state, struct memory_map * map, const uint32_t n)
{
int64_t pr, op1;
pr = sign_extend32(state->pr[1]);
op1 = pr;
REG(state, n) = _register(op1);
state->is_delay_slot = false;
}
/* STS.L MACH,@-Rn */
void sts_l__store_from_mach(struct architectural_state * state, struct memory_map * map, const uint32_t n)
{
int64_t mach, op1, address;
mach = sign_extend32(state->mach);
op1 = sign_extend32(REG(state, n));
address = zero_extend32(op1 - 4);
write_memory32(map, address, mach);
op1 = address;
REG(state, n) = _register(op1);
state->is_delay_slot = false;
}
/* STS.L MACL,@-Rn */
void sts_l__store_from_macl(struct architectural_state * state, struct memory_map * map, const uint32_t n)
{
int64_t macl, op1, address;
macl = sign_extend32(state->macl);
op1 = sign_extend32(REG(state, n));
address = zero_extend32(op1 - 4);
write_memory32(map, address, macl);
op1 = address;
REG(state, n) = _register(op1);
state->is_delay_slot = false;
}
/* STS.L PR,@-Rn */
void sts_l__store_from_pr(struct architectural_state * state, struct memory_map * map, const uint32_t n)
{
int64_t pr, op1, address;
pr = sign_extend32(state->pr[1]);
op1 = sign_extend32(REG(state, n));
address = zero_extend32(op1 - 4);
write_memory32(map, address, pr);
op1 = address;
REG(state, n) = _register(op1);
state->is_delay_slot = false;
}
/* TRAPA #imm */
void trapa__immediate(struct architectural_state * state, struct memory_map * map, const uint32_t i)
{
  int64_t imm;
  /* Unconditional trap: raises the TRAP exception with the zero-extended
     8-bit immediate as its code.  Illegal inside a delay slot. */
  imm = zero_extend8(i);
  if (is_delay_slot(state)) return ILLSLOT(state);
  return TRAP(state, imm);
  /* NOTE(review): unreachable -- the generator emits the common epilogue
     even after the unconditional TRAP return above. */
  state->is_delay_slot = false;
}
/* LDS Rm,FPSCR */
void lds__transfer_to_fpscr(struct architectural_state * state, struct memory_map * map, const uint32_t m)
{
int64_t sr, op1;
sr = zero_extend32(state->sr.value);
op1 = sign_extend32(REG(state, m));
if (fpu_is_disabled(sr) && is_delay_slot(state)) return SLOTFPUDIS(state);
if (fpu_is_disabled(sr)) return FPUDIS(state);
state->fpscr.value = zero_extend32(op1);
state->is_delay_slot = false;
}
/* LDS Rm,FPUL */
void lds__transfer_to_fpul(struct architectural_state * state, struct memory_map * map, const uint32_t m)
{
int64_t sr, op1, fpul;
sr = zero_extend32(state->sr.value);
op1 = sign_extend32(REG(state, m));
if (fpu_is_disabled(sr) && is_delay_slot(state)) return SLOTFPUDIS(state);
if (fpu_is_disabled(sr)) return FPUDIS(state);
fpul = op1;
state->fpul = zero_extend32(fpul);
state->is_delay_slot = false;
}
/* LDS.L @Rm+,FPSCR */
void lds_l__load_to_fpscr(struct architectural_state * state, struct memory_map * map, const uint32_t m)
{
/* Load FPSCR from (Rm), then post-increment Rm by 4. */
const int64_t sr = zero_extend32(state->sr.value);
const int64_t rm = sign_extend32(REG(state, m));
if (fpu_is_disabled(sr)) {
if (is_delay_slot(state)) return SLOTFPUDIS(state);
return FPUDIS(state);
}
const int64_t loaded = read_memory32(map, zero_extend32(rm));
REG(state, m) = _register(rm + 4);
state->fpscr.value = zero_extend32(loaded);
state->is_delay_slot = false;
}
/* LDS.L @Rm+,FPUL */
void lds_l__load_to_fpul(struct architectural_state * state, struct memory_map * map, const uint32_t m)
{
/* Load FPUL from (Rm), then post-increment Rm by 4. */
const int64_t sr = zero_extend32(state->sr.value);
const int64_t rm = sign_extend32(REG(state, m));
if (fpu_is_disabled(sr)) {
if (is_delay_slot(state)) return SLOTFPUDIS(state);
return FPUDIS(state);
}
const int64_t loaded = read_memory32(map, zero_extend32(rm));
REG(state, m) = _register(rm + 4);
state->fpul = zero_extend32(loaded);
state->is_delay_slot = false;
}
/* STS FPSCR,Rn */
void sts__transfer_from_fpscr(struct architectural_state * state, struct memory_map * map, const uint32_t n)
{
/* Copy FPSCR into Rn; FPU-disable exceptions are raised first when applicable. */
const int64_t sr = zero_extend32(state->sr.value);
const int64_t fpscr = zero_extend32(state->fpscr.value);
if (fpu_is_disabled(sr)) {
if (is_delay_slot(state)) return SLOTFPUDIS(state);
return FPUDIS(state);
}
REG(state, n) = _register(fpscr);
state->is_delay_slot = false;
}
/* STS FPUL,Rn */
void sts__transfer_from_fpul(struct architectural_state * state, struct memory_map * map, const uint32_t n)
{
/* Copy FPUL into Rn; FPU-disable exceptions are raised first when applicable. */
const int64_t sr = zero_extend32(state->sr.value);
const int64_t fpul = sign_extend32(state->fpul);
if (fpu_is_disabled(sr)) {
if (is_delay_slot(state)) return SLOTFPUDIS(state);
return FPUDIS(state);
}
REG(state, n) = _register(fpul);
state->is_delay_slot = false;
}
/* STS.L FPSCR,@-Rn */
void sts_l__store_from_fpscr(struct architectural_state * state, struct memory_map * map, const uint32_t n)
{
/* Store FPSCR with pre-decrement addressing: Rn -= 4; (Rn) = FPSCR.
   FPU-disable exceptions are raised before any state is modified. */
const int64_t sr = zero_extend32(state->sr.value);
const int64_t fpscr = zero_extend32(state->fpscr.value);
const int64_t rn = sign_extend32(REG(state, n));
if (fpu_is_disabled(sr)) {
if (is_delay_slot(state)) return SLOTFPUDIS(state);
return FPUDIS(state);
}
const int64_t addr = zero_extend32(rn - 4);
write_memory32(map, addr, fpscr);
/* Write-back: Rn receives the decremented address. */
REG(state, n) = _register(addr);
state->is_delay_slot = false;
}
/* STS.L FPUL,@-Rn */
void sts_l__store_from_fpul(struct architectural_state * state, struct memory_map * map, const uint32_t n)
{
/* Store FPUL with pre-decrement addressing: Rn -= 4; (Rn) = FPUL.
   FPU-disable exceptions are raised before any state is modified. */
const int64_t sr = zero_extend32(state->sr.value);
const int64_t fpul = sign_extend32(state->fpul);
const int64_t rn = sign_extend32(REG(state, n));
if (fpu_is_disabled(sr)) {
if (is_delay_slot(state)) return SLOTFPUDIS(state);
return FPUDIS(state);
}
const int64_t addr = zero_extend32(rn - 4);
write_memory32(map, addr, fpul);
/* Write-back: Rn receives the decremented address. */
REG(state, n) = _register(addr);
state->is_delay_slot = false;
}