diff --git a/.gitignore b/.gitignore index e74cb7e..1bacacd 100644 --- a/.gitignore +++ b/.gitignore @@ -2,4 +2,6 @@ __pycache__ *.o *.elf *.bin -*.pyc \ No newline at end of file +*.pyc +*.d +c/main \ No newline at end of file diff --git a/bitfield.py b/bitfield.py new file mode 100644 index 0000000..3eb40e2 --- /dev/null +++ b/bitfield.py @@ -0,0 +1,74 @@ +sr_bits = ( + ("T" , 0 , 1), # true/false condition + ("S" , 1 , 1), # saturation + ("IMASK", 4 , 4), # interrupt mask level, 4 bits + ("Q" , 8 , 1), # state for divide step + ("M" , 9 , 1), # state for divide step + ("FD" , 15, 1), # FPU disable + ("BL" , 28, 1), # Exception/interrupt block bit + ("RB" , 29, 1), # General register bank specifier in privileged mode + ("MD" , 30, 1), # Processor mode +) + +#define FPSCR__RM (1 << 0 ) /* Rounding mode */ +#define FPSCR__FLAG_INEXACT (1 << 2 ) +#define FPSCR__FLAG_UNDERFLOW (1 << 3 ) +#define FPSCR__FLAG_OVERFLOW (1 << 4 ) +#define FPSCR__FLAG_DIVISION_BY_ZERO (1 << 5 ) +#define FPSCR__FLAG_INVALID_OPERATION (1 << 6 ) +#define FPSCR__ENABLE_INEXACT (1 << 7 ) +#define FPSCR__ENABLE_UNDERFLOW (1 << 8 ) +#define FPSCR__ENABLE_OVERFLOW (1 << 9 ) +#define FPSCR__ENABLE_DIVISION_BY_ZERO (1 << 10) +#define FPSCR__ENABLE_INVALID (1 << 11) +#define FPSCR__CAUSE_INEXACT (1 << 12) +#define FPSCR__CAUSE_UNDERFLOW (1 << 13) +#define FPSCR__CAUSE_OVERFLOW (1 << 14) +#define FPSCR__CAUSE_DIVISION_BY_ZERO (1 << 15) +#define FPSCR__CAUSE_INVALID (1 << 16) +#define FPSCR__CAUSE_FPU_ERROR (1 << 17) +#define FPSCR__DN (1 << 18) /* Denormalization mode */ +#define FPSCR__PR (1 << 19) /* Precision mode */ +#define FPSCR__SZ (1 << 20) /* Transfer size mode */ +#define FPSCR__FR (1 << 21) /* Floating-point register bank */ + +def generate_bitfield(bits, start=0, end=31): + res = 0 + current = start + for name, index, length in bits: + if index != current: + size = index - current + yield f"_res{res}", size + res += 1 + yield name, length + current = index + 1 + + end_len = end + 1 + if current != end_len: + yield f"_res{res}", end_len - current + +def generate_bitfield_little(bits): + return generate_bitfield(bits) + +def generate_bitfield_big(bits): + return reversed(list(generate_bitfield(bits))) + +def generate(struct_name, bits): + yield "#pragma once" + yield "" + yield "#include <stdint.h>" + yield "" + yield f"struct {struct_name} {{" + yield "#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__" + for name, size in generate_bitfield_little(bits): + yield f" uint32_t {name.lower()} : {size};" + yield "#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__" + for name, size in generate_bitfield_big(bits): + yield f" uint32_t {name.lower()} : {size};" + yield "#else" + yield '# error "unsupported endianness"' + yield "#endif" + yield "};" + +if __name__ == "__main__": + print('\n'.join(generate("sr_bits", sr_bits))) diff --git a/build.sh b/build.sh new file mode 100644 index 0000000..813ace2 --- /dev/null +++ b/build.sh @@ -0,0 +1,3 @@ +PYTHONPATH=python/ python transform.py c/impl.c c/impl.h +PYTHONPATH=python/ python generate_decoder.py c/decode_execute.c c/decode_print.c +PYTHONPATH=python/ python bitfield.py > c/sr_bits.h diff --git a/c/Makefile b/c/Makefile new file mode 100644 index 0000000..3faa0d7 --- /dev/null +++ b/c/Makefile @@ -0,0 +1,30 @@ +DEBUG = -g -gdwarf-4 + +AFLAGS += --fatal-warnings + +CFLAGS += -falign-functions=4 -ffunction-sections -fdata-sections -fshort-enums +CFLAGS += -Wall -Werror -Wfatal-errors -Wno-error=dangling-else +CFLAGS += -std=c2x + +DEPFLAGS = -MMD -MP + +CC = $(TARGET)gcc + +OBJS = \ + 
decode_execute.o \ + decode_print.o \ + exception.o \ + execute.o \ + impl.o \ + main.o \ + ram.o + +all: main + +%.o: %.c + $(CC) $(CARCH) $(CFLAGS) $(OPT) $(DEBUG) $(DEPFLAGS) -MF ${<}.d -c $< -o $@ + +main: $(OBJS) + $(CC) $^ -o $@ + +-include $(shell find -type f -name '*.d') diff --git a/c/decode.h b/c/decode.h new file mode 100644 index 0000000..06a42fb --- /dev/null +++ b/c/decode.h @@ -0,0 +1,6 @@ +#pragma once + +enum decode_status { + DECODE__DEFINED, + DECODE__UNDEFINED +}; diff --git a/c/decode_execute.c b/c/decode_execute.c new file mode 100644 index 0000000..91b20fd --- /dev/null +++ b/c/decode_execute.c @@ -0,0 +1,937 @@ +#include "decode_execute.h" +#include "impl.h" + +enum decode_status decode_and_execute_instruction(struct architectural_state * state, struct memory_map * map, uint16_t code) +{ + switch (code & 0b1111000000000000) { + case 0b0001000000000000: // MOV.L Rm,@(disp,Rn) + { + uint32_t m = (code >> 4) & ((1 << 4) - 1); + uint32_t d = (code >> 0) & ((1 << 4) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 1); + mov_l__store_register_indirect_with_displacement(state, map, m, d, n); + return DECODE__DEFINED; + } + case 0b0101000000000000: // MOV.L @(disp,Rm),Rn + { + uint32_t d = (code >> 0) & ((1 << 4) - 1); + uint32_t m = (code >> 4) & ((1 << 4) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 1); + mov_l__load_register_indirect_with_displacement(state, map, d, m, n); + return DECODE__DEFINED; + } + case 0b0111000000000000: // ADD #imm,Rn + { + uint32_t i = (code >> 0) & ((1 << 8) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 1); + add__immediate(state, map, i, n); + return DECODE__DEFINED; + } + case 0b1001000000000000: // MOV.W @(disp,PC),Rn + { + uint32_t d = (code >> 0) & ((1 << 8) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 1); + mov_w__pc_relative_with_displacement(state, map, d, n); + return DECODE__DEFINED; + } + case 0b1010000000000000: // BRA label + { + uint32_t d = (code >> 0) & ((1 << 12) - 1); + bra__pc_relative(state, map, d); + return DECODE__DEFINED; + } + case 0b1011000000000000: // BSR label + { + uint32_t d = (code >> 0) & ((1 << 12) - 1); + bsr__pc_relative(state, map, d); + return DECODE__DEFINED; + } + case 0b1101000000000000: // MOV.L @(disp,PC),Rn + { + uint32_t d = (code >> 0) & ((1 << 8) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 1); + mov_l__pc_relative_with_displacement(state, map, d, n); + return DECODE__DEFINED; + } + case 0b1110000000000000: // MOV #imm,Rn + { + uint32_t i = (code >> 0) & ((1 << 8) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 1); + mov__immediate(state, map, i, n); + return DECODE__DEFINED; + } + } + switch (code & 0b1111000000001111) { + case 0b0000000000000100: // MOV.B Rm,@(R0,Rn) + { + uint32_t m = (code >> 4) & ((1 << 4) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 1); + mov_b__store_indexed_register_indirect(state, map, m, n); + return DECODE__DEFINED; + } + case 0b0000000000000101: // MOV.W Rm,@(R0,Rn) + { + uint32_t m = (code >> 4) & ((1 << 4) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 1); + mov_w__store_indexed_register_indirect(state, map, m, n); + return DECODE__DEFINED; + } + case 0b0000000000000110: // MOV.L Rm,@(R0,Rn) + { + uint32_t m = (code >> 4) & ((1 << 4) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 1); + mov_l__store_indexed_register_indirect(state, map, m, n); + return DECODE__DEFINED; + } + case 0b0000000000000111: // MUL.L Rm,Rn + { + uint32_t m = (code >> 4) & ((1 << 4) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 1); + mul_l__source_and_destination_operands(state, map, m, 
n); + return DECODE__DEFINED; + } + case 0b0000000000001100: // MOV.B @(R0,Rm),Rn + { + uint32_t m = (code >> 4) & ((1 << 4) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 1); + mov_b__load_indexed_register_indirect(state, map, m, n); + return DECODE__DEFINED; + } + case 0b0000000000001101: // MOV.W @(R0,Rm),Rn + { + uint32_t m = (code >> 4) & ((1 << 4) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 1); + mov_w__load_indexed_register_indirect(state, map, m, n); + return DECODE__DEFINED; + } + case 0b0000000000001110: // MOV.L @(R0,Rm),Rn + { + uint32_t m = (code >> 4) & ((1 << 4) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 1); + mov_l__load_indexed_register_indirect(state, map, m, n); + return DECODE__DEFINED; + } + case 0b0000000000001111: // MAC.L @Rm+,@Rn+ + { + uint32_t m = (code >> 4) & ((1 << 4) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 1); + mac_l__multiply_and_accumulate_operation(state, map, m, n); + return DECODE__DEFINED; + } + case 0b0010000000000000: // MOV.B Rm,@Rn + { + uint32_t m = (code >> 4) & ((1 << 4) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 1); + mov_b__store_register_direct_data_transfer(state, map, m, n); + return DECODE__DEFINED; + } + case 0b0010000000000001: // MOV.W Rm,@Rn + { + uint32_t m = (code >> 4) & ((1 << 4) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 1); + mov_w__store_register_direct_data_transfer(state, map, m, n); + return DECODE__DEFINED; + } + case 0b0010000000000010: // MOV.L Rm,@Rn + { + uint32_t m = (code >> 4) & ((1 << 4) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 1); + mov_l__store_register_direct_data_transfer(state, map, m, n); + return DECODE__DEFINED; + } + case 0b0010000000000100: // MOV.B Rm,@-Rn + { + uint32_t m = (code >> 4) & ((1 << 4) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 1); + mov_b__store_direct_data_transfer_from_register(state, map, m, n); + return DECODE__DEFINED; + } + case 0b0010000000000101: // MOV.W Rm,@-Rn + { + uint32_t m = (code >> 4) & ((1 << 4) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 1); + mov_w__store_direct_data_transfer_from_register(state, map, m, n); + return DECODE__DEFINED; + } + case 0b0010000000000110: // MOV.L Rm,@-Rn + { + uint32_t m = (code >> 4) & ((1 << 4) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 1); + mov_l__store_direct_data_transfer_from_register(state, map, m, n); + return DECODE__DEFINED; + } + case 0b0010000000000111: // DIV0S Rm,Rn + { + uint32_t m = (code >> 4) & ((1 << 4) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 1); + div0s__source_and_destination_operands(state, map, m, n); + return DECODE__DEFINED; + } + case 0b0010000000001000: // TST Rm,Rn + { + uint32_t m = (code >> 4) & ((1 << 4) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 1); + tst__source_and_destination_operands(state, map, m, n); + return DECODE__DEFINED; + } + case 0b0010000000001001: // AND Rm,Rn + { + uint32_t m = (code >> 4) & ((1 << 4) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 1); + and__source_and_destination_operands(state, map, m, n); + return DECODE__DEFINED; + } + case 0b0010000000001010: // XOR Rm,Rn + { + uint32_t m = (code >> 4) & ((1 << 4) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 1); + xor__source_and_destination_operands(state, map, m, n); + return DECODE__DEFINED; + } + case 0b0010000000001011: // OR Rm,Rn + { + uint32_t m = (code >> 4) & ((1 << 4) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 1); + or__source_and_destination_operands(state, map, m, n); + return DECODE__DEFINED; + } + case 0b0010000000001100: // CMP/STR Rm,Rn + { + uint32_t m = (code >> 4) & 
((1 << 4) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 1); + cmp_str__source_and_destination_operands(state, map, m, n); + return DECODE__DEFINED; + } + case 0b0010000000001101: // XTRCT Rm,Rn + { + uint32_t m = (code >> 4) & ((1 << 4) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 1); + xtrct__source_and_destination_operands(state, map, m, n); + return DECODE__DEFINED; + } + case 0b0010000000001110: // MULU.W Rm,Rn + { + uint32_t m = (code >> 4) & ((1 << 4) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 1); + mulu_w__source_and_destination_operands(state, map, m, n); + return DECODE__DEFINED; + } + case 0b0010000000001111: // MULS.W Rm,Rn + { + uint32_t m = (code >> 4) & ((1 << 4) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 1); + muls_w__source_and_destination_operands(state, map, m, n); + return DECODE__DEFINED; + } + case 0b0011000000000000: // CMP/EQ Rm,Rn + { + uint32_t m = (code >> 4) & ((1 << 4) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 1); + cmp_eq__source_and_destination_operands(state, map, m, n); + return DECODE__DEFINED; + } + case 0b0011000000000010: // CMP/HS Rm,Rn + { + uint32_t m = (code >> 4) & ((1 << 4) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 1); + cmp_hs__source_and_destination_operands(state, map, m, n); + return DECODE__DEFINED; + } + case 0b0011000000000011: // CMP/GE Rm,Rn + { + uint32_t m = (code >> 4) & ((1 << 4) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 1); + cmp_ge__source_and_destination_operands(state, map, m, n); + return DECODE__DEFINED; + } + case 0b0011000000000100: // DIV1 Rm,Rn + { + uint32_t m = (code >> 4) & ((1 << 4) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 1); + div1__source_and_destination_operands(state, map, m, n); + return DECODE__DEFINED; + } + case 0b0011000000000101: // DMULU.L Rm,Rn + { + uint32_t m = (code >> 4) & ((1 << 4) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 1); + dmulu_l__source_and_destination_operands(state, map, m, n); + return DECODE__DEFINED; + } + case 0b0011000000000110: // CMP/HI Rm,Rn + { + uint32_t m = (code >> 4) & ((1 << 4) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 1); + cmp_hi__source_and_destination_operands(state, map, m, n); + return DECODE__DEFINED; + } + case 0b0011000000000111: // CMP/GT Rm,Rn + { + uint32_t m = (code >> 4) & ((1 << 4) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 1); + cmp_gt__source_and_destination_operands(state, map, m, n); + return DECODE__DEFINED; + } + case 0b0011000000001000: // SUB Rm,Rn + { + uint32_t m = (code >> 4) & ((1 << 4) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 1); + sub__source_and_destination_operands(state, map, m, n); + return DECODE__DEFINED; + } + case 0b0011000000001010: // SUBC Rm,Rn + { + uint32_t m = (code >> 4) & ((1 << 4) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 1); + subc__source_and_destination_operands(state, map, m, n); + return DECODE__DEFINED; + } + case 0b0011000000001011: // SUBV Rm,Rn + { + uint32_t m = (code >> 4) & ((1 << 4) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 1); + subv__source_and_destination_operands(state, map, m, n); + return DECODE__DEFINED; + } + case 0b0011000000001100: // ADD Rm,Rn + { + uint32_t m = (code >> 4) & ((1 << 4) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 1); + add__source_and_destination_operands(state, map, m, n); + return DECODE__DEFINED; + } + case 0b0011000000001101: // DMULS.L Rm,Rn + { + uint32_t m = (code >> 4) & ((1 << 4) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 1); + dmuls_l__source_and_destination_operands(state, map, m, n); + return DECODE__DEFINED; + } + 
case 0b0011000000001110: // ADDC Rm,Rn + { + uint32_t m = (code >> 4) & ((1 << 4) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 1); + addc__source_and_destination_operands(state, map, m, n); + return DECODE__DEFINED; + } + case 0b0011000000001111: // ADDV Rm,Rn + { + uint32_t m = (code >> 4) & ((1 << 4) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 1); + addv__source_and_destination_operands(state, map, m, n); + return DECODE__DEFINED; + } + case 0b0100000000001111: // MAC.W @Rm+,@Rn+ + { + uint32_t m = (code >> 4) & ((1 << 4) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 1); + mac_w__multiply_and_accumulate_operation(state, map, m, n); + return DECODE__DEFINED; + } + case 0b0110000000000000: // MOV.B @Rm,Rn + { + uint32_t m = (code >> 4) & ((1 << 4) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 1); + mov_b__load_register_direct_data_transfer(state, map, m, n); + return DECODE__DEFINED; + } + case 0b0110000000000001: // MOV.W @Rm,Rn + { + uint32_t m = (code >> 4) & ((1 << 4) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 1); + mov_w__load_register_direct_data_transfer(state, map, m, n); + return DECODE__DEFINED; + } + case 0b0110000000000010: // MOV.L @Rm,Rn + { + uint32_t m = (code >> 4) & ((1 << 4) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 1); + mov_l__load_register_direct_data_transfer(state, map, m, n); + return DECODE__DEFINED; + } + case 0b0110000000000011: // MOV Rm,Rn + { + uint32_t m = (code >> 4) & ((1 << 4) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 1); + mov__source_and_destination_operands(state, map, m, n); + return DECODE__DEFINED; + } + case 0b0110000000000100: // MOV.B @Rm+,Rn + { + uint32_t m = (code >> 4) & ((1 << 4) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 1); + mov_b__load_direct_data_transfer_from_register(state, map, m, n); + return DECODE__DEFINED; + } + case 0b0110000000000101: // MOV.W @Rm+,Rn + { + uint32_t m = (code >> 4) & ((1 << 4) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 1); + mov_w__load_direct_data_transfer_from_register(state, map, m, n); + return DECODE__DEFINED; + } + case 0b0110000000000110: // MOV.L @Rm+,Rn + { + uint32_t m = (code >> 4) & ((1 << 4) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 1); + mov_l__load_direct_data_transfer_from_register(state, map, m, n); + return DECODE__DEFINED; + } + case 0b0110000000000111: // NOT Rm,Rn + { + uint32_t m = (code >> 4) & ((1 << 4) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 1); + not__source_and_destination_operands(state, map, m, n); + return DECODE__DEFINED; + } + case 0b0110000000001000: // SWAP.B Rm,Rn + { + uint32_t m = (code >> 4) & ((1 << 4) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 1); + swap_b__source_and_destination_operands(state, map, m, n); + return DECODE__DEFINED; + } + case 0b0110000000001001: // SWAP.W Rm,Rn + { + uint32_t m = (code >> 4) & ((1 << 4) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 1); + swap_w__source_and_destination_operands(state, map, m, n); + return DECODE__DEFINED; + } + case 0b0110000000001010: // NEGC Rm,Rn + { + uint32_t m = (code >> 4) & ((1 << 4) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 1); + negc__source_and_destination_operands(state, map, m, n); + return DECODE__DEFINED; + } + case 0b0110000000001011: // NEG Rm,Rn + { + uint32_t m = (code >> 4) & ((1 << 4) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 1); + neg__source_and_destination_operands(state, map, m, n); + return DECODE__DEFINED; + } + case 0b0110000000001100: // EXTU.B Rm,Rn + { + uint32_t m = (code >> 4) & ((1 << 4) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 
1); + extu_b__source_and_destination_operands(state, map, m, n); + return DECODE__DEFINED; + } + case 0b0110000000001101: // EXTU.W Rm,Rn + { + uint32_t m = (code >> 4) & ((1 << 4) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 1); + extu_w__source_and_destination_operands(state, map, m, n); + return DECODE__DEFINED; + } + case 0b0110000000001110: // EXTS.B Rm,Rn + { + uint32_t m = (code >> 4) & ((1 << 4) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 1); + exts_b__source_and_destination_operands(state, map, m, n); + return DECODE__DEFINED; + } + case 0b0110000000001111: // EXTS.W Rm,Rn + { + uint32_t m = (code >> 4) & ((1 << 4) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 1); + exts_w__source_and_destination_operands(state, map, m, n); + return DECODE__DEFINED; + } + } + switch (code & 0b1111111100000000) { + case 0b1000000000000000: // MOV.B R0,@(disp,Rn) + { + uint32_t d = (code >> 0) & ((1 << 4) - 1); + uint32_t n = (code >> 4) & ((1 << 4) - 1); + mov_b__store_register_indirect_with_displacement(state, map, d, n); + return DECODE__DEFINED; + } + case 0b1000000100000000: // MOV.W R0,@(disp,Rn) + { + uint32_t d = (code >> 0) & ((1 << 4) - 1); + uint32_t n = (code >> 4) & ((1 << 4) - 1); + mov_w__store_register_indirect_with_displacement(state, map, d, n); + return DECODE__DEFINED; + } + case 0b1000010000000000: // MOV.B @(disp,Rm),R0 + { + uint32_t d = (code >> 0) & ((1 << 4) - 1); + uint32_t m = (code >> 4) & ((1 << 4) - 1); + mov_b__load_register_indirect_with_displacement(state, map, d, m); + return DECODE__DEFINED; + } + case 0b1000010100000000: // MOV.W @(disp,Rm),R0 + { + uint32_t d = (code >> 0) & ((1 << 4) - 1); + uint32_t m = (code >> 4) & ((1 << 4) - 1); + mov_w__load_register_indirect_with_displacement(state, map, d, m); + return DECODE__DEFINED; + } + case 0b1000100000000000: // CMP/EQ #imm,R0 + { + uint32_t i = (code >> 0) & ((1 << 8) - 1); + cmp_eq__immediate(state, map, i); + return DECODE__DEFINED; + } + case 0b1000100100000000: // BT label + { + uint32_t d = (code >> 0) & ((1 << 8) - 1); + bt__pc_relative(state, map, d); + return DECODE__DEFINED; + } + case 0b1000101100000000: // BF label + { + uint32_t d = (code >> 0) & ((1 << 8) - 1); + bf__pc_relative(state, map, d); + return DECODE__DEFINED; + } + case 0b1000110100000000: // BT/S label + { + uint32_t d = (code >> 0) & ((1 << 8) - 1); + bt_s__pc_relative(state, map, d); + return DECODE__DEFINED; + } + case 0b1000111100000000: // BF/S label + { + uint32_t d = (code >> 0) & ((1 << 8) - 1); + bf_s__pc_relative(state, map, d); + return DECODE__DEFINED; + } + case 0b1100000000000000: // MOV.B R0,@(disp,GBR) + { + uint32_t d = (code >> 0) & ((1 << 8) - 1); + mov_b__store_gbr_indirect_with_displacement(state, map, d); + return DECODE__DEFINED; + } + case 0b1100000100000000: // MOV.W R0,@(disp,GBR) + { + uint32_t d = (code >> 0) & ((1 << 8) - 1); + mov_w__store_gbr_indirect_with_displacement(state, map, d); + return DECODE__DEFINED; + } + case 0b1100001000000000: // MOV.L R0,@(disp,GBR) + { + uint32_t d = (code >> 0) & ((1 << 8) - 1); + mov_l__store_gbr_indirect_with_displacement(state, map, d); + return DECODE__DEFINED; + } + case 0b1100001100000000: // TRAPA #imm + { + uint32_t i = (code >> 0) & ((1 << 8) - 1); + trapa__immediate(state, map, i); + return DECODE__DEFINED; + } + case 0b1100010000000000: // MOV.B @(disp,GBR),R0 + { + uint32_t d = (code >> 0) & ((1 << 8) - 1); + mov_b__load_gbr_indirect_with_displacement(state, map, d); + return DECODE__DEFINED; + } + case 0b1100010100000000: // MOV.W @(disp,GBR),R0 + { + 
uint32_t d = (code >> 0) & ((1 << 8) - 1); + mov_w__load_gbr_indirect_with_displacement(state, map, d); + return DECODE__DEFINED; + } + case 0b1100011000000000: // MOV.L @(disp,GBR),R0 + { + uint32_t d = (code >> 0) & ((1 << 8) - 1); + mov_l__load_gbr_indirect_with_displacement(state, map, d); + return DECODE__DEFINED; + } + case 0b1100011100000000: // MOVA @(disp,PC),R0 + { + uint32_t d = (code >> 0) & ((1 << 8) - 1); + mova__pc_relative_with_displacement(state, map, d); + return DECODE__DEFINED; + } + case 0b1100100000000000: // TST #imm,R0 + { + uint32_t i = (code >> 0) & ((1 << 8) - 1); + tst__immediate(state, map, i); + return DECODE__DEFINED; + } + case 0b1100100100000000: // AND #imm,R0 + { + uint32_t i = (code >> 0) & ((1 << 8) - 1); + and__immediate(state, map, i); + return DECODE__DEFINED; + } + case 0b1100101000000000: // XOR #imm,R0 + { + uint32_t i = (code >> 0) & ((1 << 8) - 1); + xor__immediate(state, map, i); + return DECODE__DEFINED; + } + case 0b1100101100000000: // OR #imm,R0 + { + uint32_t i = (code >> 0) & ((1 << 8) - 1); + or__immediate(state, map, i); + return DECODE__DEFINED; + } + case 0b1100110000000000: // TST.B #imm,@(R0,GBR) + { + uint32_t i = (code >> 0) & ((1 << 8) - 1); + tst_b__store_indexed_gbr_indirect(state, map, i); + return DECODE__DEFINED; + } + case 0b1100110100000000: // AND.B #imm,@(R0,GBR) + { + uint32_t i = (code >> 0) & ((1 << 8) - 1); + and_b__store_indexed_gbr_indirect(state, map, i); + return DECODE__DEFINED; + } + case 0b1100111000000000: // XOR.B #imm,@(R0,GBR) + { + uint32_t i = (code >> 0) & ((1 << 8) - 1); + xor_b__store_indexed_gbr_indirect(state, map, i); + return DECODE__DEFINED; + } + case 0b1100111100000000: // OR.B #imm,@(R0,GBR) + { + uint32_t i = (code >> 0) & ((1 << 8) - 1); + or_b__store_indexed_gbr_indirect(state, map, i); + return DECODE__DEFINED; + } + } + switch (code & 0b1111000011111111) { + case 0b0000000000000010: // STC SR,Rn + { + uint32_t n = (code >> 8) & ((1 << 4) - 1); + stc__transfer_from_sr(state, map, n); + return DECODE__DEFINED; + } + case 0b0000000000000011: // BSRF Rn + { + uint32_t n = (code >> 8) & ((1 << 4) - 1); + bsrf__destination_operand_only(state, map, n); + return DECODE__DEFINED; + } + case 0b0000000000001010: // STS MACH,Rn + { + uint32_t n = (code >> 8) & ((1 << 4) - 1); + sts__transfer_from_mach(state, map, n); + return DECODE__DEFINED; + } + case 0b0000000000010010: // STC GBR,Rn + { + uint32_t n = (code >> 8) & ((1 << 4) - 1); + stc__transfer_from_gbr(state, map, n); + return DECODE__DEFINED; + } + case 0b0000000000011010: // STS MACL,Rn + { + uint32_t n = (code >> 8) & ((1 << 4) - 1); + sts__transfer_from_macl(state, map, n); + return DECODE__DEFINED; + } + case 0b0000000000100010: // STC VBR,Rn + { + uint32_t n = (code >> 8) & ((1 << 4) - 1); + stc__transfer_from_vbr(state, map, n); + return DECODE__DEFINED; + } + case 0b0000000000100011: // BRAF Rn + { + uint32_t n = (code >> 8) & ((1 << 4) - 1); + braf__destination_operand_only(state, map, n); + return DECODE__DEFINED; + } + case 0b0000000000101001: // MOVT Rn + { + uint32_t n = (code >> 8) & ((1 << 4) - 1); + movt__destination_operand_only(state, map, n); + return DECODE__DEFINED; + } + case 0b0000000000101010: // STS PR,Rn + { + uint32_t n = (code >> 8) & ((1 << 4) - 1); + sts__transfer_from_pr(state, map, n); + return DECODE__DEFINED; + } + case 0b0100000000000000: // SHLL Rn + { + uint32_t n = (code >> 8) & ((1 << 4) - 1); + shll__destination_operand_only(state, map, n); + return DECODE__DEFINED; + } + case 0b0100000000000001: // SHLR 
Rn + { + uint32_t n = (code >> 8) & ((1 << 4) - 1); + shlr__destination_operand_only(state, map, n); + return DECODE__DEFINED; + } + case 0b0100000000000010: // STS.L MACH,@-Rn + { + uint32_t n = (code >> 8) & ((1 << 4) - 1); + sts_l__store_from_mach(state, map, n); + return DECODE__DEFINED; + } + case 0b0100000000000011: // STC.L SR,@-Rn + { + uint32_t n = (code >> 8) & ((1 << 4) - 1); + stc_l__store_from_sr(state, map, n); + return DECODE__DEFINED; + } + case 0b0100000000000100: // ROTL Rn + { + uint32_t n = (code >> 8) & ((1 << 4) - 1); + rotl__destination_operand_only(state, map, n); + return DECODE__DEFINED; + } + case 0b0100000000000101: // ROTR Rn + { + uint32_t n = (code >> 8) & ((1 << 4) - 1); + rotr__destination_operand_only(state, map, n); + return DECODE__DEFINED; + } + case 0b0100000000000110: // LDS.L @Rm+,MACH + { + uint32_t m = (code >> 8) & ((1 << 4) - 1); + lds_l__load_to_mach(state, map, m); + return DECODE__DEFINED; + } + case 0b0100000000000111: // LDC.L @Rm+,SR + { + uint32_t m = (code >> 8) & ((1 << 4) - 1); + ldc_l__load_to_sr(state, map, m); + return DECODE__DEFINED; + } + case 0b0100000000001000: // SHLL2 Rn + { + uint32_t n = (code >> 8) & ((1 << 4) - 1); + shll2__destination_operand_only(state, map, n); + return DECODE__DEFINED; + } + case 0b0100000000001001: // SHLR2 Rn + { + uint32_t n = (code >> 8) & ((1 << 4) - 1); + shlr2__destination_operand_only(state, map, n); + return DECODE__DEFINED; + } + case 0b0100000000001010: // LDS Rm,MACH + { + uint32_t m = (code >> 8) & ((1 << 4) - 1); + lds__transfer_to_mach(state, map, m); + return DECODE__DEFINED; + } + case 0b0100000000001011: // JSR @Rn + { + uint32_t n = (code >> 8) & ((1 << 4) - 1); + jsr__destination_operand_only(state, map, n); + return DECODE__DEFINED; + } + case 0b0100000000001110: // LDC Rm,SR + { + uint32_t m = (code >> 8) & ((1 << 4) - 1); + ldc__transfer_to_sr(state, map, m); + return DECODE__DEFINED; + } + case 0b0100000000010000: // DT Rn + { + uint32_t n = (code >> 8) & ((1 << 4) - 1); + dt__destination_operand_only(state, map, n); + return DECODE__DEFINED; + } + case 0b0100000000010001: // CMP/PZ Rn + { + uint32_t n = (code >> 8) & ((1 << 4) - 1); + cmp_pz__destination_operand_only(state, map, n); + return DECODE__DEFINED; + } + case 0b0100000000010010: // STS.L MACL,@-Rn + { + uint32_t n = (code >> 8) & ((1 << 4) - 1); + sts_l__store_from_macl(state, map, n); + return DECODE__DEFINED; + } + case 0b0100000000010011: // STC.L GBR,@-Rn + { + uint32_t n = (code >> 8) & ((1 << 4) - 1); + stc_l__store_from_gbr(state, map, n); + return DECODE__DEFINED; + } + case 0b0100000000010101: // CMP/PL Rn + { + uint32_t n = (code >> 8) & ((1 << 4) - 1); + cmp_pl__destination_operand_only(state, map, n); + return DECODE__DEFINED; + } + case 0b0100000000010110: // LDS.L @Rm+,MACL + { + uint32_t m = (code >> 8) & ((1 << 4) - 1); + lds_l__load_to_macl(state, map, m); + return DECODE__DEFINED; + } + case 0b0100000000010111: // LDC.L @Rm+,GBR + { + uint32_t m = (code >> 8) & ((1 << 4) - 1); + ldc_l__load_to_gbr(state, map, m); + return DECODE__DEFINED; + } + case 0b0100000000011000: // SHLL8 Rn + { + uint32_t n = (code >> 8) & ((1 << 4) - 1); + shll8__destination_operand_only(state, map, n); + return DECODE__DEFINED; + } + case 0b0100000000011001: // SHLR8 Rn + { + uint32_t n = (code >> 8) & ((1 << 4) - 1); + shlr8__destination_operand_only(state, map, n); + return DECODE__DEFINED; + } + case 0b0100000000011010: // LDS Rm,MACL + { + uint32_t m = (code >> 8) & ((1 << 4) - 1); + lds__transfer_to_macl(state, map, m); 
+ return DECODE__DEFINED; + } + case 0b0100000000011011: // TAS.B @Rn + { + uint32_t n = (code >> 8) & ((1 << 4) - 1); + tas_b__destination_operand_only(state, map, n); + return DECODE__DEFINED; + } + case 0b0100000000011110: // LDC Rm,GBR + { + uint32_t m = (code >> 8) & ((1 << 4) - 1); + ldc__transfer_to_gbr(state, map, m); + return DECODE__DEFINED; + } + case 0b0100000000100000: // SHAL Rn + { + uint32_t n = (code >> 8) & ((1 << 4) - 1); + shal__destination_operand_only(state, map, n); + return DECODE__DEFINED; + } + case 0b0100000000100001: // SHAR Rn + { + uint32_t n = (code >> 8) & ((1 << 4) - 1); + shar__destination_operand_only(state, map, n); + return DECODE__DEFINED; + } + case 0b0100000000100010: // STS.L PR,@-Rn + { + uint32_t n = (code >> 8) & ((1 << 4) - 1); + sts_l__store_from_pr(state, map, n); + return DECODE__DEFINED; + } + case 0b0100000000100011: // STC.L VBR,@-Rn + { + uint32_t n = (code >> 8) & ((1 << 4) - 1); + stc_l__store_from_vbr(state, map, n); + return DECODE__DEFINED; + } + case 0b0100000000100100: // ROTCL Rn + { + uint32_t n = (code >> 8) & ((1 << 4) - 1); + rotcl__destination_operand_only(state, map, n); + return DECODE__DEFINED; + } + case 0b0100000000100101: // ROTCR Rn + { + uint32_t n = (code >> 8) & ((1 << 4) - 1); + rotcr__destination_operand_only(state, map, n); + return DECODE__DEFINED; + } + case 0b0100000000100110: // LDS.L @Rm+,PR + { + uint32_t m = (code >> 8) & ((1 << 4) - 1); + lds_l__load_to_pr(state, map, m); + return DECODE__DEFINED; + } + case 0b0100000000100111: // LDC.L @Rm+,VBR + { + uint32_t m = (code >> 8) & ((1 << 4) - 1); + ldc_l__load_to_vbr(state, map, m); + return DECODE__DEFINED; + } + case 0b0100000000101000: // SHLL16 Rn + { + uint32_t n = (code >> 8) & ((1 << 4) - 1); + shll16__destination_operand_only(state, map, n); + return DECODE__DEFINED; + } + case 0b0100000000101001: // SHLR16 Rn + { + uint32_t n = (code >> 8) & ((1 << 4) - 1); + shlr16__destination_operand_only(state, map, n); + return DECODE__DEFINED; + } + case 0b0100000000101010: // LDS Rm,PR + { + uint32_t m = (code >> 8) & ((1 << 4) - 1); + lds__transfer_to_pr(state, map, m); + return DECODE__DEFINED; + } + case 0b0100000000101011: // JMP @Rn + { + uint32_t n = (code >> 8) & ((1 << 4) - 1); + jmp__destination_operand_only(state, map, n); + return DECODE__DEFINED; + } + case 0b0100000000101110: // LDC Rm,VBR + { + uint32_t m = (code >> 8) & ((1 << 4) - 1); + ldc__transfer_to_vbr(state, map, m); + return DECODE__DEFINED; + } + } + switch (code & 0b1111111111111111) { + case 0b0000000000001000: // CLRT + { + clrt__no_operand(state, map); + return DECODE__DEFINED; + } + case 0b0000000000001001: // NOP + { + nop__no_operand(state, map); + return DECODE__DEFINED; + } + case 0b0000000000001011: // RTS + { + rts__no_operand(state, map); + return DECODE__DEFINED; + } + case 0b0000000000011000: // SETT + { + sett__no_operand(state, map); + return DECODE__DEFINED; + } + case 0b0000000000011001: // DIV0U + { + div0u__no_operand(state, map); + return DECODE__DEFINED; + } + case 0b0000000000011011: // SLEEP + { + sleep__no_operand(state, map); + return DECODE__DEFINED; + } + case 0b0000000000101000: // CLRMAC + { + clrmac__no_operand(state, map); + return DECODE__DEFINED; + } + case 0b0000000000101011: // RTE + { + rte__no_operand(state, map); + return DECODE__DEFINED; + } + case 0b0000000001001000: // CLRS + { + clrs__no_operand(state, map); + return DECODE__DEFINED; + } + case 0b0000000001011000: // SETS + { + sets__no_operand(state, map); + return DECODE__DEFINED; + } + } + 
return DECODE__UNDEFINED; +} diff --git a/c/decode_execute.h b/c/decode_execute.h new file mode 100644 index 0000000..90f701a --- /dev/null +++ b/c/decode_execute.h @@ -0,0 +1,9 @@ +#pragma once + +#include <stdint.h> + +#include "memory_map.h" +#include "state.h" +#include "decode.h" + +enum decode_status decode_and_execute_instruction(struct architectural_state * state, struct memory_map * map, uint16_t code); diff --git a/c/decode_print.c b/c/decode_print.c new file mode 100644 index 0000000..fa19b66 --- /dev/null +++ b/c/decode_print.c @@ -0,0 +1,1082 @@ +#include <stdio.h> + +#include "decode_print.h" + +enum decode_status decode_and_print_instruction(struct architectural_state * state, struct memory_map * map, uint16_t code, char const ** instruction_buf, char * operand_buf, uint32_t size) +{ + switch (code & 0b1111000000000000) { + case 0b0001000000000000: // MOV.L Rm,@(disp,Rn) + { + uint32_t m = (code >> 4) & ((1 << 4) - 1); + uint32_t d = (code >> 0) & ((1 << 4) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 1); + snprintf(operand_buf, size, "R%d,@(%d,R%d)", m, d, n); + *instruction_buf = "MOV.L"; + return DECODE__DEFINED; + } + case 0b0101000000000000: // MOV.L @(disp,Rm),Rn + { + uint32_t d = (code >> 0) & ((1 << 4) - 1); + uint32_t m = (code >> 4) & ((1 << 4) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 1); + snprintf(operand_buf, size, "@(%d,R%d),R%d", d, m, n); + *instruction_buf = "MOV.L"; + return DECODE__DEFINED; + } + case 0b0111000000000000: // ADD #imm,Rn + { + uint32_t i = (code >> 0) & ((1 << 8) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 1); + snprintf(operand_buf, size, "#%d,R%d", i, n); + *instruction_buf = "ADD"; + return DECODE__DEFINED; + } + case 0b1001000000000000: // MOV.W @(disp,PC),Rn + { + uint32_t d = (code >> 0) & ((1 << 8) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 1); + snprintf(operand_buf, size, "@(%d,PC),R%d", d, n); + *instruction_buf = "MOV.W"; + return DECODE__DEFINED; + } + case 0b1010000000000000: // BRA label + { + uint32_t d = (code >> 0) & ((1 << 12) - 1); + snprintf(operand_buf, size, "%d", d); + *instruction_buf = "BRA"; + return DECODE__DEFINED; + } + case 0b1011000000000000: // BSR label + { + uint32_t d = (code >> 0) & ((1 << 12) - 1); + snprintf(operand_buf, size, "%d", d); + *instruction_buf = "BSR"; + return DECODE__DEFINED; + } + case 0b1101000000000000: // MOV.L @(disp,PC),Rn + { + uint32_t d = (code >> 0) & ((1 << 8) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 1); + snprintf(operand_buf, size, "@(%d,PC),R%d", d, n); + *instruction_buf = "MOV.L"; + return DECODE__DEFINED; + } + case 0b1110000000000000: // MOV #imm,Rn + { + uint32_t i = (code >> 0) & ((1 << 8) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 1); + snprintf(operand_buf, size, "#%d,R%d", i, n); + *instruction_buf = "MOV"; + return DECODE__DEFINED; + } + } + switch (code & 0b1111000000001111) { + case 0b0000000000000100: // MOV.B Rm,@(R0,Rn) + { + uint32_t m = (code >> 4) & ((1 << 4) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 1); + snprintf(operand_buf, size, "R%d,@(R0,R%d)", m, n); + *instruction_buf = "MOV.B"; + return DECODE__DEFINED; + } + case 0b0000000000000101: // MOV.W Rm,@(R0,Rn) + { + uint32_t m = (code >> 4) & ((1 << 4) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 1); + snprintf(operand_buf, size, "R%d,@(R0,R%d)", m, n); + *instruction_buf = "MOV.W"; + return DECODE__DEFINED; + } + case 0b0000000000000110: // MOV.L Rm,@(R0,Rn) + { + uint32_t m = (code >> 4) & ((1 << 4) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 1); + snprintf(operand_buf, size, 
"R%d,@(R0,R%d)", m, n); + *instruction_buf = "MOV.L"; + return DECODE__DEFINED; + } + case 0b0000000000000111: // MUL.L Rm,Rn + { + uint32_t m = (code >> 4) & ((1 << 4) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 1); + snprintf(operand_buf, size, "R%d,R%d", m, n); + *instruction_buf = "MUL.L"; + return DECODE__DEFINED; + } + case 0b0000000000001100: // MOV.B @(R0,Rm),Rn + { + uint32_t m = (code >> 4) & ((1 << 4) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 1); + snprintf(operand_buf, size, "@(R0,R%d),R%d", m, n); + *instruction_buf = "MOV.B"; + return DECODE__DEFINED; + } + case 0b0000000000001101: // MOV.W @(R0,Rm),Rn + { + uint32_t m = (code >> 4) & ((1 << 4) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 1); + snprintf(operand_buf, size, "@(R0,R%d),R%d", m, n); + *instruction_buf = "MOV.W"; + return DECODE__DEFINED; + } + case 0b0000000000001110: // MOV.L @(R0,Rm),Rn + { + uint32_t m = (code >> 4) & ((1 << 4) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 1); + snprintf(operand_buf, size, "@(R0,R%d),R%d", m, n); + *instruction_buf = "MOV.L"; + return DECODE__DEFINED; + } + case 0b0000000000001111: // MAC.L @Rm+,@Rn+ + { + uint32_t m = (code >> 4) & ((1 << 4) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 1); + snprintf(operand_buf, size, "@R%d+,@R%d+", m, n); + *instruction_buf = "MAC.L"; + return DECODE__DEFINED; + } + case 0b0010000000000000: // MOV.B Rm,@Rn + { + uint32_t m = (code >> 4) & ((1 << 4) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 1); + snprintf(operand_buf, size, "R%d,@R%d", m, n); + *instruction_buf = "MOV.B"; + return DECODE__DEFINED; + } + case 0b0010000000000001: // MOV.W Rm,@Rn + { + uint32_t m = (code >> 4) & ((1 << 4) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 1); + snprintf(operand_buf, size, "R%d,@R%d", m, n); + *instruction_buf = "MOV.W"; + return DECODE__DEFINED; + } + case 0b0010000000000010: // MOV.L Rm,@Rn + { + uint32_t m = (code >> 4) & ((1 << 4) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 1); + snprintf(operand_buf, size, "R%d,@R%d", m, n); + *instruction_buf = "MOV.L"; + return DECODE__DEFINED; + } + case 0b0010000000000100: // MOV.B Rm,@-Rn + { + uint32_t m = (code >> 4) & ((1 << 4) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 1); + snprintf(operand_buf, size, "R%d,@-R%d", m, n); + *instruction_buf = "MOV.B"; + return DECODE__DEFINED; + } + case 0b0010000000000101: // MOV.W Rm,@-Rn + { + uint32_t m = (code >> 4) & ((1 << 4) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 1); + snprintf(operand_buf, size, "R%d,@-R%d", m, n); + *instruction_buf = "MOV.W"; + return DECODE__DEFINED; + } + case 0b0010000000000110: // MOV.L Rm,@-Rn + { + uint32_t m = (code >> 4) & ((1 << 4) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 1); + snprintf(operand_buf, size, "R%d,@-R%d", m, n); + *instruction_buf = "MOV.L"; + return DECODE__DEFINED; + } + case 0b0010000000000111: // DIV0S Rm,Rn + { + uint32_t m = (code >> 4) & ((1 << 4) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 1); + snprintf(operand_buf, size, "R%d,R%d", m, n); + *instruction_buf = "DIV0S"; + return DECODE__DEFINED; + } + case 0b0010000000001000: // TST Rm,Rn + { + uint32_t m = (code >> 4) & ((1 << 4) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 1); + snprintf(operand_buf, size, "R%d,R%d", m, n); + *instruction_buf = "TST"; + return DECODE__DEFINED; + } + case 0b0010000000001001: // AND Rm,Rn + { + uint32_t m = (code >> 4) & ((1 << 4) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 1); + snprintf(operand_buf, size, "R%d,R%d", m, n); + *instruction_buf = "AND"; + return DECODE__DEFINED; + } 
+ case 0b0010000000001010: // XOR Rm,Rn + { + uint32_t m = (code >> 4) & ((1 << 4) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 1); + snprintf(operand_buf, size, "R%d,R%d", m, n); + *instruction_buf = "XOR"; + return DECODE__DEFINED; + } + case 0b0010000000001011: // OR Rm,Rn + { + uint32_t m = (code >> 4) & ((1 << 4) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 1); + snprintf(operand_buf, size, "R%d,R%d", m, n); + *instruction_buf = "OR"; + return DECODE__DEFINED; + } + case 0b0010000000001100: // CMP/STR Rm,Rn + { + uint32_t m = (code >> 4) & ((1 << 4) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 1); + snprintf(operand_buf, size, "R%d,R%d", m, n); + *instruction_buf = "CMP/STR"; + return DECODE__DEFINED; + } + case 0b0010000000001101: // XTRCT Rm,Rn + { + uint32_t m = (code >> 4) & ((1 << 4) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 1); + snprintf(operand_buf, size, "R%d,R%d", m, n); + *instruction_buf = "XTRCT"; + return DECODE__DEFINED; + } + case 0b0010000000001110: // MULU.W Rm,Rn + { + uint32_t m = (code >> 4) & ((1 << 4) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 1); + snprintf(operand_buf, size, "R%d,R%d", m, n); + *instruction_buf = "MULU.W"; + return DECODE__DEFINED; + } + case 0b0010000000001111: // MULS.W Rm,Rn + { + uint32_t m = (code >> 4) & ((1 << 4) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 1); + snprintf(operand_buf, size, "R%d,R%d", m, n); + *instruction_buf = "MULS.W"; + return DECODE__DEFINED; + } + case 0b0011000000000000: // CMP/EQ Rm,Rn + { + uint32_t m = (code >> 4) & ((1 << 4) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 1); + snprintf(operand_buf, size, "R%d,R%d", m, n); + *instruction_buf = "CMP/EQ"; + return DECODE__DEFINED; + } + case 0b0011000000000010: // CMP/HS Rm,Rn + { + uint32_t m = (code >> 4) & ((1 << 4) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 1); + snprintf(operand_buf, size, "R%d,R%d", m, n); + *instruction_buf = "CMP/HS"; + return DECODE__DEFINED; + } + case 0b0011000000000011: // CMP/GE Rm,Rn + { + uint32_t m = (code >> 4) & ((1 << 4) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 1); + snprintf(operand_buf, size, "R%d,R%d", m, n); + *instruction_buf = "CMP/GE"; + return DECODE__DEFINED; + } + case 0b0011000000000100: // DIV1 Rm,Rn + { + uint32_t m = (code >> 4) & ((1 << 4) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 1); + snprintf(operand_buf, size, "R%d,R%d", m, n); + *instruction_buf = "DIV1"; + return DECODE__DEFINED; + } + case 0b0011000000000101: // DMULU.L Rm,Rn + { + uint32_t m = (code >> 4) & ((1 << 4) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 1); + snprintf(operand_buf, size, "R%d,R%d", m, n); + *instruction_buf = "DMULU.L"; + return DECODE__DEFINED; + } + case 0b0011000000000110: // CMP/HI Rm,Rn + { + uint32_t m = (code >> 4) & ((1 << 4) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 1); + snprintf(operand_buf, size, "R%d,R%d", m, n); + *instruction_buf = "CMP/HI"; + return DECODE__DEFINED; + } + case 0b0011000000000111: // CMP/GT Rm,Rn + { + uint32_t m = (code >> 4) & ((1 << 4) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 1); + snprintf(operand_buf, size, "R%d,R%d", m, n); + *instruction_buf = "CMP/GT"; + return DECODE__DEFINED; + } + case 0b0011000000001000: // SUB Rm,Rn + { + uint32_t m = (code >> 4) & ((1 << 4) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 1); + snprintf(operand_buf, size, "R%d,R%d", m, n); + *instruction_buf = "SUB"; + return DECODE__DEFINED; + } + case 0b0011000000001010: // SUBC Rm,Rn + { + uint32_t m = (code >> 4) & ((1 << 4) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 
1); + snprintf(operand_buf, size, "R%d,R%d", m, n); + *instruction_buf = "SUBC"; + return DECODE__DEFINED; + } + case 0b0011000000001011: // SUBV Rm,Rn + { + uint32_t m = (code >> 4) & ((1 << 4) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 1); + snprintf(operand_buf, size, "R%d,R%d", m, n); + *instruction_buf = "SUBV"; + return DECODE__DEFINED; + } + case 0b0011000000001100: // ADD Rm,Rn + { + uint32_t m = (code >> 4) & ((1 << 4) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 1); + snprintf(operand_buf, size, "R%d,R%d", m, n); + *instruction_buf = "ADD"; + return DECODE__DEFINED; + } + case 0b0011000000001101: // DMULS.L Rm,Rn + { + uint32_t m = (code >> 4) & ((1 << 4) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 1); + snprintf(operand_buf, size, "R%d,R%d", m, n); + *instruction_buf = "DMULS.L"; + return DECODE__DEFINED; + } + case 0b0011000000001110: // ADDC Rm,Rn + { + uint32_t m = (code >> 4) & ((1 << 4) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 1); + snprintf(operand_buf, size, "R%d,R%d", m, n); + *instruction_buf = "ADDC"; + return DECODE__DEFINED; + } + case 0b0011000000001111: // ADDV Rm,Rn + { + uint32_t m = (code >> 4) & ((1 << 4) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 1); + snprintf(operand_buf, size, "R%d,R%d", m, n); + *instruction_buf = "ADDV"; + return DECODE__DEFINED; + } + case 0b0100000000001111: // MAC.W @Rm+,@Rn+ + { + uint32_t m = (code >> 4) & ((1 << 4) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 1); + snprintf(operand_buf, size, "@R%d+,@R%d+", m, n); + *instruction_buf = "MAC.W"; + return DECODE__DEFINED; + } + case 0b0110000000000000: // MOV.B @Rm,Rn + { + uint32_t m = (code >> 4) & ((1 << 4) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 1); + snprintf(operand_buf, size, "@R%d,R%d", m, n); + *instruction_buf = "MOV.B"; + return DECODE__DEFINED; + } + case 0b0110000000000001: // MOV.W @Rm,Rn + { + uint32_t m = (code >> 4) & ((1 << 4) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 1); + snprintf(operand_buf, size, "@R%d,R%d", m, n); + *instruction_buf = "MOV.W"; + return DECODE__DEFINED; + } + case 0b0110000000000010: // MOV.L @Rm,Rn + { + uint32_t m = (code >> 4) & ((1 << 4) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 1); + snprintf(operand_buf, size, "@R%d,R%d", m, n); + *instruction_buf = "MOV.L"; + return DECODE__DEFINED; + } + case 0b0110000000000011: // MOV Rm,Rn + { + uint32_t m = (code >> 4) & ((1 << 4) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 1); + snprintf(operand_buf, size, "R%d,R%d", m, n); + *instruction_buf = "MOV"; + return DECODE__DEFINED; + } + case 0b0110000000000100: // MOV.B @Rm+,Rn + { + uint32_t m = (code >> 4) & ((1 << 4) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 1); + snprintf(operand_buf, size, "@R%d+,R%d", m, n); + *instruction_buf = "MOV.B"; + return DECODE__DEFINED; + } + case 0b0110000000000101: // MOV.W @Rm+,Rn + { + uint32_t m = (code >> 4) & ((1 << 4) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 1); + snprintf(operand_buf, size, "@R%d+,R%d", m, n); + *instruction_buf = "MOV.W"; + return DECODE__DEFINED; + } + case 0b0110000000000110: // MOV.L @Rm+,Rn + { + uint32_t m = (code >> 4) & ((1 << 4) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 1); + snprintf(operand_buf, size, "@R%d+,R%d", m, n); + *instruction_buf = "MOV.L"; + return DECODE__DEFINED; + } + case 0b0110000000000111: // NOT Rm,Rn + { + uint32_t m = (code >> 4) & ((1 << 4) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 1); + snprintf(operand_buf, size, "R%d,R%d", m, n); + *instruction_buf = "NOT"; + return DECODE__DEFINED; + } + case 
0b0110000000001000: // SWAP.B Rm,Rn + { + uint32_t m = (code >> 4) & ((1 << 4) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 1); + snprintf(operand_buf, size, "R%d,R%d", m, n); + *instruction_buf = "SWAP.B"; + return DECODE__DEFINED; + } + case 0b0110000000001001: // SWAP.W Rm,Rn + { + uint32_t m = (code >> 4) & ((1 << 4) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 1); + snprintf(operand_buf, size, "R%d,R%d", m, n); + *instruction_buf = "SWAP.W"; + return DECODE__DEFINED; + } + case 0b0110000000001010: // NEGC Rm,Rn + { + uint32_t m = (code >> 4) & ((1 << 4) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 1); + snprintf(operand_buf, size, "R%d,R%d", m, n); + *instruction_buf = "NEGC"; + return DECODE__DEFINED; + } + case 0b0110000000001011: // NEG Rm,Rn + { + uint32_t m = (code >> 4) & ((1 << 4) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 1); + snprintf(operand_buf, size, "R%d,R%d", m, n); + *instruction_buf = "NEG"; + return DECODE__DEFINED; + } + case 0b0110000000001100: // EXTU.B Rm,Rn + { + uint32_t m = (code >> 4) & ((1 << 4) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 1); + snprintf(operand_buf, size, "R%d,R%d", m, n); + *instruction_buf = "EXTU.B"; + return DECODE__DEFINED; + } + case 0b0110000000001101: // EXTU.W Rm,Rn + { + uint32_t m = (code >> 4) & ((1 << 4) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 1); + snprintf(operand_buf, size, "R%d,R%d", m, n); + *instruction_buf = "EXTU.W"; + return DECODE__DEFINED; + } + case 0b0110000000001110: // EXTS.B Rm,Rn + { + uint32_t m = (code >> 4) & ((1 << 4) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 1); + snprintf(operand_buf, size, "R%d,R%d", m, n); + *instruction_buf = "EXTS.B"; + return DECODE__DEFINED; + } + case 0b0110000000001111: // EXTS.W Rm,Rn + { + uint32_t m = (code >> 4) & ((1 << 4) - 1); + uint32_t n = (code >> 8) & ((1 << 4) - 1); + snprintf(operand_buf, size, "R%d,R%d", m, n); + *instruction_buf = "EXTS.W"; + return DECODE__DEFINED; + } + } + switch (code & 0b1111111100000000) { + case 0b1000000000000000: // MOV.B R0,@(disp,Rn) + { + uint32_t d = (code >> 0) & ((1 << 4) - 1); + uint32_t n = (code >> 4) & ((1 << 4) - 1); + snprintf(operand_buf, size, "R0,@(%d,R%d)", d, n); + *instruction_buf = "MOV.B"; + return DECODE__DEFINED; + } + case 0b1000000100000000: // MOV.W R0,@(disp,Rn) + { + uint32_t d = (code >> 0) & ((1 << 4) - 1); + uint32_t n = (code >> 4) & ((1 << 4) - 1); + snprintf(operand_buf, size, "R0,@(%d,R%d)", d, n); + *instruction_buf = "MOV.W"; + return DECODE__DEFINED; + } + case 0b1000010000000000: // MOV.B @(disp,Rm),R0 + { + uint32_t d = (code >> 0) & ((1 << 4) - 1); + uint32_t m = (code >> 4) & ((1 << 4) - 1); + snprintf(operand_buf, size, "@(%d,R%d),R0", d, m); + *instruction_buf = "MOV.B"; + return DECODE__DEFINED; + } + case 0b1000010100000000: // MOV.W @(disp,Rm),R0 + { + uint32_t d = (code >> 0) & ((1 << 4) - 1); + uint32_t m = (code >> 4) & ((1 << 4) - 1); + snprintf(operand_buf, size, "@(%d,R%d),R0", d, m); + *instruction_buf = "MOV.W"; + return DECODE__DEFINED; + } + case 0b1000100000000000: // CMP/EQ #imm,R0 + { + uint32_t i = (code >> 0) & ((1 << 8) - 1); + snprintf(operand_buf, size, "#%d,R0", i); + *instruction_buf = "CMP/EQ"; + return DECODE__DEFINED; + } + case 0b1000100100000000: // BT label + { + uint32_t d = (code >> 0) & ((1 << 8) - 1); + snprintf(operand_buf, size, "%d", d); + *instruction_buf = "BT"; + return DECODE__DEFINED; + } + case 0b1000101100000000: // BF label + { + uint32_t d = (code >> 0) & ((1 << 8) - 1); + snprintf(operand_buf, size, "%d", d); + *instruction_buf = 
"BF"; + return DECODE__DEFINED; + } + case 0b1000110100000000: // BT/S label + { + uint32_t d = (code >> 0) & ((1 << 8) - 1); + snprintf(operand_buf, size, "%d", d); + *instruction_buf = "BT/S"; + return DECODE__DEFINED; + } + case 0b1000111100000000: // BF/S label + { + uint32_t d = (code >> 0) & ((1 << 8) - 1); + snprintf(operand_buf, size, "%d", d); + *instruction_buf = "BF/S"; + return DECODE__DEFINED; + } + case 0b1100000000000000: // MOV.B R0,@(disp,GBR) + { + uint32_t d = (code >> 0) & ((1 << 8) - 1); + snprintf(operand_buf, size, "R0,@(%d,GBR)", d); + *instruction_buf = "MOV.B"; + return DECODE__DEFINED; + } + case 0b1100000100000000: // MOV.W R0,@(disp,GBR) + { + uint32_t d = (code >> 0) & ((1 << 8) - 1); + snprintf(operand_buf, size, "R0,@(%d,GBR)", d); + *instruction_buf = "MOV.W"; + return DECODE__DEFINED; + } + case 0b1100001000000000: // MOV.L R0,@(disp,GBR) + { + uint32_t d = (code >> 0) & ((1 << 8) - 1); + snprintf(operand_buf, size, "R0,@(%d,GBR)", d); + *instruction_buf = "MOV.L"; + return DECODE__DEFINED; + } + case 0b1100001100000000: // TRAPA #imm + { + uint32_t i = (code >> 0) & ((1 << 8) - 1); + snprintf(operand_buf, size, "#%d", i); + *instruction_buf = "TRAPA"; + return DECODE__DEFINED; + } + case 0b1100010000000000: // MOV.B @(disp,GBR),R0 + { + uint32_t d = (code >> 0) & ((1 << 8) - 1); + snprintf(operand_buf, size, "@(%d,GBR),R0", d); + *instruction_buf = "MOV.B"; + return DECODE__DEFINED; + } + case 0b1100010100000000: // MOV.W @(disp,GBR),R0 + { + uint32_t d = (code >> 0) & ((1 << 8) - 1); + snprintf(operand_buf, size, "@(%d,GBR),R0", d); + *instruction_buf = "MOV.W"; + return DECODE__DEFINED; + } + case 0b1100011000000000: // MOV.L @(disp,GBR),R0 + { + uint32_t d = (code >> 0) & ((1 << 8) - 1); + snprintf(operand_buf, size, "@(%d,GBR),R0", d); + *instruction_buf = "MOV.L"; + return DECODE__DEFINED; + } + case 0b1100011100000000: // MOVA @(disp,PC),R0 + { + uint32_t d = (code >> 0) & ((1 << 8) - 1); + snprintf(operand_buf, size, "@(%d,PC),R0", d); + *instruction_buf = "MOVA"; + return DECODE__DEFINED; + } + case 0b1100100000000000: // TST #imm,R0 + { + uint32_t i = (code >> 0) & ((1 << 8) - 1); + snprintf(operand_buf, size, "#%d,R0", i); + *instruction_buf = "TST"; + return DECODE__DEFINED; + } + case 0b1100100100000000: // AND #imm,R0 + { + uint32_t i = (code >> 0) & ((1 << 8) - 1); + snprintf(operand_buf, size, "#%d,R0", i); + *instruction_buf = "AND"; + return DECODE__DEFINED; + } + case 0b1100101000000000: // XOR #imm,R0 + { + uint32_t i = (code >> 0) & ((1 << 8) - 1); + snprintf(operand_buf, size, "#%d,R0", i); + *instruction_buf = "XOR"; + return DECODE__DEFINED; + } + case 0b1100101100000000: // OR #imm,R0 + { + uint32_t i = (code >> 0) & ((1 << 8) - 1); + snprintf(operand_buf, size, "#%d,R0", i); + *instruction_buf = "OR"; + return DECODE__DEFINED; + } + case 0b1100110000000000: // TST.B #imm,@(R0,GBR) + { + uint32_t i = (code >> 0) & ((1 << 8) - 1); + snprintf(operand_buf, size, "#%d,@(R0,GBR)", i); + *instruction_buf = "TST.B"; + return DECODE__DEFINED; + } + case 0b1100110100000000: // AND.B #imm,@(R0,GBR) + { + uint32_t i = (code >> 0) & ((1 << 8) - 1); + snprintf(operand_buf, size, "#%d,@(R0,GBR)", i); + *instruction_buf = "AND.B"; + return DECODE__DEFINED; + } + case 0b1100111000000000: // XOR.B #imm,@(R0,GBR) + { + uint32_t i = (code >> 0) & ((1 << 8) - 1); + snprintf(operand_buf, size, "#%d,@(R0,GBR)", i); + *instruction_buf = "XOR.B"; + return DECODE__DEFINED; + } + case 0b1100111100000000: // OR.B #imm,@(R0,GBR) + { + uint32_t i = (code >> 0) 
& ((1 << 8) - 1); + snprintf(operand_buf, size, "#%d,@(R0,GBR)", i); + *instruction_buf = "OR.B"; + return DECODE__DEFINED; + } + } + switch (code & 0b1111000011111111) { + case 0b0000000000000010: // STC SR,Rn + { + uint32_t n = (code >> 8) & ((1 << 4) - 1); + snprintf(operand_buf, size, "SR,R%d", n); + *instruction_buf = "STC"; + return DECODE__DEFINED; + } + case 0b0000000000000011: // BSRF Rn + { + uint32_t n = (code >> 8) & ((1 << 4) - 1); + snprintf(operand_buf, size, "R%d", n); + *instruction_buf = "BSRF"; + return DECODE__DEFINED; + } + case 0b0000000000001010: // STS MACH,Rn + { + uint32_t n = (code >> 8) & ((1 << 4) - 1); + snprintf(operand_buf, size, "MACH,R%d", n); + *instruction_buf = "STS"; + return DECODE__DEFINED; + } + case 0b0000000000010010: // STC GBR,Rn + { + uint32_t n = (code >> 8) & ((1 << 4) - 1); + snprintf(operand_buf, size, "GBR,R%d", n); + *instruction_buf = "STC"; + return DECODE__DEFINED; + } + case 0b0000000000011010: // STS MACL,Rn + { + uint32_t n = (code >> 8) & ((1 << 4) - 1); + snprintf(operand_buf, size, "MACL,R%d", n); + *instruction_buf = "STS"; + return DECODE__DEFINED; + } + case 0b0000000000100010: // STC VBR,Rn + { + uint32_t n = (code >> 8) & ((1 << 4) - 1); + snprintf(operand_buf, size, "VBR,R%d", n); + *instruction_buf = "STC"; + return DECODE__DEFINED; + } + case 0b0000000000100011: // BRAF Rn + { + uint32_t n = (code >> 8) & ((1 << 4) - 1); + snprintf(operand_buf, size, "R%d", n); + *instruction_buf = "BRAF"; + return DECODE__DEFINED; + } + case 0b0000000000101001: // MOVT Rn + { + uint32_t n = (code >> 8) & ((1 << 4) - 1); + snprintf(operand_buf, size, "R%d", n); + *instruction_buf = "MOVT"; + return DECODE__DEFINED; + } + case 0b0000000000101010: // STS PR,Rn + { + uint32_t n = (code >> 8) & ((1 << 4) - 1); + snprintf(operand_buf, size, "PR,R%d", n); + *instruction_buf = "STS"; + return DECODE__DEFINED; + } + case 0b0100000000000000: // SHLL Rn + { + uint32_t n = (code >> 8) & ((1 << 4) - 1); + snprintf(operand_buf, size, "R%d", n); + *instruction_buf = "SHLL"; + return DECODE__DEFINED; + } + case 0b0100000000000001: // SHLR Rn + { + uint32_t n = (code >> 8) & ((1 << 4) - 1); + snprintf(operand_buf, size, "R%d", n); + *instruction_buf = "SHLR"; + return DECODE__DEFINED; + } + case 0b0100000000000010: // STS.L MACH,@-Rn + { + uint32_t n = (code >> 8) & ((1 << 4) - 1); + snprintf(operand_buf, size, "MACH,@-R%d", n); + *instruction_buf = "STS.L"; + return DECODE__DEFINED; + } + case 0b0100000000000011: // STC.L SR,@-Rn + { + uint32_t n = (code >> 8) & ((1 << 4) - 1); + snprintf(operand_buf, size, "SR,@-R%d", n); + *instruction_buf = "STC.L"; + return DECODE__DEFINED; + } + case 0b0100000000000100: // ROTL Rn + { + uint32_t n = (code >> 8) & ((1 << 4) - 1); + snprintf(operand_buf, size, "R%d", n); + *instruction_buf = "ROTL"; + return DECODE__DEFINED; + } + case 0b0100000000000101: // ROTR Rn + { + uint32_t n = (code >> 8) & ((1 << 4) - 1); + snprintf(operand_buf, size, "R%d", n); + *instruction_buf = "ROTR"; + return DECODE__DEFINED; + } + case 0b0100000000000110: // LDS.L @Rm+,MACH + { + uint32_t m = (code >> 8) & ((1 << 4) - 1); + snprintf(operand_buf, size, "@R%d+,MACH", m); + *instruction_buf = "LDS.L"; + return DECODE__DEFINED; + } + case 0b0100000000000111: // LDC.L @Rm+,SR + { + uint32_t m = (code >> 8) & ((1 << 4) - 1); + snprintf(operand_buf, size, "@R%d+,SR", m); + *instruction_buf = "LDC.L"; + return DECODE__DEFINED; + } + case 0b0100000000001000: // SHLL2 Rn + { + uint32_t n = (code >> 8) & ((1 << 4) - 1); + snprintf(operand_buf, 
size, "R%d", n); + *instruction_buf = "SHLL2"; + return DECODE__DEFINED; + } + case 0b0100000000001001: // SHLR2 Rn + { + uint32_t n = (code >> 8) & ((1 << 4) - 1); + snprintf(operand_buf, size, "R%d", n); + *instruction_buf = "SHLR2"; + return DECODE__DEFINED; + } + case 0b0100000000001010: // LDS Rm,MACH + { + uint32_t m = (code >> 8) & ((1 << 4) - 1); + snprintf(operand_buf, size, "R%d,MACH", m); + *instruction_buf = "LDS"; + return DECODE__DEFINED; + } + case 0b0100000000001011: // JSR @Rn + { + uint32_t n = (code >> 8) & ((1 << 4) - 1); + snprintf(operand_buf, size, "@R%d", n); + *instruction_buf = "JSR"; + return DECODE__DEFINED; + } + case 0b0100000000001110: // LDC Rm,SR + { + uint32_t m = (code >> 8) & ((1 << 4) - 1); + snprintf(operand_buf, size, "R%d,SR", m); + *instruction_buf = "LDC"; + return DECODE__DEFINED; + } + case 0b0100000000010000: // DT Rn + { + uint32_t n = (code >> 8) & ((1 << 4) - 1); + snprintf(operand_buf, size, "R%d", n); + *instruction_buf = "DT"; + return DECODE__DEFINED; + } + case 0b0100000000010001: // CMP/PZ Rn + { + uint32_t n = (code >> 8) & ((1 << 4) - 1); + snprintf(operand_buf, size, "R%d", n); + *instruction_buf = "CMP/PZ"; + return DECODE__DEFINED; + } + case 0b0100000000010010: // STS.L MACL,@-Rn + { + uint32_t n = (code >> 8) & ((1 << 4) - 1); + snprintf(operand_buf, size, "MACL,@-R%d", n); + *instruction_buf = "STS.L"; + return DECODE__DEFINED; + } + case 0b0100000000010011: // STC.L GBR,@-Rn + { + uint32_t n = (code >> 8) & ((1 << 4) - 1); + snprintf(operand_buf, size, "GBR,@-R%d", n); + *instruction_buf = "STC.L"; + return DECODE__DEFINED; + } + case 0b0100000000010101: // CMP/PL Rn + { + uint32_t n = (code >> 8) & ((1 << 4) - 1); + snprintf(operand_buf, size, "R%d", n); + *instruction_buf = "CMP/PL"; + return DECODE__DEFINED; + } + case 0b0100000000010110: // LDS.L @Rm+,MACL + { + uint32_t m = (code >> 8) & ((1 << 4) - 1); + snprintf(operand_buf, size, "@R%d+,MACL", m); + *instruction_buf = "LDS.L"; + return DECODE__DEFINED; + } + case 0b0100000000010111: // LDC.L @Rm+,GBR + { + uint32_t m = (code >> 8) & ((1 << 4) - 1); + snprintf(operand_buf, size, "@R%d+,GBR", m); + *instruction_buf = "LDC.L"; + return DECODE__DEFINED; + } + case 0b0100000000011000: // SHLL8 Rn + { + uint32_t n = (code >> 8) & ((1 << 4) - 1); + snprintf(operand_buf, size, "R%d", n); + *instruction_buf = "SHLL8"; + return DECODE__DEFINED; + } + case 0b0100000000011001: // SHLR8 Rn + { + uint32_t n = (code >> 8) & ((1 << 4) - 1); + snprintf(operand_buf, size, "R%d", n); + *instruction_buf = "SHLR8"; + return DECODE__DEFINED; + } + case 0b0100000000011010: // LDS Rm,MACL + { + uint32_t m = (code >> 8) & ((1 << 4) - 1); + snprintf(operand_buf, size, "R%d,MACL", m); + *instruction_buf = "LDS"; + return DECODE__DEFINED; + } + case 0b0100000000011011: // TAS.B @Rn + { + uint32_t n = (code >> 8) & ((1 << 4) - 1); + snprintf(operand_buf, size, "@R%d", n); + *instruction_buf = "TAS.B"; + return DECODE__DEFINED; + } + case 0b0100000000011110: // LDC Rm,GBR + { + uint32_t m = (code >> 8) & ((1 << 4) - 1); + snprintf(operand_buf, size, "R%d,GBR", m); + *instruction_buf = "LDC"; + return DECODE__DEFINED; + } + case 0b0100000000100000: // SHAL Rn + { + uint32_t n = (code >> 8) & ((1 << 4) - 1); + snprintf(operand_buf, size, "R%d", n); + *instruction_buf = "SHAL"; + return DECODE__DEFINED; + } + case 0b0100000000100001: // SHAR Rn + { + uint32_t n = (code >> 8) & ((1 << 4) - 1); + snprintf(operand_buf, size, "R%d", n); + *instruction_buf = "SHAR"; + return DECODE__DEFINED; + } + case 
0b0100000000100010: // STS.L PR,@-Rn + { + uint32_t n = (code >> 8) & ((1 << 4) - 1); + snprintf(operand_buf, size, "PR,@-R%d", n); + *instruction_buf = "STS.L"; + return DECODE__DEFINED; + } + case 0b0100000000100011: // STC.L VBR,@-Rn + { + uint32_t n = (code >> 8) & ((1 << 4) - 1); + snprintf(operand_buf, size, "VBR,@-R%d", n); + *instruction_buf = "STC.L"; + return DECODE__DEFINED; + } + case 0b0100000000100100: // ROTCL Rn + { + uint32_t n = (code >> 8) & ((1 << 4) - 1); + snprintf(operand_buf, size, "R%d", n); + *instruction_buf = "ROTCL"; + return DECODE__DEFINED; + } + case 0b0100000000100101: // ROTCR Rn + { + uint32_t n = (code >> 8) & ((1 << 4) - 1); + snprintf(operand_buf, size, "R%d", n); + *instruction_buf = "ROTCR"; + return DECODE__DEFINED; + } + case 0b0100000000100110: // LDS.L @Rm+,PR + { + uint32_t m = (code >> 8) & ((1 << 4) - 1); + snprintf(operand_buf, size, "@R%d+,PR", m); + *instruction_buf = "LDS.L"; + return DECODE__DEFINED; + } + case 0b0100000000100111: // LDC.L @Rm+,VBR + { + uint32_t m = (code >> 8) & ((1 << 4) - 1); + snprintf(operand_buf, size, "@R%d+,VBR", m); + *instruction_buf = "LDC.L"; + return DECODE__DEFINED; + } + case 0b0100000000101000: // SHLL16 Rn + { + uint32_t n = (code >> 8) & ((1 << 4) - 1); + snprintf(operand_buf, size, "R%d", n); + *instruction_buf = "SHLL16"; + return DECODE__DEFINED; + } + case 0b0100000000101001: // SHLR16 Rn + { + uint32_t n = (code >> 8) & ((1 << 4) - 1); + snprintf(operand_buf, size, "R%d", n); + *instruction_buf = "SHLR16"; + return DECODE__DEFINED; + } + case 0b0100000000101010: // LDS Rm,PR + { + uint32_t m = (code >> 8) & ((1 << 4) - 1); + snprintf(operand_buf, size, "R%d,PR", m); + *instruction_buf = "LDS"; + return DECODE__DEFINED; + } + case 0b0100000000101011: // JMP @Rn + { + uint32_t n = (code >> 8) & ((1 << 4) - 1); + snprintf(operand_buf, size, "@R%d", n); + *instruction_buf = "JMP"; + return DECODE__DEFINED; + } + case 0b0100000000101110: // LDC Rm,VBR + { + uint32_t m = (code >> 8) & ((1 << 4) - 1); + snprintf(operand_buf, size, "R%d,VBR", m); + *instruction_buf = "LDC"; + return DECODE__DEFINED; + } + } + switch (code & 0b1111111111111111) { + case 0b0000000000001000: // CLRT + { + operand_buf[0] = 0; + *instruction_buf = "CLRT"; + return DECODE__DEFINED; + } + case 0b0000000000001001: // NOP + { + operand_buf[0] = 0; + *instruction_buf = "NOP"; + return DECODE__DEFINED; + } + case 0b0000000000001011: // RTS + { + operand_buf[0] = 0; + *instruction_buf = "RTS"; + return DECODE__DEFINED; + } + case 0b0000000000011000: // SETT + { + operand_buf[0] = 0; + *instruction_buf = "SETT"; + return DECODE__DEFINED; + } + case 0b0000000000011001: // DIV0U + { + operand_buf[0] = 0; + *instruction_buf = "DIV0U"; + return DECODE__DEFINED; + } + case 0b0000000000011011: // SLEEP + { + operand_buf[0] = 0; + *instruction_buf = "SLEEP"; + return DECODE__DEFINED; + } + case 0b0000000000101000: // CLRMAC + { + operand_buf[0] = 0; + *instruction_buf = "CLRMAC"; + return DECODE__DEFINED; + } + case 0b0000000000101011: // RTE + { + operand_buf[0] = 0; + *instruction_buf = "RTE"; + return DECODE__DEFINED; + } + case 0b0000000001001000: // CLRS + { + operand_buf[0] = 0; + *instruction_buf = "CLRS"; + return DECODE__DEFINED; + } + case 0b0000000001011000: // SETS + { + operand_buf[0] = 0; + *instruction_buf = "SETS"; + return DECODE__DEFINED; + } + } + return DECODE__UNDEFINED; +} diff --git a/c/decode_print.h b/c/decode_print.h new file mode 100644 index 0000000..e772e0a --- /dev/null +++ b/c/decode_print.h @@ -0,0 +1,9 @@ 
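+/* Usage sketch (illustrative only): how a caller might drive the
+ * disassembler declared below, assuming a pointer to an initialized
+ * architectural_state (state) and memory_map (map); the buffer names
+ * are hypothetical.
+ *
+ *   char const * mnemonic;
+ *   char operands[32];
+ *   uint16_t code = fetch(state, map);
+ *   if (decode_and_print_instruction(state, map, code, &mnemonic,
+ *                                    operands, sizeof operands) == DECODE__DEFINED)
+ *       printf("%s %s\n", mnemonic, operands);
+ */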
+#pragma once + +#include + +#include "memory_map.h" +#include "state.h" +#include "decode.h" + +enum decode_status decode_and_print_instruction(struct architectural_state * state, struct memory_map * map, uint16_t code, char const ** instruction_buf, char * operand_buf, uint32_t size); diff --git a/c/exception.c b/c/exception.c new file mode 100644 index 0000000..470aa9b --- /dev/null +++ b/c/exception.c @@ -0,0 +1,391 @@ +#include + +#include "exception.h" + +#define SR state->sr.bits +#define _SR state->sr.value +#define VBR state->vbr +#define PC state->pc[0] = state->pc[1] +#define SPC state->spc +#define SSR state->ssr +#define SGR state->sgr +#define R15 state->general_register[15] + +#define BL bl +#define FD fd +#define IMASK imask +#define MD md +#define RB rb + +#pragma GCC diagnostic ignored "-Wunused-variable" +#define EXPEVT int32_t expevt +#define INTEVT int32_t intevt +#define TEA int32_t tea +#define TRA int32_t tra + +#define EXCEPTION_ADDRESS 0 + +void exception(struct architectural_state * state, const char * name) +{ + printf("exception: %s\n", name); + state->is_delay_slot = 0; +} + +void POWERON(struct architectural_state * state) +{ + exception(state, "POWERON"); + //Initialize_Module(PowerOn); + EXPEVT = 0x00000000; + VBR = 0x00000000; + SR.MD = 1; + SR.RB = 1; + SR.BL = 1; + SR.IMASK = 0xF; + SR.FD = 0; + PC = 0xA0000000; +} + +void MANRESET(struct architectural_state * state) +{ + exception(state, "MANRESET"); + //Initialize_Module(Manual); + EXPEVT = 0x00000020; + VBR = 0x00000000; + SR.MD = 1; + SR.RB = 1; + SR.BL = 1; + SR.IMASK = 0xF; + SR.FD = 0; + PC = 0xA0000000; +} + +void HUIDRESET(struct architectural_state * state) +{ + exception(state, "HUIDRESET"); + //Initialize_Module(PowerOn); + EXPEVT = 0x00000000; + VBR = 0x00000000; + SR.MD = 1; + SR.RB = 1; + SR.BL = 1; + SR.IMASK = 0xF; + SR.FD = 0; + PC = 0xA0000000; +} + +void ITLBMULTIHIT(struct architectural_state * state) +{ + exception(state, "ITLBMULTIHIT"); + //Initialize_Module(Manual); + TEA = EXCEPTION_ADDRESS; + //PTEH.VPN = PAGE_NUMBER; + EXPEVT = 0x00000140; + VBR = 0x00000000; + SR.MD = 1; + SR.RB = 1; + SR.BL = 1; + SR.IMASK = 0xF; + SR.FD = 0; + PC = 0xA0000000; +} + +void OTLBMULTIHIT(struct architectural_state * state) +{ + exception(state, "OTLBMULTIHIT"); + //Initialize_Module(Manual); + TEA = EXCEPTION_ADDRESS; + //PTEH.VPN = PAGE_NUMBER; + EXPEVT = 0x00000140; + VBR = 0x00000000; + SR.MD = 1; + SR.RB = 1; + SR.BL = 1; + SR.IMASK = 0xF; + SR.FD = 0; + PC = 0xA0000000; +} + +/* General exceptions */ + +void RTLBMISS(struct architectural_state * state, int32_t op1) +{ + exception(state, "RTLBMISS"); + TEA = EXCEPTION_ADDRESS; + //PTEH.VPN = PAGE_NUMBER; + SPC = PC; + SSR = _SR; + SGR = R15; + EXPEVT = 0x00000040; + SR.MD = 1; + SR.RB = 1; + SR.BL = 1; + PC = VBR + 0x00000400; +} + +void WTLBMISS(struct architectural_state * state, int32_t op1) +{ + exception(state, "WTLBMISS"); + TEA = EXCEPTION_ADDRESS; + //PTEH.VPN = PAGE_NUMBER; + SPC = PC; + SSR = _SR; + SGR = R15; + EXPEVT = 0x00000060; + SR.MD = 1; + SR.RB = 1; + SR.BL = 1; + PC = VBR + 0x00000400; +} + +void ITLBMISS(struct architectural_state * state) +{ + exception(state, "ITLBMISS"); + TEA = EXCEPTION_ADDRESS; + //PTEH.VPN = PAGE_NUMBER; + SPC = PC; + SSR = _SR; + SGR = R15; + EXPEVT = 0x00000040; + SR.MD = 1; + SR.RB = 1; + SR.BL = 1; + PC = VBR + 0x00000400; +} + +void FIRSTWRITE(struct architectural_state * state, int32_t op1) +{ + exception(state, "FIRSTWRITE"); + TEA = EXCEPTION_ADDRESS; + //PTEH.VPN = PAGE_NUMBER; + SPC = 
PC; + SSR = _SR; + SGR = R15; + EXPEVT = 0x00000080; + SR.MD = 1; + SR.RB = 1; + SR.BL = 1; + PC = VBR + 0x00000100; +} + +void READPROT(struct architectural_state * state, int32_t op1) +{ + exception(state, "READPROT"); + TEA = EXCEPTION_ADDRESS; + //PTEH.VPN = PAGE_NUMBER; + SPC = PC; + SSR = _SR; + SGR = R15; + EXPEVT = 0x000000A0; + SR.MD = 1; + SR.RB = 1; + SR.BL = 1; + PC = VBR + 0x00000100; +} + +void WRITEPROT(struct architectural_state * state, int32_t op1) +{ + exception(state, "WRITEPROT"); + TEA = EXCEPTION_ADDRESS; + //PTEH.VPN = PAGE_NUMBER; + SPC = PC; + SSR = _SR; + SGR = R15; + EXPEVT = 0x000000C0; + SR.MD = 1; + SR.RB = 1; + SR.BL = 1; + PC = VBR + 0x00000100; +} + +void EXECPROT(struct architectural_state * state) +{ + exception(state, "EXECPROT"); + TEA = EXCEPTION_ADDRESS; + //PTEH.VPN = PAGE_NUMBER; + SPC = PC; + SSR = _SR; + SGR = R15; + EXPEVT = 0x000000A0; + SR.MD = 1; + SR.RB = 1; + SR.BL = 1; + PC = VBR + 0x00000100; +} + +void RADDERR(struct architectural_state * state, int32_t op1) +{ + exception(state, "RADDERR"); + TEA = EXCEPTION_ADDRESS; + //PTEN.VPN = PAGE_NUMBER; + SPC = PC; + SSR = _SR; + SGR = R15; + EXPEVT = 0x000000E0; + SR.MD = 1; + SR.RB = 1; + SR.BL = 1; + PC = VBR + 0x00000100; +} + +void WADDERR(struct architectural_state * state, int32_t op1) +{ + exception(state, "WADDERR"); + TEA = EXCEPTION_ADDRESS; + //PTEN.VPN = PAGE_NUMBER; + SPC = PC; + SSR = _SR; + SGR = R15; + EXPEVT = 0x00000100; + SR.MD = 1; + SR.RB = 1; + SR.BL = 1; + PC = VBR + 0x00000100; +} + +void IADDERR(struct architectural_state * state) +{ + exception(state, "IADDERR"); + TEA = EXCEPTION_ADDRESS; + //PTEN.VPN = PAGE_NUMBER; + SPC = PC; + SSR = _SR; + SGR = R15; + EXPEVT = 0x000000E0; + SR.MD = 1; + SR.RB = 1; + SR.BL = 1; + PC = VBR + 0x00000100; +} + +void TRAP(struct architectural_state * state, int32_t imm) +{ + exception(state, "TRAP"); + SPC = PC + 2; + SSR = _SR; + SGR = R15; + TRA = imm << 2; + EXPEVT = 0x00000160; + SR.MD = 1; + SR.RB = 1; + SR.BL = 1; + PC = VBR + 0x00000100; +} + +void RESINST(struct architectural_state * state) +{ + exception(state, "RESINST"); + SPC = PC; + SSR = _SR; + SGR = R15; + EXPEVT = 0x00000180; + SR.MD = 1; + SR.RB = 1; + SR.BL = 1; + PC = VBR + 0x00000100; +} + +void ILLSLOT(struct architectural_state * state) +{ + exception(state, "ILLSLOT"); + SPC = PC - 2; + SSR = _SR; + SGR = R15; + EXPEVT = 0x000001A0; + SR.MD = 1; + SR.RB = 1; + SR.BL = 1; + PC = VBR + 0x00000100; +} + +void FPUDIS(struct architectural_state * state) +{ + exception(state, "FPUDIS"); + SPC = PC; + SSR = _SR; + SGR = R15; + EXPEVT = 0x00000800; + SR.MD = 1; + SR.RB = 1; + SR.BL = 1; + PC = VBR + 0x00000100; +} + +void SLOTFPUDIS(struct architectural_state * state) +{ + exception(state, "SLOTFPUDIS"); + SPC = PC - 2; + SSR = _SR; + SGR = R15; + EXPEVT = 0x00000820; + SR.MD = 1; + SR.RB = 1; + SR.BL = 1; + PC = VBR + 0x00000100; +} + +void UBRKBEFORE(struct architectural_state * state) +{ + exception(state, "UBRKBEFORE"); + SPC = PC; + SSR = _SR; + SGR = R15; + EXPEVT = 0x000001E0; + SR.MD = 1; + SR.RB = 1; + SR.BL = 1; + //PC = (BRCR.UBDE==1 ? DBR : VBR + H00000100); +} + +void UBRKAFTER(struct architectural_state * state) +{ + exception(state, "UBRKAFTER"); + SPC = PC + 2; + SSR = _SR; + SGR = R15; + EXPEVT = 0x000001E0; + SR.MD = 1; + SR.RB = 1; + SR.BL = 1; + //PC = (BRCR.UBDE==1 ? 
DBR : VBR + H00000100); +} + +void FPUEXC(struct architectural_state * state) +{ + exception(state, "FPUEXC"); + SPC = PC; + SSR = _SR; + SGR = R15; + EXPEVT = 0x00000120; + SR.MD = 1; + SR.RB = 1; + SR.BL = 1; + PC = VBR + 0x00000100; +} + +/* interrupts */ + +void NMI(struct architectural_state * state) +{ + exception(state, "NMI"); + SPC = PC; + SSR = _SR; + SGR = R15; + INTEVT = 0x000001C0; + SR.MD = 1; + SR.RB = 1; + SR.BL = 1; + PC = VBR + 0x00000600; +} + +void IRLINT(struct architectural_state * state) +{ + exception(state, "IRLINT"); + SPC = PC; + SSR = _SR; + SGR = R15; + //INTEVT = 0x00000200 ~ 0x000003C0; + SR.MD = 1; + SR.RB = 1; + SR.BL = 1; + PC = VBR + 0x00000600; +} diff --git a/c/exception.h b/c/exception.h new file mode 100644 index 0000000..6a71a8e --- /dev/null +++ b/c/exception.h @@ -0,0 +1,31 @@ +#pragma once + +#include + +#include "state.h" + +void POWERON(struct architectural_state * state); +void MANRESET(struct architectural_state * state); +void HUIDRESET(struct architectural_state * state); +void ITLBMULTIHIT(struct architectural_state * state); +void OTLBMULTIHIT(struct architectural_state * state); +void RTLBMISS(struct architectural_state * state, int32_t op1); +void WTLBMISS(struct architectural_state * state, int32_t op1); +void ITLBMISS(struct architectural_state * state); +void FIRSTWRITE(struct architectural_state * state, int32_t op1); +void READPROT(struct architectural_state * state, int32_t op1); +void WRITEPROT(struct architectural_state * state, int32_t op1); +void EXECPROT(struct architectural_state * state); +void RADDERR(struct architectural_state * state, int32_t op1); +void WADDERR(struct architectural_state * state, int32_t op1); +void IADDERR(struct architectural_state * state); +void TRAP(struct architectural_state * state, int32_t imm); +void RESINST(struct architectural_state * state); +void ILLSLOT(struct architectural_state * state); +void FPUDIS(struct architectural_state * state); +void SLOTFPUDIS(struct architectural_state * state); +void UBRKBEFORE(struct architectural_state * state); +void UBRKAFTER(struct architectural_state * state); +void FPUEXC(struct architectural_state * state); +void NMI(struct architectural_state * state); +void IRLINT(struct architectural_state * state); diff --git a/c/execute.c b/c/execute.c new file mode 100644 index 0000000..e0b9b5b --- /dev/null +++ b/c/execute.c @@ -0,0 +1,69 @@ +#include + +#include "execute.h" +#include "operations.h" +#include "state_helpers.h" +#include "decode_execute.h" +#include "exception.h" + +uint16_t fetch(struct architectural_state * state, struct memory_map * map) +{ + uint32_t address = zero_extend32(sign_extend32(state->pc[0])); + assert((address & 0b1) == 0); + return read_memory16(map, address); +} + +void step(struct architectural_state * state, struct memory_map * map) +{ + /* + * 3 Fetch the instruction bytes from the address in memory, as + * indicated by the current program counter, 2 bytes need to be + * fetched for each instruction. + */ + uint16_t instruction_code = fetch(state, map); + + /* + * 4 Calculate the default values of PC’ and PR’. PC’ is set to the + * value of PC”, PR’ is set to the value of PR”. + */ + state->pc[1] = state->pc[2]; + state->pr[1] = state->pr[2]; + + /* + * 5 Calculate the default values of PC” and PR” assuming continued + * sequential execution without procedure call or mode switch: PC” + * is PC’+2, while PR” is unchanged. 
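+ * (Here PC, PC' and PC'' are held in state->pc[0], state->pc[1] and
+ * state->pc[2]; PR, PR' and PR'' likewise in state->pr[0], state->pr[1]
+ * and state->pr[2].)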
+ */ + state->pc[2] = state->pc[1] + 2; + state->pr[2] = state->pr[2]; // unchanged + + /* + * 6 Decode and execute the instruction. This includes checks for + * synchronous events, such as exceptions and panics, and + * initiation of handling if required. Synchronous events are not + * accepted between a delayed branch and a delay slot. They are + * detected either before the delayed branch or after the delay + * slot. + */ + enum decode_status status = decode_and_execute_instruction(state, map, instruction_code); + switch (status) { + case DECODE__DEFINED: + break; + case DECODE__UNDEFINED: // undefined instruction + if (state->is_delay_slot) + ILLSLOT(state); + else + RESINST(state); + break; + default: + assert(false); + break; + } + + /* + * 7 Set the current program counter (PC) to the value of the next + * program counter (PC’) and PR to the value of PR’. + */ + state->pc[0] = state->pc[1]; + state->pr[0] = state->pr[1]; +} diff --git a/c/execute.h b/c/execute.h new file mode 100644 index 0000000..5204f08 --- /dev/null +++ b/c/execute.h @@ -0,0 +1,7 @@ +#pragma once + +#include "memory_map.h" +#include "state.h" + +uint16_t fetch(struct architectural_state * state, struct memory_map * map); +void step(struct architectural_state * state, struct memory_map * mem); diff --git a/c/impl.c b/c/impl.c new file mode 100644 index 0000000..b270534 --- /dev/null +++ b/c/impl.c @@ -0,0 +1,1952 @@ +#include "impl.h" +#include "operations.h" +#include "exception.h" +#include "state_helpers.h" + +/* MOV #imm,Rn */ +void mov__immediate(struct architectural_state * state, struct memory_map * map, const uint32_t i, const uint32_t n) +{ + int64_t imm = sign_extend8(i); + int64_t op2 = imm; + REG(state, n) = _register(op2); + + state->is_delay_slot = false; +} + + +/* MOV.W @(disp,PC),Rn */ +void mov_w__pc_relative_with_displacement(struct architectural_state * state, struct memory_map * map, const uint32_t d, const uint32_t n) +{ + int64_t pc = sign_extend32(state->pc[0]); + int64_t disp = zero_extend8(d) << 1; + if (is_delay_slot(state)) return ILLSLOT(state); + int64_t address = zero_extend32(disp + (pc + 4)); + int64_t op2 = sign_extend16(read_memory16(map, address)); + REG(state, n) = _register(op2); + + state->is_delay_slot = false; +} + + +/* MOV.L @(disp,PC),Rn */ +void mov_l__pc_relative_with_displacement(struct architectural_state * state, struct memory_map * map, const uint32_t d, const uint32_t n) +{ + int64_t pc = sign_extend32(state->pc[0]); + int64_t disp = zero_extend8(d) << 2; + if (is_delay_slot(state)) return ILLSLOT(state); + int64_t address = zero_extend32(disp + ((pc + 4) & (~0x3))); + int64_t op2 = sign_extend32(read_memory32(map, address)); + REG(state, n) = _register(op2); + + state->is_delay_slot = false; +} + + +/* MOV Rm,Rn */ +void mov__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n) +{ + int64_t op1 = zero_extend32(REG(state, m)); + int64_t op2 = op1; + REG(state, n) = _register(op2); + + state->is_delay_slot = false; +} + + +/* MOV.B Rm,@Rn */ +void mov_b__store_register_direct_data_transfer(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n) +{ + int64_t op1 = sign_extend32(REG(state, m)); + int64_t op2 = sign_extend32(REG(state, n)); + int64_t address = zero_extend32(op2); + write_memory8(map, address, op1); + + state->is_delay_slot = false; +} + + +/* MOV.W Rm,@Rn */ +void mov_w__store_register_direct_data_transfer(struct architectural_state * 
state, struct memory_map * map, const uint32_t m, const uint32_t n) +{ + int64_t op1 = sign_extend32(REG(state, m)); + int64_t op2 = sign_extend32(REG(state, n)); + int64_t address = zero_extend32(op2); + write_memory16(map, address, op1); + + state->is_delay_slot = false; +} + + +/* MOV.L Rm,@Rn */ +void mov_l__store_register_direct_data_transfer(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n) +{ + int64_t op1 = sign_extend32(REG(state, m)); + int64_t op2 = sign_extend32(REG(state, n)); + int64_t address = zero_extend32(op2); + write_memory32(map, address, op1); + + state->is_delay_slot = false; +} + + +/* MOV.B @Rm,Rn */ +void mov_b__load_register_direct_data_transfer(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n) +{ + int64_t op1 = sign_extend32(REG(state, m)); + int64_t address = zero_extend32(op1); + int64_t op2 = sign_extend8(read_memory8(map, address)); + REG(state, n) = _register(op2); + + state->is_delay_slot = false; +} + + +/* MOV.W @Rm,Rn */ +void mov_w__load_register_direct_data_transfer(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n) +{ + int64_t op1 = sign_extend32(REG(state, m)); + int64_t address = zero_extend32(op1); + int64_t op2 = sign_extend16(read_memory16(map, address)); + REG(state, n) = _register(op2); + + state->is_delay_slot = false; +} + + +/* MOV.L @Rm,Rn */ +void mov_l__load_register_direct_data_transfer(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n) +{ + int64_t op1 = sign_extend32(REG(state, m)); + int64_t address = zero_extend32(op1); + int64_t op2 = sign_extend32(read_memory32(map, address)); + REG(state, n) = _register(op2); + + state->is_delay_slot = false; +} + + +/* MOV.B Rm,@-Rn */ +void mov_b__store_direct_data_transfer_from_register(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n) +{ + int64_t op1 = sign_extend32(REG(state, m)); + int64_t op2 = sign_extend32(REG(state, n)); + int64_t address = zero_extend32(op2 - 1); + write_memory8(map, address, op1); + op2 = address; + REG(state, n) = _register(op2); + + state->is_delay_slot = false; +} + + +/* MOV.W Rm,@-Rn */ +void mov_w__store_direct_data_transfer_from_register(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n) +{ + int64_t op1 = sign_extend32(REG(state, m)); + int64_t op2 = sign_extend32(REG(state, n)); + int64_t address = zero_extend32(op2 - 2); + write_memory16(map, address, op1); + op2 = address; + REG(state, n) = _register(op2); + + state->is_delay_slot = false; +} + + +/* MOV.L Rm,@-Rn */ +void mov_l__store_direct_data_transfer_from_register(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n) +{ + int64_t op1 = sign_extend32(REG(state, m)); + int64_t op2 = sign_extend32(REG(state, n)); + int64_t address = zero_extend32(op2 - 4); + write_memory32(map, address, op1); + op2 = address; + REG(state, n) = _register(op2); + + state->is_delay_slot = false; +} + + +/* MOV.B @Rm+,Rn */ +void mov_b__load_direct_data_transfer_from_register(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n) +{ + int64_t m_field = zero_extend4(m); + int64_t n_field = zero_extend4(n); + int64_t op1 = sign_extend32(REG(state, m)); + int64_t address = zero_extend32(op1); + int64_t op2 = sign_extend8(read_memory8(map, address)); + if (m_field == 
n_field) op1 = op2; + else op1 = op1 + 1; + REG(state, m) = _register(op1); + REG(state, n) = _register(op2); + + state->is_delay_slot = false; +} + + +/* MOV.W @Rm+,Rn */ +void mov_w__load_direct_data_transfer_from_register(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n) +{ + int64_t m_field = zero_extend4(m); + int64_t n_field = zero_extend4(n); + int64_t op1 = sign_extend32(REG(state, m)); + int64_t address = zero_extend32(op1); + int64_t op2 = sign_extend16(read_memory16(map, address)); + if (m_field == n_field) op1 = op2; + else op1 = op1 + 2; + REG(state, m) = _register(op1); + REG(state, n) = _register(op2); + + state->is_delay_slot = false; +} + + +/* MOV.L @Rm+,Rn */ +void mov_l__load_direct_data_transfer_from_register(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n) +{ + int64_t m_field = zero_extend4(m); + int64_t n_field = zero_extend4(n); + int64_t op1 = sign_extend32(REG(state, m)); + int64_t address = zero_extend32(op1); + int64_t op2 = sign_extend32(read_memory32(map, address)); + if (m_field == n_field) op1 = op2; + else op1 = op1 + 4; + REG(state, m) = _register(op1); + REG(state, n) = _register(op2); + + state->is_delay_slot = false; +} + + +/* MOV.B R0,@(disp,Rn) */ +void mov_b__store_register_indirect_with_displacement(struct architectural_state * state, struct memory_map * map, const uint32_t d, const uint32_t n) +{ + int64_t r0 = sign_extend32(REG(state, 0)); + int64_t disp = zero_extend4(d); + int64_t op2 = sign_extend32(REG(state, n)); + int64_t address = zero_extend32(disp + op2); + write_memory8(map, address, r0); + + state->is_delay_slot = false; +} + + +/* MOV.W R0,@(disp,Rn) */ +void mov_w__store_register_indirect_with_displacement(struct architectural_state * state, struct memory_map * map, const uint32_t d, const uint32_t n) +{ + int64_t r0 = sign_extend32(REG(state, 0)); + int64_t disp = zero_extend4(d) << 1; + int64_t op2 = sign_extend32(REG(state, n)); + int64_t address = zero_extend32(disp + op2); + write_memory16(map, address, r0); + + state->is_delay_slot = false; +} + + +/* MOV.L Rm,@(disp,Rn) */ +void mov_l__store_register_indirect_with_displacement(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t d, const uint32_t n) +{ + int64_t op1 = sign_extend32(REG(state, m)); + int64_t disp = zero_extend4(d) << 2; + int64_t op3 = sign_extend32(REG(state, n)); + int64_t address = zero_extend32(disp + op3); + write_memory32(map, address, op1); + + state->is_delay_slot = false; +} + + +/* MOV.B @(disp,Rm),R0 */ +void mov_b__load_register_indirect_with_displacement(struct architectural_state * state, struct memory_map * map, const uint32_t d, const uint32_t m) +{ + int64_t disp = zero_extend4(d); + int64_t op2 = sign_extend32(REG(state, m)); + int64_t address = zero_extend32(disp + op2); + int64_t r0 = sign_extend8(read_memory8(map, address)); + REG(state, 0) = _register(r0); + + state->is_delay_slot = false; +} + + +/* MOV.W @(disp,Rm),R0 */ +void mov_w__load_register_indirect_with_displacement(struct architectural_state * state, struct memory_map * map, const uint32_t d, const uint32_t m) +{ + int64_t disp = zero_extend4(d) << 1; + int64_t op2 = sign_extend32(REG(state, m)); + int64_t address = zero_extend32(disp + op2); + int64_t r0 = sign_extend16(read_memory16(map, address)); + REG(state, 0) = _register(r0); + + state->is_delay_slot = false; +} + + +/* MOV.L @(disp,Rm),Rn */ +void 
mov_l__load_register_indirect_with_displacement(struct architectural_state * state, struct memory_map * map, const uint32_t d, const uint32_t m, const uint32_t n) +{ + int64_t disp = zero_extend4(d) << 2; + int64_t op2 = sign_extend32(REG(state, m)); + int64_t address = zero_extend32(disp + op2); + int64_t op3 = sign_extend32(read_memory32(map, address)); + REG(state, n) = _register(op3); + + state->is_delay_slot = false; +} + + +/* MOV.B Rm,@(R0,Rn) */ +void mov_b__store_indexed_register_indirect(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n) +{ + int64_t r0 = sign_extend32(REG(state, 0)); + int64_t op1 = sign_extend32(REG(state, m)); + int64_t op2 = sign_extend32(REG(state, n)); + int64_t address = zero_extend32(r0 + op2); + write_memory8(map, address, op1); + + state->is_delay_slot = false; +} + + +/* MOV.W Rm,@(R0,Rn) */ +void mov_w__store_indexed_register_indirect(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n) +{ + int64_t r0 = sign_extend32(REG(state, 0)); + int64_t op1 = sign_extend32(REG(state, m)); + int64_t op2 = sign_extend32(REG(state, n)); + int64_t address = zero_extend32(r0 + op2); + write_memory16(map, address, op1); + + state->is_delay_slot = false; +} + + +/* MOV.L Rm,@(R0,Rn) */ +void mov_l__store_indexed_register_indirect(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n) +{ + int64_t r0 = sign_extend32(REG(state, 0)); + int64_t op1 = sign_extend32(REG(state, m)); + int64_t op2 = sign_extend32(REG(state, n)); + int64_t address = zero_extend32(r0 + op2); + write_memory32(map, address, op1); + + state->is_delay_slot = false; +} + + +/* MOV.B @(R0,Rm),Rn */ +void mov_b__load_indexed_register_indirect(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n) +{ + int64_t r0 = sign_extend32(REG(state, 0)); + int64_t op1 = sign_extend32(REG(state, m)); + int64_t address = zero_extend32(r0 + op1); + int64_t op2 = sign_extend8(read_memory8(map, address)); + REG(state, n) = _register(op2); + + state->is_delay_slot = false; +} + + +/* MOV.W @(R0,Rm),Rn */ +void mov_w__load_indexed_register_indirect(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n) +{ + int64_t r0 = sign_extend32(REG(state, 0)); + int64_t op1 = sign_extend32(REG(state, m)); + int64_t address = zero_extend32(r0 + op1); + int64_t op2 = sign_extend16(read_memory16(map, address)); + REG(state, n) = _register(op2); + + state->is_delay_slot = false; +} + + +/* MOV.L @(R0,Rm),Rn */ +void mov_l__load_indexed_register_indirect(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n) +{ + int64_t r0 = sign_extend32(REG(state, 0)); + int64_t op1 = sign_extend32(REG(state, m)); + int64_t address = zero_extend32(r0 + op1); + int64_t op2 = sign_extend32(read_memory32(map, address)); + REG(state, n) = _register(op2); + + state->is_delay_slot = false; +} + + +/* MOV.B R0,@(disp,GBR) */ +void mov_b__store_gbr_indirect_with_displacement(struct architectural_state * state, struct memory_map * map, const uint32_t d) +{ + int64_t gbr = sign_extend32(state->gbr); + int64_t r0 = sign_extend32(REG(state, 0)); + int64_t disp = zero_extend8(d); + int64_t address = zero_extend32(disp + gbr); + write_memory8(map, address, r0); + + state->is_delay_slot = false; +} + + +/* MOV.W R0,@(disp,GBR) */ +void mov_w__store_gbr_indirect_with_displacement(struct architectural_state * 
state, struct memory_map * map, const uint32_t d) +{ + int64_t gbr = sign_extend32(state->gbr); + int64_t r0 = sign_extend32(REG(state, 0)); + int64_t disp = zero_extend8(d) << 1; + int64_t address = zero_extend32(disp + gbr); + write_memory16(map, address, r0); + + state->is_delay_slot = false; +} + + +/* MOV.L R0,@(disp,GBR) */ +void mov_l__store_gbr_indirect_with_displacement(struct architectural_state * state, struct memory_map * map, const uint32_t d) +{ + int64_t gbr = sign_extend32(state->gbr); + int64_t r0 = sign_extend32(REG(state, 0)); + int64_t disp = zero_extend8(d) << 2; + int64_t address = zero_extend32(disp + gbr); + write_memory32(map, address, r0); + + state->is_delay_slot = false; +} + + +/* MOV.B @(disp,GBR),R0 */ +void mov_b__load_gbr_indirect_with_displacement(struct architectural_state * state, struct memory_map * map, const uint32_t d) +{ + int64_t gbr = sign_extend32(state->gbr); + int64_t disp = zero_extend8(d); + int64_t address = zero_extend32(disp + gbr); + int64_t r0 = sign_extend8(read_memory8(map, address)); + REG(state, 0) = _register(r0); + + state->is_delay_slot = false; +} + + +/* MOV.W @(disp,GBR),R0 */ +void mov_w__load_gbr_indirect_with_displacement(struct architectural_state * state, struct memory_map * map, const uint32_t d) +{ + int64_t gbr = sign_extend32(state->gbr); + int64_t disp = zero_extend8(d) << 1; + int64_t address = zero_extend32(disp + gbr); + int64_t r0 = sign_extend16(read_memory16(map, address)); + REG(state, 0) = _register(r0); + + state->is_delay_slot = false; +} + + +/* MOV.L @(disp,GBR),R0 */ +void mov_l__load_gbr_indirect_with_displacement(struct architectural_state * state, struct memory_map * map, const uint32_t d) +{ + int64_t gbr = sign_extend32(state->gbr); + int64_t disp = zero_extend8(d) << 2; + int64_t address = zero_extend32(disp + gbr); + int64_t r0 = sign_extend32(read_memory32(map, address)); + REG(state, 0) = _register(r0); + + state->is_delay_slot = false; +} + + +/* MOVA @(disp,PC),R0 */ +void mova__pc_relative_with_displacement(struct architectural_state * state, struct memory_map * map, const uint32_t d) +{ + int64_t pc = sign_extend32(state->pc[0]); + int64_t disp = zero_extend8(d) << 2; + if (is_delay_slot(state)) return ILLSLOT(state); + int64_t r0 = disp + ((pc + 4) & (~0x3)); + REG(state, 0) = _register(r0); + + state->is_delay_slot = false; +} + + +/* MOVT Rn */ +void movt__destination_operand_only(struct architectural_state * state, struct memory_map * map, const uint32_t n) +{ + int64_t t = zero_extend1(state->sr.bits.t); + int64_t op1 = t; + REG(state, n) = _register(op1); + + state->is_delay_slot = false; +} + + +/* SWAP.B Rm,Rn */ +void swap_b__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n) +{ + int64_t op1 = zero_extend32(REG(state, m)); + int64_t op2 = ((bit_extract(op1, 16, 16) << 16) | (bit_extract(op1, 0, 8) << 8)) | bit_extract(op1, 8, 8); + REG(state, n) = _register(op2); + + state->is_delay_slot = false; +} + + +/* SWAP.W Rm,Rn */ +void swap_w__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n) +{ + int64_t op1 = zero_extend32(REG(state, m)); + int64_t op2 = (bit_extract(op1, 0, 16) << 16) | bit_extract(op1, 16, 16); + REG(state, n) = _register(op2); + + state->is_delay_slot = false; +} + + +/* XTRCT Rm,Rn */ +void xtrct__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const 
uint32_t n) +{ + int64_t op1 = zero_extend32(REG(state, m)); + int64_t op2 = zero_extend32(REG(state, n)); + op2 = bit_extract(op2, 16, 16) | (bit_extract(op1, 0, 16) << 16); + REG(state, n) = _register(op2); + + state->is_delay_slot = false; +} + + +/* ADD Rm,Rn */ +void add__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n) +{ + int64_t op1 = sign_extend32(REG(state, m)); + int64_t op2 = sign_extend32(REG(state, n)); + op2 = op2 + op1; + REG(state, n) = _register(op2); + + state->is_delay_slot = false; +} + + +/* ADD #imm,Rn */ +void add__immediate(struct architectural_state * state, struct memory_map * map, const uint32_t i, const uint32_t n) +{ + int64_t imm = sign_extend8(i); + int64_t op2 = sign_extend32(REG(state, n)); + op2 = op2 + imm; + REG(state, n) = _register(op2); + + state->is_delay_slot = false; +} + + +/* ADDC Rm,Rn */ +void addc__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n) +{ + int64_t t = zero_extend1(state->sr.bits.t); + int64_t op1 = zero_extend32(sign_extend32(REG(state, m))); + int64_t op2 = zero_extend32(sign_extend32(REG(state, n))); + op2 = (op2 + op1) + t; + t = bit_extract(op2, 32, 1); + REG(state, n) = _register(op2); + state->sr.bits.t = bit(t); + + state->is_delay_slot = false; +} + + +/* ADDV Rm,Rn */ +void addv__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n) +{ + int64_t op1 = sign_extend32(REG(state, m)); + int64_t op2 = sign_extend32(REG(state, n)); + op2 = op2 + op1; + int64_t t = unary_int(((op2 < (-(1LL << 31))) || (op2 >= (1LL << 31)))); + REG(state, n) = _register(op2); + state->sr.bits.t = bit(t); + + state->is_delay_slot = false; +} + + +/* CMP/EQ #imm,R0 */ +void cmp_eq__immediate(struct architectural_state * state, struct memory_map * map, const uint32_t i) +{ + int64_t r0 = sign_extend32(REG(state, 0)); + int64_t imm = sign_extend8(i); + int64_t t = unary_int((r0 == imm)); + state->sr.bits.t = bit(t); + + state->is_delay_slot = false; +} + + +/* CMP/EQ Rm,Rn */ +void cmp_eq__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n) +{ + int64_t op1 = sign_extend32(REG(state, m)); + int64_t op2 = sign_extend32(REG(state, n)); + int64_t t = unary_int((op2 == op1)); + state->sr.bits.t = bit(t); + + state->is_delay_slot = false; +} + + +/* CMP/HS Rm,Rn */ +void cmp_hs__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n) +{ + int64_t op1 = zero_extend32(sign_extend32(REG(state, m))); + int64_t op2 = zero_extend32(sign_extend32(REG(state, n))); + int64_t t = unary_int((op2 >= op1)); + state->sr.bits.t = bit(t); + + state->is_delay_slot = false; +} + + +/* CMP/GE Rm,Rn */ +void cmp_ge__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n) +{ + int64_t op1 = sign_extend32(REG(state, m)); + int64_t op2 = sign_extend32(REG(state, n)); + int64_t t = unary_int((op2 >= op1)); + state->sr.bits.t = bit(t); + + state->is_delay_slot = false; +} + + +/* CMP/HI Rm,Rn */ +void cmp_hi__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n) +{ + int64_t op1 = zero_extend32(sign_extend32(REG(state, m))); + int64_t op2 = 
zero_extend32(sign_extend32(REG(state, n))); + int64_t t = unary_int((op2 > op1)); + state->sr.bits.t = bit(t); + + state->is_delay_slot = false; +} + + +/* CMP/GT Rm,Rn */ +void cmp_gt__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n) +{ + int64_t op1 = sign_extend32(REG(state, m)); + int64_t op2 = sign_extend32(REG(state, n)); + int64_t t = unary_int((op2 > op1)); + state->sr.bits.t = bit(t); + + state->is_delay_slot = false; +} + + +/* CMP/PZ Rn */ +void cmp_pz__destination_operand_only(struct architectural_state * state, struct memory_map * map, const uint32_t n) +{ + int64_t op1 = sign_extend32(REG(state, n)); + int64_t t = unary_int((op1 >= 0)); + state->sr.bits.t = bit(t); + + state->is_delay_slot = false; +} + + +/* CMP/PL Rn */ +void cmp_pl__destination_operand_only(struct architectural_state * state, struct memory_map * map, const uint32_t n) +{ + int64_t op1 = sign_extend32(REG(state, n)); + int64_t t = unary_int((op1 > 0)); + state->sr.bits.t = bit(t); + + state->is_delay_slot = false; +} + + +/* CMP/STR Rm,Rn */ +void cmp_str__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n) +{ + int64_t op1 = sign_extend32(REG(state, m)); + int64_t op2 = sign_extend32(REG(state, n)); + int64_t temp = op1 ^ op2; + int64_t t = unary_int((bit_extract(temp, 0, 8) == 0)); + t = (unary_int((bit_extract(temp, 8, 8) == 0))) | t; + t = (unary_int((bit_extract(temp, 16, 8) == 0))) | t; + t = (unary_int((bit_extract(temp, 24, 8) == 0))) | t; + state->sr.bits.t = bit(t); + + state->is_delay_slot = false; +} + + +/* DIV1 Rm,Rn */ +void div1__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n) +{ + int64_t q = zero_extend1(state->sr.bits.q); + int64_t _m = zero_extend1(state->sr.bits.m); + int64_t t = zero_extend1(state->sr.bits.t); + int64_t op1 = zero_extend32(sign_extend32(REG(state, m))); + int64_t op2 = zero_extend32(sign_extend32(REG(state, n))); + int64_t oldq = q; + q = bit_extract(op2, 31, 1); + op2 = zero_extend32(op2 << 1) | t; + if (oldq == _m) op2 = op2 - op1; + else op2 = op2 + op1; + q = (q ^ _m) ^ bit_extract(op2, 32, 1); + t = 1 - (q ^ _m); + REG(state, n) = _register(op2); + state->sr.bits.q = bit(q); + state->sr.bits.t = bit(t); + + state->is_delay_slot = false; +} + + +/* DIV0S Rm,Rn */ +void div0s__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n) +{ + int64_t op1 = sign_extend32(REG(state, m)); + int64_t op2 = sign_extend32(REG(state, n)); + int64_t q = bit_extract(op2, 31, 1); + int64_t _m = bit_extract(op1, 31, 1); + int64_t t = _m ^ q; + state->sr.bits.q = bit(q); + state->sr.bits.m = bit(_m); + state->sr.bits.t = bit(t); + + state->is_delay_slot = false; +} + + +/* DIV0U */ +void div0u__no_operand(struct architectural_state * state, struct memory_map * map) +{ + int64_t q = 0; + int64_t _m = 0; + int64_t t = 0; + state->sr.bits.q = bit(q); + state->sr.bits.m = bit(_m); + state->sr.bits.t = bit(t); + + state->is_delay_slot = false; +} + + +/* DMULS.L Rm,Rn */ +void dmuls_l__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n) +{ + int64_t op1 = sign_extend32(REG(state, m)); + int64_t op2 = sign_extend32(REG(state, n)); + int64_t mac = op2 * op1; + int64_t macl = mac; + int64_t mach = mac >> 32; + 
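/* 32 x 32-bit signed multiply: the 64-bit product is split into MACH (upper 32 bits) and MACL (lower 32 bits). */ +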
state->macl = zero_extend32(macl); + state->mach = zero_extend32(mach); + + state->is_delay_slot = false; +} + + +/* DMULU.L Rm,Rn */ +void dmulu_l__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n) +{ + int64_t op1 = zero_extend32(sign_extend32(REG(state, m))); + int64_t op2 = zero_extend32(sign_extend32(REG(state, n))); + int64_t mac = op2 * op1; + int64_t macl = mac; + int64_t mach = mac >> 32; + state->macl = zero_extend32(macl); + state->mach = zero_extend32(mach); + + state->is_delay_slot = false; +} + + +/* DT Rn */ +void dt__destination_operand_only(struct architectural_state * state, struct memory_map * map, const uint32_t n) +{ + int64_t op1 = sign_extend32(REG(state, n)); + op1 = op1 - 1; + int64_t t = unary_int((op1 == 0)); + REG(state, n) = _register(op1); + state->sr.bits.t = bit(t); + + state->is_delay_slot = false; +} + + +/* EXTS.B Rm,Rn */ +void exts_b__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n) +{ + int64_t op1 = sign_extend8(REG(state, m)); + int64_t op2 = op1; + REG(state, n) = _register(op2); + + state->is_delay_slot = false; +} + + +/* EXTS.W Rm,Rn */ +void exts_w__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n) +{ + int64_t op1 = sign_extend16(REG(state, m)); + int64_t op2 = op1; + REG(state, n) = _register(op2); + + state->is_delay_slot = false; +} + + +/* EXTU.B Rm,Rn */ +void extu_b__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n) +{ + int64_t op1 = zero_extend8(REG(state, m)); + int64_t op2 = op1; + REG(state, n) = _register(op2); + + state->is_delay_slot = false; +} + + +/* EXTU.W Rm,Rn */ +void extu_w__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n) +{ + int64_t op1 = zero_extend16(REG(state, m)); + int64_t op2 = op1; + REG(state, n) = _register(op2); + + state->is_delay_slot = false; +} + + +/* MAC.L @Rm+,@Rn+ */ +void mac_l__multiply_and_accumulate_operation(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n) +{ + int64_t macl = zero_extend32(state->macl); + int64_t mach = zero_extend32(state->mach); + int64_t s = zero_extend1(state->sr.bits.s); + int64_t m_field = zero_extend4(m); + int64_t n_field = zero_extend4(n); + int64_t m_address = sign_extend32(REG(state, m)); + int64_t n_address = sign_extend32(REG(state, n)); + int64_t value2 = sign_extend32(read_memory32(map, zero_extend32(n_address))); + n_address = n_address + 4; + if (n_field == m_field) + { + m_address = m_address + 4; + n_address = n_address + 4; + } + int64_t value1 = sign_extend32(read_memory32(map, zero_extend32(m_address))); + m_address = m_address + 4; + int64_t mul = value2 * value1; + int64_t mac = (mach << 32) + macl; + int64_t result = mac + mul; + if (s == 1) if (bit_extract(((result ^ mac) & (result ^ mul)), 63, 1) == 1) if (bit_extract(mac, 63, 1) == 0) result = (1LL << 47) - 1; + else result = -(1LL << 47); + else result = signed_saturate48(result); + macl = result; + mach = result >> 32; + REG(state, m) = _register(m_address); + REG(state, n) = _register(n_address); + state->macl = zero_extend32(macl); + state->mach = zero_extend32(mach); + + state->is_delay_slot = false; +} + + +/* MAC.W @Rm+,@Rn+ */ +void 
mac_w__multiply_and_accumulate_operation(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n) +{ + int64_t macl = zero_extend32(state->macl); + int64_t mach = zero_extend32(state->mach); + int64_t s = zero_extend1(state->sr.bits.s); + int64_t m_field = zero_extend4(m); + int64_t n_field = zero_extend4(n); + int64_t m_address = sign_extend32(REG(state, m)); + int64_t n_address = sign_extend32(REG(state, n)); + int64_t value2 = sign_extend16(read_memory16(map, zero_extend32(n_address))); + n_address = n_address + 2; + if (n_field == m_field) + { + m_address = m_address + 2; + n_address = n_address + 2; + } + int64_t value1 = sign_extend16(read_memory16(map, zero_extend32(m_address))); + m_address = m_address + 2; + int64_t mul = value2 * value1; + int64_t result = 0; + if (s == 1) + { + macl = sign_extend32(macl) + mul; + int64_t temp = signed_saturate32(macl); + if (macl == temp) result = (mach << 32) | zero_extend32(macl); + else result = (1LL << 32) | zero_extend32(temp); + } + else result = ((mach << 32) + macl) + mul; + macl = result; + mach = result >> 32; + REG(state, m) = _register(m_address); + REG(state, n) = _register(n_address); + state->macl = zero_extend32(macl); + state->mach = zero_extend32(mach); + + state->is_delay_slot = false; +} + + +/* MUL.L Rm,Rn */ +void mul_l__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n) +{ + int64_t op1 = sign_extend32(REG(state, m)); + int64_t op2 = sign_extend32(REG(state, n)); + int64_t macl = op1 * op2; + state->macl = zero_extend32(macl); + + state->is_delay_slot = false; +} + + +/* MULS.W Rm,Rn */ +void muls_w__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n) +{ + int64_t op1 = sign_extend16(sign_extend32(REG(state, m))); + int64_t op2 = sign_extend16(sign_extend32(REG(state, n))); + int64_t macl = op1 * op2; + state->macl = zero_extend32(macl); + + state->is_delay_slot = false; +} + + +/* MULU.W Rm,Rn */ +void mulu_w__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n) +{ + int64_t op1 = zero_extend16(sign_extend32(REG(state, m))); + int64_t op2 = zero_extend16(sign_extend32(REG(state, n))); + int64_t macl = op1 * op2; + state->macl = zero_extend32(macl); + + state->is_delay_slot = false; +} + + +/* NEG Rm,Rn */ +void neg__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n) +{ + int64_t op1 = sign_extend32(REG(state, m)); + int64_t op2 = -op1; + REG(state, n) = _register(op2); + + state->is_delay_slot = false; +} + + +/* NEGC Rm,Rn */ +void negc__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n) +{ + int64_t t = zero_extend1(state->sr.bits.t); + int64_t op1 = zero_extend32(REG(state, m)); + int64_t op2 = (-op1) - t; + t = bit_extract(op2, 32, 1); + REG(state, n) = _register(op2); + state->sr.bits.t = bit(t); + + state->is_delay_slot = false; +} + + +/* SUB Rm,Rn */ +void sub__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n) +{ + int64_t op1 = sign_extend32(REG(state, m)); + int64_t op2 = sign_extend32(REG(state, n)); + op2 = op2 - op1; + REG(state, n) = _register(op2); + + state->is_delay_slot = false; +} + + +/* SUBC Rm,Rn */ 
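+/* Subtract with borrow: Rn - Rm - T is computed on zero-extended 32-bit values, so bit 32 of the 64-bit result is the borrow out and becomes the new T. */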
+void subc__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n) +{ + int64_t t = zero_extend1(state->sr.bits.t); + int64_t op1 = zero_extend32(sign_extend32(REG(state, m))); + int64_t op2 = zero_extend32(sign_extend32(REG(state, n))); + op2 = (op2 - op1) - t; + t = bit_extract(op2, 32, 1); + REG(state, n) = _register(op2); + state->sr.bits.t = bit(t); + + state->is_delay_slot = false; +} + + +/* SUBV Rm,Rn */ +void subv__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n) +{ + int64_t op1 = sign_extend32(REG(state, m)); + int64_t op2 = sign_extend32(REG(state, n)); + op2 = op2 - op1; + int64_t t = unary_int(((op2 < (-(1LL << 31))) || (op2 >= (1LL << 31)))); + REG(state, n) = _register(op2); + state->sr.bits.t = bit(t); + + state->is_delay_slot = false; +} + + +/* AND Rm,Rn */ +void and__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n) +{ + int64_t op1 = zero_extend32(REG(state, m)); + int64_t op2 = zero_extend32(REG(state, n)); + op2 = op2 & op1; + REG(state, n) = _register(op2); + + state->is_delay_slot = false; +} + + +/* AND #imm,R0 */ +void and__immediate(struct architectural_state * state, struct memory_map * map, const uint32_t i) +{ + int64_t r0 = zero_extend32(REG(state, 0)); + int64_t imm = zero_extend8(i); + r0 = r0 & imm; + REG(state, 0) = _register(r0); + + state->is_delay_slot = false; +} + + +/* AND.B #imm,@(R0,GBR) */ +void and_b__store_indexed_gbr_indirect(struct architectural_state * state, struct memory_map * map, const uint32_t i) +{ + int64_t r0 = sign_extend32(REG(state, 0)); + int64_t gbr = sign_extend32(state->gbr); + int64_t imm = zero_extend8(i); + int64_t address = zero_extend32(r0 + gbr); + int64_t value = zero_extend8(read_memory8(map, address)); + value = value & imm; + write_memory8(map, address, value); + + state->is_delay_slot = false; +} + + +/* NOT Rm,Rn */ +void not__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n) +{ + int64_t op1 = zero_extend32(REG(state, m)); + int64_t op2 = ~op1; + REG(state, n) = _register(op2); + + state->is_delay_slot = false; +} + + +/* OR Rm,Rn */ +void or__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n) +{ + int64_t op1 = zero_extend32(REG(state, m)); + int64_t op2 = zero_extend32(REG(state, n)); + op2 = op2 | op1; + REG(state, n) = _register(op2); + + state->is_delay_slot = false; +} + + +/* OR #imm,R0 */ +void or__immediate(struct architectural_state * state, struct memory_map * map, const uint32_t i) +{ + int64_t r0 = zero_extend32(REG(state, 0)); + int64_t imm = zero_extend8(i); + r0 = r0 | imm; + REG(state, 0) = _register(r0); + + state->is_delay_slot = false; +} + + +/* OR.B #imm,@(R0,GBR) */ +void or_b__store_indexed_gbr_indirect(struct architectural_state * state, struct memory_map * map, const uint32_t i) +{ + int64_t r0 = sign_extend32(REG(state, 0)); + int64_t gbr = sign_extend32(state->gbr); + int64_t imm = zero_extend8(i); + int64_t address = zero_extend32(r0 + gbr); + int64_t value = zero_extend8(read_memory8(map, address)); + value = value | imm; + write_memory8(map, address, value); + + state->is_delay_slot = false; +} + + +/* TAS.B @Rn */ +void tas_b__destination_operand_only(struct architectural_state * state, struct 
memory_map * map, const uint32_t n) +{ + int64_t op1 = sign_extend32(REG(state, n)); + int64_t address = zero_extend32(op1); + ocbp(state, address); + int64_t value = zero_extend8(read_memory8(map, address)); + int64_t t = unary_int((value == 0)); + value = value | (1LL << 7); + write_memory8(map, address, value); + state->sr.bits.t = bit(t); + + state->is_delay_slot = false; +} + + +/* TST Rm,Rn */ +void tst__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n) +{ + int64_t op1 = sign_extend32(REG(state, m)); + int64_t op2 = sign_extend32(REG(state, n)); + int64_t t = unary_int(((op1 & op2) == 0)); + state->sr.bits.t = bit(t); + + state->is_delay_slot = false; +} + + +/* TST #imm,R0 */ +void tst__immediate(struct architectural_state * state, struct memory_map * map, const uint32_t i) +{ + int64_t r0 = sign_extend32(REG(state, 0)); + int64_t imm = zero_extend8(i); + int64_t t = unary_int(((r0 & imm) == 0)); + state->sr.bits.t = bit(t); + + state->is_delay_slot = false; +} + + +/* TST.B #imm,@(R0,GBR) */ +void tst_b__store_indexed_gbr_indirect(struct architectural_state * state, struct memory_map * map, const uint32_t i) +{ + int64_t r0 = sign_extend32(REG(state, 0)); + int64_t gbr = sign_extend32(state->gbr); + int64_t imm = zero_extend8(i); + int64_t address = zero_extend32(r0 + gbr); + int64_t value = zero_extend8(read_memory8(map, address)); + int64_t t = ((value & imm) == 0); + state->sr.bits.t = bit(t); + + state->is_delay_slot = false; +} + + +/* XOR Rm,Rn */ +void xor__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n) +{ + int64_t op1 = zero_extend32(REG(state, m)); + int64_t op2 = zero_extend32(REG(state, n)); + op2 = op2 ^ op1; + REG(state, n) = _register(op2); + + state->is_delay_slot = false; +} + + +/* XOR #imm,R0 */ +void xor__immediate(struct architectural_state * state, struct memory_map * map, const uint32_t i) +{ + int64_t r0 = zero_extend32(REG(state, 0)); + int64_t imm = zero_extend8(i); + r0 = r0 ^ imm; + REG(state, 0) = _register(r0); + + state->is_delay_slot = false; +} + + +/* XOR.B #imm,@(R0,GBR) */ +void xor_b__store_indexed_gbr_indirect(struct architectural_state * state, struct memory_map * map, const uint32_t i) +{ + int64_t r0 = sign_extend32(REG(state, 0)); + int64_t gbr = sign_extend32(state->gbr); + int64_t imm = zero_extend8(i); + int64_t address = zero_extend32(r0 + gbr); + int64_t value = zero_extend8(read_memory8(map, address)); + value = value ^ imm; + write_memory8(map, address, value); + + state->is_delay_slot = false; +} + + +/* ROTL Rn */ +void rotl__destination_operand_only(struct architectural_state * state, struct memory_map * map, const uint32_t n) +{ + int64_t op1 = zero_extend32(REG(state, n)); + int64_t t = bit_extract(op1, 31, 1); + op1 = (op1 << 1) | t; + REG(state, n) = _register(op1); + state->sr.bits.t = bit(t); + + state->is_delay_slot = false; +} + + +/* ROTR Rn */ +void rotr__destination_operand_only(struct architectural_state * state, struct memory_map * map, const uint32_t n) +{ + int64_t op1 = zero_extend32(REG(state, n)); + int64_t t = bit_extract(op1, 0, 1); + op1 = (op1 >> 1) | (t << 31); + REG(state, n) = _register(op1); + state->sr.bits.t = bit(t); + + state->is_delay_slot = false; +} + + +/* ROTCL Rn */ +void rotcl__destination_operand_only(struct architectural_state * state, struct memory_map * map, const uint32_t n) +{ + int64_t t = zero_extend1(state->sr.bits.t); + 
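/* Rotate left through T: the old T bit shifts into bit 0 and the old bit 31 becomes the new T. */ +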
int64_t op1 = zero_extend32(REG(state, n)); + op1 = (op1 << 1) | t; + t = bit_extract(op1, 32, 1); + REG(state, n) = _register(op1); + state->sr.bits.t = bit(t); + + state->is_delay_slot = false; +} + + +/* ROTCR Rn */ +void rotcr__destination_operand_only(struct architectural_state * state, struct memory_map * map, const uint32_t n) +{ + int64_t t = zero_extend1(state->sr.bits.t); + int64_t op1 = zero_extend32(REG(state, n)); + int64_t oldt = t; + t = bit_extract(op1, 0, 1); + op1 = (op1 >> 1) | (oldt << 31); + REG(state, n) = _register(op1); + state->sr.bits.t = bit(t); + + state->is_delay_slot = false; +} + + +/* SHAL Rn */ +void shal__destination_operand_only(struct architectural_state * state, struct memory_map * map, const uint32_t n) +{ + int64_t op1 = sign_extend32(REG(state, n)); + int64_t t = bit_extract(op1, 31, 1); + op1 = op1 << 1; + REG(state, n) = _register(op1); + state->sr.bits.t = bit(t); + + state->is_delay_slot = false; +} + + +/* SHAR Rn */ +void shar__destination_operand_only(struct architectural_state * state, struct memory_map * map, const uint32_t n) +{ + int64_t op1 = sign_extend32(REG(state, n)); + int64_t t = bit_extract(op1, 0, 1); + op1 = op1 >> 1; + REG(state, n) = _register(op1); + state->sr.bits.t = bit(t); + + state->is_delay_slot = false; +} + + +/* SHLL Rn */ +void shll__destination_operand_only(struct architectural_state * state, struct memory_map * map, const uint32_t n) +{ + int64_t op1 = zero_extend32(REG(state, n)); + int64_t t = bit_extract(op1, 31, 1); + op1 = op1 << 1; + REG(state, n) = _register(op1); + state->sr.bits.t = bit(t); + + state->is_delay_slot = false; +} + + +/* SHLR Rn */ +void shlr__destination_operand_only(struct architectural_state * state, struct memory_map * map, const uint32_t n) +{ + int64_t op1 = zero_extend32(REG(state, n)); + int64_t t = bit_extract(op1, 0, 1); + op1 = op1 >> 1; + REG(state, n) = _register(op1); + state->sr.bits.t = bit(t); + + state->is_delay_slot = false; +} + + +/* SHLL2 Rn */ +void shll2__destination_operand_only(struct architectural_state * state, struct memory_map * map, const uint32_t n) +{ + int64_t op1 = zero_extend32(REG(state, n)); + op1 = op1 << 2; + REG(state, n) = _register(op1); + + state->is_delay_slot = false; +} + + +/* SHLR2 Rn */ +void shlr2__destination_operand_only(struct architectural_state * state, struct memory_map * map, const uint32_t n) +{ + int64_t op1 = zero_extend32(REG(state, n)); + op1 = op1 >> 2; + REG(state, n) = _register(op1); + + state->is_delay_slot = false; +} + + +/* SHLL8 Rn */ +void shll8__destination_operand_only(struct architectural_state * state, struct memory_map * map, const uint32_t n) +{ + int64_t op1 = zero_extend32(REG(state, n)); + op1 = op1 << 8; + REG(state, n) = _register(op1); + + state->is_delay_slot = false; +} + + +/* SHLR8 Rn */ +void shlr8__destination_operand_only(struct architectural_state * state, struct memory_map * map, const uint32_t n) +{ + int64_t op1 = zero_extend32(REG(state, n)); + op1 = op1 >> 8; + REG(state, n) = _register(op1); + + state->is_delay_slot = false; +} + + +/* SHLL16 Rn */ +void shll16__destination_operand_only(struct architectural_state * state, struct memory_map * map, const uint32_t n) +{ + int64_t op1 = zero_extend32(REG(state, n)); + op1 = op1 << 16; + REG(state, n) = _register(op1); + + state->is_delay_slot = false; +} + + +/* SHLR16 Rn */ +void shlr16__destination_operand_only(struct architectural_state * state, struct memory_map * map, const uint32_t n) +{ + int64_t op1 = zero_extend32(REG(state, n)); + op1 = 
op1 >> 16; + REG(state, n) = _register(op1); + + state->is_delay_slot = false; +} + + +/* BF label */ +void bf__pc_relative(struct architectural_state * state, struct memory_map * map, const uint32_t d) +{ + int64_t t = zero_extend1(state->sr.bits.t); + int64_t pc = sign_extend32(state->pc[0]); + int64_t newpc = sign_extend32(state->pc[1]); + int64_t delayedpc = sign_extend32(state->pc[2]); + int64_t label = sign_extend8(d) << 1; + if (is_delay_slot(state)) return ILLSLOT(state); + if (t == 0) + { + int64_t temp = zero_extend32(pc + 4 + label); + newpc = temp; + delayedpc = temp + 2; + } + state->pc[1] = _register(newpc); + state->pc[2] = _register(delayedpc); + + state->is_delay_slot = false; +} + + +/* BF/S label */ +void bf_s__pc_relative(struct architectural_state * state, struct memory_map * map, const uint32_t d) +{ + int64_t t = zero_extend1(state->sr.bits.t); + int64_t pc = sign_extend32(state->pc[0]); + int64_t delayedpc = sign_extend32(state->pc[2]); + int64_t label = sign_extend8(d) << 1; + if (is_delay_slot(state)) return ILLSLOT(state); + if (t == 0) + { + int64_t temp = zero_extend32(pc + 4 + label); + delayedpc = temp; + } + state->pc[2] = _register(delayedpc); + + state->is_delay_slot = true; +} + + +/* BT label */ +void bt__pc_relative(struct architectural_state * state, struct memory_map * map, const uint32_t d) +{ + int64_t t = zero_extend1(state->sr.bits.t); + int64_t pc = sign_extend32(state->pc[0]); + int64_t newpc = sign_extend32(state->pc[1]); + int64_t delayedpc = sign_extend32(state->pc[2]); + int64_t label = sign_extend8(d) << 1; + if (is_delay_slot(state)) return ILLSLOT(state); + if (t == 1) + { + int64_t temp = zero_extend32(pc + 4 + label); + newpc = temp; + delayedpc = temp + 2; + } + state->pc[1] = _register(newpc); + state->pc[2] = _register(delayedpc); + + state->is_delay_slot = false; +} + + +/* BT/S label */ +void bt_s__pc_relative(struct architectural_state * state, struct memory_map * map, const uint32_t d) +{ + int64_t t = zero_extend1(state->sr.bits.t); + int64_t pc = sign_extend32(state->pc[0]); + int64_t delayedpc = sign_extend32(state->pc[2]); + int64_t label = sign_extend8(d) << 1; + if (is_delay_slot(state)) return ILLSLOT(state); + if (t == 1) + { + int64_t temp = zero_extend32(pc + 4 + label); + delayedpc = temp; + } + state->pc[2] = _register(delayedpc); + + state->is_delay_slot = true; +} + + +/* BRA label */ +void bra__pc_relative(struct architectural_state * state, struct memory_map * map, const uint32_t d) +{ + int64_t pc = sign_extend32(state->pc[0]); + int64_t label = sign_extend12(d) << 1; + if (is_delay_slot(state)) return ILLSLOT(state); + int64_t temp = zero_extend32(pc + 4 + label); + int64_t delayedpc = temp; + state->pc[2] = _register(delayedpc); + + state->is_delay_slot = true; +} + + +/* BRAF Rn */ +void braf__destination_operand_only(struct architectural_state * state, struct memory_map * map, const uint32_t n) +{ + int64_t pc = sign_extend32(state->pc[0]); + int64_t op1 = sign_extend32(REG(state, n)); + if (is_delay_slot(state)) return ILLSLOT(state); + int64_t target = zero_extend32(pc + 4 + op1); + int64_t delayedpc = target & (~0x1); + state->pc[2] = _register(delayedpc); + + state->is_delay_slot = true; +} + + +/* BSR label */ +void bsr__pc_relative(struct architectural_state * state, struct memory_map * map, const uint32_t d) +{ + int64_t pc = sign_extend32(state->pc[0]); + int64_t label = sign_extend12(d) << 1; + if (is_delay_slot(state)) return ILLSLOT(state); + int64_t delayedpr = pc + 4; + int64_t temp = 
zero_extend32(pc + 4 + label); + int64_t delayedpc = temp; + state->pr[2] = _register(delayedpr); + state->pc[2] = _register(delayedpc); + + state->is_delay_slot = true; +} + + +/* BSRF Rn */ +void bsrf__destination_operand_only(struct architectural_state * state, struct memory_map * map, const uint32_t n) +{ + int64_t pc = sign_extend32(state->pc[0]); + int64_t op1 = sign_extend32(REG(state, n)); + if (is_delay_slot(state)) return ILLSLOT(state); + int64_t delayedpr = pc + 4; + int64_t target = zero_extend32(pc + 4 + op1); + int64_t delayedpc = target & (~0x1); + state->pr[2] = _register(delayedpr); + state->pc[2] = _register(delayedpc); + + state->is_delay_slot = true; +} + + +/* JMP @Rn */ +void jmp__destination_operand_only(struct architectural_state * state, struct memory_map * map, const uint32_t n) +{ + int64_t op1 = sign_extend32(REG(state, n)); + if (is_delay_slot(state)) return ILLSLOT(state); + int64_t target = op1; + int64_t delayedpc = target & (~0x1); + state->pc[2] = _register(delayedpc); + + state->is_delay_slot = true; +} + + +/* JSR @Rn */ +void jsr__destination_operand_only(struct architectural_state * state, struct memory_map * map, const uint32_t n) +{ + int64_t pc = sign_extend32(state->pc[0]); + int64_t op1 = sign_extend32(REG(state, n)); + if (is_delay_slot(state)) return ILLSLOT(state); + int64_t delayedpr = pc + 4; + int64_t target = op1; + int64_t delayedpc = target & (~0x1); + state->pr[2] = _register(delayedpr); + state->pc[2] = _register(delayedpc); + + state->is_delay_slot = true; +} + + +/* RTS */ +void rts__no_operand(struct architectural_state * state, struct memory_map * map) +{ + int64_t pr = sign_extend32(state->pr[0]); + if (is_delay_slot(state)) return ILLSLOT(state); + int64_t target = pr; + int64_t delayedpc = target & (~0x1); + state->pc[2] = _register(delayedpc); + + state->is_delay_slot = true; +} + + +/* CLRMAC */ +void clrmac__no_operand(struct architectural_state * state, struct memory_map * map) +{ + int64_t macl = 0; + int64_t mach = 0; + state->macl = zero_extend32(macl); + state->mach = zero_extend32(mach); + + state->is_delay_slot = false; +} + + +/* CLRS */ +void clrs__no_operand(struct architectural_state * state, struct memory_map * map) +{ + int64_t s = 0; + state->sr.bits.s = bit(s); + + state->is_delay_slot = false; +} + + +/* CLRT */ +void clrt__no_operand(struct architectural_state * state, struct memory_map * map) +{ + int64_t t = 0; + state->sr.bits.t = bit(t); + + state->is_delay_slot = false; +} + + +/* LDC Rm,SR */ +void ldc__transfer_to_sr(struct architectural_state * state, struct memory_map * map, const uint32_t m) +{ + int64_t md = zero_extend1(state->sr.bits.md); + if (md == 0) return RESINST(state); + int64_t op1 = sign_extend32(REG(state, m)); + int64_t sr = op1; + state->sr.value = _register(sr); + + state->is_delay_slot = false; +} + + +/* LDC Rm,GBR */ +void ldc__transfer_to_gbr(struct architectural_state * state, struct memory_map * map, const uint32_t m) +{ + int64_t op1 = sign_extend32(REG(state, m)); + int64_t gbr = op1; + state->gbr = _register(gbr); + + state->is_delay_slot = false; +} + + +/* LDC Rm,VBR */ +void ldc__transfer_to_vbr(struct architectural_state * state, struct memory_map * map, const uint32_t m) +{ + int64_t md = zero_extend1(state->sr.bits.md); + if (md == 0) return RESINST(state); + int64_t op1 = sign_extend32(REG(state, m)); + int64_t vbr = op1; + state->vbr = _register(vbr); + + state->is_delay_slot = false; +} + + +/* LDC.L @Rm+,SR */ +void ldc_l__load_to_sr(struct architectural_state * 
state, struct memory_map * map, const uint32_t m) +{ + int64_t md = zero_extend1(state->sr.bits.md); + if (md == 0) return RESINST(state); + int64_t op1 = sign_extend32(REG(state, m)); + int64_t address = zero_extend32(op1); + int64_t sr = sign_extend32(read_memory32(map, address)); + op1 = op1 + 4; + REG(state, m) = _register(op1); + state->sr.value = _register(sr); + + state->is_delay_slot = false; +} + + +/* LDC.L @Rm+,GBR */ +void ldc_l__load_to_gbr(struct architectural_state * state, struct memory_map * map, const uint32_t m) +{ + int64_t op1 = sign_extend32(REG(state, m)); + int64_t address = zero_extend32(op1); + int64_t gbr = sign_extend32(read_memory32(map, address)); + op1 = op1 + 4; + REG(state, m) = _register(op1); + state->gbr = _register(gbr); + + state->is_delay_slot = false; +} + + +/* LDC.L @Rm+,VBR */ +void ldc_l__load_to_vbr(struct architectural_state * state, struct memory_map * map, const uint32_t m) +{ + int64_t md = zero_extend1(state->sr.bits.md); + if (md == 0) return RESINST(state); + int64_t op1 = sign_extend32(REG(state, m)); + int64_t address = zero_extend32(op1); + int64_t vbr = sign_extend32(read_memory32(map, address)); + op1 = op1 + 4; + REG(state, m) = _register(op1); + state->vbr = _register(vbr); + + state->is_delay_slot = false; +} + + +/* LDS Rm,MACH */ +void lds__transfer_to_mach(struct architectural_state * state, struct memory_map * map, const uint32_t m) +{ + int64_t op1 = sign_extend32(REG(state, m)); + int64_t mach = op1; + state->mach = zero_extend32(mach); + + state->is_delay_slot = false; +} + + +/* LDS Rm,MACL */ +void lds__transfer_to_macl(struct architectural_state * state, struct memory_map * map, const uint32_t m) +{ + int64_t op1 = sign_extend32(REG(state, m)); + int64_t macl = op1; + state->macl = zero_extend32(macl); + + state->is_delay_slot = false; +} + + +/* LDS Rm,PR */ +void lds__transfer_to_pr(struct architectural_state * state, struct memory_map * map, const uint32_t m) +{ + int64_t op1 = sign_extend32(REG(state, m)); + int64_t newpr = op1; + int64_t delayedpr = newpr; + state->pr[1] = _register(newpr); + state->pr[2] = _register(delayedpr); + + state->is_delay_slot = false; +} + + +/* LDS.L @Rm+,MACH */ +void lds_l__load_to_mach(struct architectural_state * state, struct memory_map * map, const uint32_t m) +{ + int64_t op1 = sign_extend32(REG(state, m)); + int64_t address = zero_extend32(op1); + int64_t mach = sign_extend32(read_memory32(map, address)); + op1 = op1 + 4; + REG(state, m) = _register(op1); + state->mach = zero_extend32(mach); + + state->is_delay_slot = false; +} + + +/* LDS.L @Rm+,MACL */ +void lds_l__load_to_macl(struct architectural_state * state, struct memory_map * map, const uint32_t m) +{ + int64_t op1 = sign_extend32(REG(state, m)); + int64_t address = zero_extend32(op1); + int64_t macl = sign_extend32(read_memory32(map, address)); + op1 = op1 + 4; + REG(state, m) = _register(op1); + state->macl = zero_extend32(macl); + + state->is_delay_slot = false; +} + + +/* LDS.L @Rm+,PR */ +void lds_l__load_to_pr(struct architectural_state * state, struct memory_map * map, const uint32_t m) +{ + int64_t op1 = sign_extend32(REG(state, m)); + int64_t address = zero_extend32(op1); + int64_t newpr = sign_extend32(read_memory32(map, address)); + int64_t delayedpr = newpr; + op1 = op1 + 4; + REG(state, m) = _register(op1); + state->pr[1] = _register(newpr); + state->pr[2] = _register(delayedpr); + + state->is_delay_slot = false; +} + + +/* NOP */ +void nop__no_operand(struct architectural_state * state, struct memory_map * 
map) +{ + ; + + state->is_delay_slot = false; +} + + +/* RTE */ +void rte__no_operand(struct architectural_state * state, struct memory_map * map) +{ + int64_t md = zero_extend1(state->sr.bits.md); + if (md == 0) return RESINST(state); + int64_t ssr = sign_extend32(state->ssr); + int64_t pc = sign_extend32(state->pc[0]); + if (is_delay_slot(state)) return ILLSLOT(state); + int64_t target = pc; + int64_t delayedpc = target & (~0x1); + state->pc[2] = _register(delayedpc); + state->sr.value = _register(ssr); + + state->is_delay_slot = true; +} + + +/* SETS */ +void sets__no_operand(struct architectural_state * state, struct memory_map * map) +{ + int64_t s = 1; + state->sr.bits.s = bit(s); + + state->is_delay_slot = false; +} + + +/* SETT */ +void sett__no_operand(struct architectural_state * state, struct memory_map * map) +{ + int64_t t = 1; + state->sr.bits.t = bit(t); + + state->is_delay_slot = false; +} + + +/* SLEEP */ +void sleep__no_operand(struct architectural_state * state, struct memory_map * map) +{ + int64_t md = zero_extend1(state->sr.bits.md); + if (md == 0) return RESINST(state); + sleep(state); + + state->is_delay_slot = false; +} + + +/* STC SR,Rn */ +void stc__transfer_from_sr(struct architectural_state * state, struct memory_map * map, const uint32_t n) +{ + int64_t md = zero_extend1(state->sr.bits.md); + if (md == 0) return RESINST(state); + int64_t sr = sign_extend32(state->sr.value); + int64_t op1 = sr; + REG(state, n) = _register(op1); + + state->is_delay_slot = false; +} + + +/* STC GBR,Rn */ +void stc__transfer_from_gbr(struct architectural_state * state, struct memory_map * map, const uint32_t n) +{ + int64_t gbr = sign_extend32(state->gbr); + int64_t op1 = gbr; + REG(state, n) = _register(op1); + + state->is_delay_slot = false; +} + + +/* STC VBR,Rn */ +void stc__transfer_from_vbr(struct architectural_state * state, struct memory_map * map, const uint32_t n) +{ + int64_t md = zero_extend1(state->sr.bits.md); + if (md == 0) return RESINST(state); + int64_t vbr = sign_extend32(state->vbr); + int64_t op1 = vbr; + REG(state, n) = _register(op1); + + state->is_delay_slot = false; +} + + +/* STC.L SR,@-Rn */ +void stc_l__store_from_sr(struct architectural_state * state, struct memory_map * map, const uint32_t n) +{ + int64_t md = zero_extend1(state->sr.bits.md); + if (md == 0) return RESINST(state); + int64_t sr = sign_extend32(state->sr.value); + int64_t op1 = sign_extend32(REG(state, n)); + int64_t address = zero_extend32(op1 - 4); + write_memory32(map, address, sr); + op1 = address; + REG(state, n) = _register(op1); + + state->is_delay_slot = false; +} + + +/* STC.L GBR,@-Rn */ +void stc_l__store_from_gbr(struct architectural_state * state, struct memory_map * map, const uint32_t n) +{ + int64_t gbr = sign_extend32(state->gbr); + int64_t op1 = sign_extend32(REG(state, n)); + int64_t address = zero_extend32(op1 - 4); + write_memory32(map, address, gbr); + op1 = address; + REG(state, n) = _register(op1); + + state->is_delay_slot = false; +} + + +/* STC.L VBR,@-Rn */ +void stc_l__store_from_vbr(struct architectural_state * state, struct memory_map * map, const uint32_t n) +{ + int64_t md = zero_extend1(state->sr.bits.md); + if (md == 0) return RESINST(state); + int64_t vbr = sign_extend32(state->vbr); + int64_t op1 = sign_extend32(REG(state, n)); + int64_t address = zero_extend32(op1 - 4); + write_memory32(map, address, vbr); + op1 = address; + REG(state, n) = _register(op1); + + state->is_delay_slot = false; +} + + +/* STS MACH,Rn */ +void sts__transfer_from_mach(struct 
architectural_state * state, struct memory_map * map, const uint32_t n) +{ + int64_t mach = sign_extend32(state->mach); + int64_t op1 = mach; + REG(state, n) = _register(op1); + + state->is_delay_slot = false; +} + + +/* STS MACL,Rn */ +void sts__transfer_from_macl(struct architectural_state * state, struct memory_map * map, const uint32_t n) +{ + int64_t macl = sign_extend32(state->macl); + int64_t op1 = macl; + REG(state, n) = _register(op1); + + state->is_delay_slot = false; +} + + +/* STS PR,Rn */ +void sts__transfer_from_pr(struct architectural_state * state, struct memory_map * map, const uint32_t n) +{ + int64_t pr = sign_extend32(state->pr[1]); + int64_t op1 = pr; + REG(state, n) = _register(op1); + + state->is_delay_slot = false; +} + + +/* STS.L MACH,@-Rn */ +void sts_l__store_from_mach(struct architectural_state * state, struct memory_map * map, const uint32_t n) +{ + int64_t mach = sign_extend32(state->mach); + int64_t op1 = sign_extend32(REG(state, n)); + int64_t address = zero_extend32(op1 - 4); + write_memory32(map, address, mach); + op1 = address; + REG(state, n) = _register(op1); + + state->is_delay_slot = false; +} + + +/* STS.L MACL,@-Rn */ +void sts_l__store_from_macl(struct architectural_state * state, struct memory_map * map, const uint32_t n) +{ + int64_t macl = sign_extend32(state->macl); + int64_t op1 = sign_extend32(REG(state, n)); + int64_t address = zero_extend32(op1 - 4); + write_memory32(map, address, macl); + op1 = address; + REG(state, n) = _register(op1); + + state->is_delay_slot = false; +} + + +/* STS.L PR,@-Rn */ +void sts_l__store_from_pr(struct architectural_state * state, struct memory_map * map, const uint32_t n) +{ + int64_t pr = sign_extend32(state->pr[1]); + int64_t op1 = sign_extend32(REG(state, n)); + int64_t address = zero_extend32(op1 - 4); + write_memory32(map, address, pr); + op1 = address; + REG(state, n) = _register(op1); + + state->is_delay_slot = false; +} + + +/* TRAPA #imm */ +void trapa__immediate(struct architectural_state * state, struct memory_map * map, const uint32_t i) +{ + int64_t imm = zero_extend8(i); + if (is_delay_slot(state)) return ILLSLOT(state); + return TRAP(state, imm); + + state->is_delay_slot = false; +} + diff --git a/c/impl.h b/c/impl.h new file mode 100644 index 0000000..c7f16f2 --- /dev/null +++ b/c/impl.h @@ -0,0 +1,293 @@ +#pragma once + +#include "state.h" +#include "memory_map.h" + +/* MOV #imm,Rn */ +void mov__immediate(struct architectural_state * state, struct memory_map * map, const uint32_t i, const uint32_t n); +/* MOV.W @(disp,PC),Rn */ +void mov_w__pc_relative_with_displacement(struct architectural_state * state, struct memory_map * map, const uint32_t d, const uint32_t n); +/* MOV.L @(disp,PC),Rn */ +void mov_l__pc_relative_with_displacement(struct architectural_state * state, struct memory_map * map, const uint32_t d, const uint32_t n); +/* MOV Rm,Rn */ +void mov__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n); +/* MOV.B Rm,@Rn */ +void mov_b__store_register_direct_data_transfer(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n); +/* MOV.W Rm,@Rn */ +void mov_w__store_register_direct_data_transfer(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n); +/* MOV.L Rm,@Rn */ +void mov_l__store_register_direct_data_transfer(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n); +/* MOV.B 
@Rm,Rn */ +void mov_b__load_register_direct_data_transfer(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n); +/* MOV.W @Rm,Rn */ +void mov_w__load_register_direct_data_transfer(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n); +/* MOV.L @Rm,Rn */ +void mov_l__load_register_direct_data_transfer(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n); +/* MOV.B Rm,@-Rn */ +void mov_b__store_direct_data_transfer_from_register(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n); +/* MOV.W Rm,@-Rn */ +void mov_w__store_direct_data_transfer_from_register(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n); +/* MOV.L Rm,@-Rn */ +void mov_l__store_direct_data_transfer_from_register(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n); +/* MOV.B @Rm+,Rn */ +void mov_b__load_direct_data_transfer_from_register(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n); +/* MOV.W @Rm+,Rn */ +void mov_w__load_direct_data_transfer_from_register(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n); +/* MOV.L @Rm+,Rn */ +void mov_l__load_direct_data_transfer_from_register(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n); +/* MOV.B R0,@(disp,Rn) */ +void mov_b__store_register_indirect_with_displacement(struct architectural_state * state, struct memory_map * map, const uint32_t d, const uint32_t n); +/* MOV.W R0,@(disp,Rn) */ +void mov_w__store_register_indirect_with_displacement(struct architectural_state * state, struct memory_map * map, const uint32_t d, const uint32_t n); +/* MOV.L Rm,@(disp,Rn) */ +void mov_l__store_register_indirect_with_displacement(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t d, const uint32_t n); +/* MOV.B @(disp,Rm),R0 */ +void mov_b__load_register_indirect_with_displacement(struct architectural_state * state, struct memory_map * map, const uint32_t d, const uint32_t m); +/* MOV.W @(disp,Rm),R0 */ +void mov_w__load_register_indirect_with_displacement(struct architectural_state * state, struct memory_map * map, const uint32_t d, const uint32_t m); +/* MOV.L @(disp,Rm),Rn */ +void mov_l__load_register_indirect_with_displacement(struct architectural_state * state, struct memory_map * map, const uint32_t d, const uint32_t m, const uint32_t n); +/* MOV.B Rm,@(R0,Rn) */ +void mov_b__store_indexed_register_indirect(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n); +/* MOV.W Rm,@(R0,Rn) */ +void mov_w__store_indexed_register_indirect(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n); +/* MOV.L Rm,@(R0,Rn) */ +void mov_l__store_indexed_register_indirect(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n); +/* MOV.B @(R0,Rm),Rn */ +void mov_b__load_indexed_register_indirect(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n); +/* MOV.W @(R0,Rm),Rn */ +void mov_w__load_indexed_register_indirect(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n); +/* MOV.L @(R0,Rm),Rn */ +void mov_l__load_indexed_register_indirect(struct 
architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n); +/* MOV.B R0,@(disp,GBR) */ +void mov_b__store_gbr_indirect_with_displacement(struct architectural_state * state, struct memory_map * map, const uint32_t d); +/* MOV.W R0,@(disp,GBR) */ +void mov_w__store_gbr_indirect_with_displacement(struct architectural_state * state, struct memory_map * map, const uint32_t d); +/* MOV.L R0,@(disp,GBR) */ +void mov_l__store_gbr_indirect_with_displacement(struct architectural_state * state, struct memory_map * map, const uint32_t d); +/* MOV.B @(disp,GBR),R0 */ +void mov_b__load_gbr_indirect_with_displacement(struct architectural_state * state, struct memory_map * map, const uint32_t d); +/* MOV.W @(disp,GBR),R0 */ +void mov_w__load_gbr_indirect_with_displacement(struct architectural_state * state, struct memory_map * map, const uint32_t d); +/* MOV.L @(disp,GBR),R0 */ +void mov_l__load_gbr_indirect_with_displacement(struct architectural_state * state, struct memory_map * map, const uint32_t d); +/* MOVA @(disp,PC),R0 */ +void mova__pc_relative_with_displacement(struct architectural_state * state, struct memory_map * map, const uint32_t d); +/* MOVT Rn */ +void movt__destination_operand_only(struct architectural_state * state, struct memory_map * map, const uint32_t n); +/* SWAP.B Rm,Rn */ +void swap_b__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n); +/* SWAP.W Rm,Rn */ +void swap_w__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n); +/* XTRCT Rm,Rn */ +void xtrct__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n); +/* ADD Rm,Rn */ +void add__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n); +/* ADD #imm,Rn */ +void add__immediate(struct architectural_state * state, struct memory_map * map, const uint32_t i, const uint32_t n); +/* ADDC Rm,Rn */ +void addc__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n); +/* ADDV Rm,Rn */ +void addv__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n); +/* CMP/EQ #imm,R0 */ +void cmp_eq__immediate(struct architectural_state * state, struct memory_map * map, const uint32_t i); +/* CMP/EQ Rm,Rn */ +void cmp_eq__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n); +/* CMP/HS Rm,Rn */ +void cmp_hs__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n); +/* CMP/GE Rm,Rn */ +void cmp_ge__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n); +/* CMP/HI Rm,Rn */ +void cmp_hi__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n); +/* CMP/GT Rm,Rn */ +void cmp_gt__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n); +/* CMP/PZ Rn */ +void cmp_pz__destination_operand_only(struct architectural_state * state, struct memory_map * map, const uint32_t n); +/* CMP/PL Rn */ +void 
cmp_pl__destination_operand_only(struct architectural_state * state, struct memory_map * map, const uint32_t n); +/* CMP/STR Rm,Rn */ +void cmp_str__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n); +/* DIV1 Rm,Rn */ +void div1__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n); +/* DIV0S Rm,Rn */ +void div0s__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n); +/* DIV0U */ +void div0u__no_operand(struct architectural_state * state, struct memory_map * map); +/* DMULS.L Rm,Rn */ +void dmuls_l__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n); +/* DMULU.L Rm,Rn */ +void dmulu_l__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n); +/* DT Rn */ +void dt__destination_operand_only(struct architectural_state * state, struct memory_map * map, const uint32_t n); +/* EXTS.B Rm,Rn */ +void exts_b__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n); +/* EXTS.W Rm,Rn */ +void exts_w__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n); +/* EXTU.B Rm,Rn */ +void extu_b__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n); +/* EXTU.W Rm,Rn */ +void extu_w__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n); +/* MAC.L @Rm+,@Rn+ */ +void mac_l__multiply_and_accumulate_operation(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n); +/* MAC.W @Rm+,@Rn+ */ +void mac_w__multiply_and_accumulate_operation(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n); +/* MUL.L Rm,Rn */ +void mul_l__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n); +/* MULS.W Rm,Rn */ +void muls_w__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n); +/* MULU.W Rm,Rn */ +void mulu_w__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n); +/* NEG Rm,Rn */ +void neg__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n); +/* NEGC Rm,Rn */ +void negc__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n); +/* SUB Rm,Rn */ +void sub__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n); +/* SUBC Rm,Rn */ +void subc__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n); +/* SUBV Rm,Rn */ +void subv__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n); +/* AND Rm,Rn */ +void and__source_and_destination_operands(struct architectural_state * state, struct memory_map * 
map, const uint32_t m, const uint32_t n); +/* AND #imm,R0 */ +void and__immediate(struct architectural_state * state, struct memory_map * map, const uint32_t i); +/* AND.B #imm,@(R0,GBR) */ +void and_b__store_indexed_gbr_indirect(struct architectural_state * state, struct memory_map * map, const uint32_t i); +/* NOT Rm,Rn */ +void not__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n); +/* OR Rm,Rn */ +void or__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n); +/* OR #imm,R0 */ +void or__immediate(struct architectural_state * state, struct memory_map * map, const uint32_t i); +/* OR.B #imm,@(R0,GBR) */ +void or_b__store_indexed_gbr_indirect(struct architectural_state * state, struct memory_map * map, const uint32_t i); +/* TAS.B @Rn */ +void tas_b__destination_operand_only(struct architectural_state * state, struct memory_map * map, const uint32_t n); +/* TST Rm,Rn */ +void tst__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n); +/* TST #imm,R0 */ +void tst__immediate(struct architectural_state * state, struct memory_map * map, const uint32_t i); +/* TST.B #imm,@(R0,GBR) */ +void tst_b__store_indexed_gbr_indirect(struct architectural_state * state, struct memory_map * map, const uint32_t i); +/* XOR Rm,Rn */ +void xor__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n); +/* XOR #imm,R0 */ +void xor__immediate(struct architectural_state * state, struct memory_map * map, const uint32_t i); +/* XOR.B #imm,@(R0,GBR) */ +void xor_b__store_indexed_gbr_indirect(struct architectural_state * state, struct memory_map * map, const uint32_t i); +/* ROTL Rn */ +void rotl__destination_operand_only(struct architectural_state * state, struct memory_map * map, const uint32_t n); +/* ROTR Rn */ +void rotr__destination_operand_only(struct architectural_state * state, struct memory_map * map, const uint32_t n); +/* ROTCL Rn */ +void rotcl__destination_operand_only(struct architectural_state * state, struct memory_map * map, const uint32_t n); +/* ROTCR Rn */ +void rotcr__destination_operand_only(struct architectural_state * state, struct memory_map * map, const uint32_t n); +/* SHAL Rn */ +void shal__destination_operand_only(struct architectural_state * state, struct memory_map * map, const uint32_t n); +/* SHAR Rn */ +void shar__destination_operand_only(struct architectural_state * state, struct memory_map * map, const uint32_t n); +/* SHLL Rn */ +void shll__destination_operand_only(struct architectural_state * state, struct memory_map * map, const uint32_t n); +/* SHLR Rn */ +void shlr__destination_operand_only(struct architectural_state * state, struct memory_map * map, const uint32_t n); +/* SHLL2 Rn */ +void shll2__destination_operand_only(struct architectural_state * state, struct memory_map * map, const uint32_t n); +/* SHLR2 Rn */ +void shlr2__destination_operand_only(struct architectural_state * state, struct memory_map * map, const uint32_t n); +/* SHLL8 Rn */ +void shll8__destination_operand_only(struct architectural_state * state, struct memory_map * map, const uint32_t n); +/* SHLR8 Rn */ +void shlr8__destination_operand_only(struct architectural_state * state, struct memory_map * map, const uint32_t n); +/* SHLL16 Rn */ +void shll16__destination_operand_only(struct architectural_state 
* state, struct memory_map * map, const uint32_t n); +/* SHLR16 Rn */ +void shlr16__destination_operand_only(struct architectural_state * state, struct memory_map * map, const uint32_t n); +/* BF label */ +void bf__pc_relative(struct architectural_state * state, struct memory_map * map, const uint32_t d); +/* BF/S label */ +void bf_s__pc_relative(struct architectural_state * state, struct memory_map * map, const uint32_t d); +/* BT label */ +void bt__pc_relative(struct architectural_state * state, struct memory_map * map, const uint32_t d); +/* BT/S label */ +void bt_s__pc_relative(struct architectural_state * state, struct memory_map * map, const uint32_t d); +/* BRA label */ +void bra__pc_relative(struct architectural_state * state, struct memory_map * map, const uint32_t d); +/* BRAF Rn */ +void braf__destination_operand_only(struct architectural_state * state, struct memory_map * map, const uint32_t n); +/* BSR label */ +void bsr__pc_relative(struct architectural_state * state, struct memory_map * map, const uint32_t d); +/* BSRF Rn */ +void bsrf__destination_operand_only(struct architectural_state * state, struct memory_map * map, const uint32_t n); +/* JMP @Rn */ +void jmp__destination_operand_only(struct architectural_state * state, struct memory_map * map, const uint32_t n); +/* JSR @Rn */ +void jsr__destination_operand_only(struct architectural_state * state, struct memory_map * map, const uint32_t n); +/* RTS */ +void rts__no_operand(struct architectural_state * state, struct memory_map * map); +/* CLRMAC */ +void clrmac__no_operand(struct architectural_state * state, struct memory_map * map); +/* CLRS */ +void clrs__no_operand(struct architectural_state * state, struct memory_map * map); +/* CLRT */ +void clrt__no_operand(struct architectural_state * state, struct memory_map * map); +/* LDC Rm,SR */ +void ldc__transfer_to_sr(struct architectural_state * state, struct memory_map * map, const uint32_t m); +/* LDC Rm,GBR */ +void ldc__transfer_to_gbr(struct architectural_state * state, struct memory_map * map, const uint32_t m); +/* LDC Rm,VBR */ +void ldc__transfer_to_vbr(struct architectural_state * state, struct memory_map * map, const uint32_t m); +/* LDC.L @Rm+,SR */ +void ldc_l__load_to_sr(struct architectural_state * state, struct memory_map * map, const uint32_t m); +/* LDC.L @Rm+,GBR */ +void ldc_l__load_to_gbr(struct architectural_state * state, struct memory_map * map, const uint32_t m); +/* LDC.L @Rm+,VBR */ +void ldc_l__load_to_vbr(struct architectural_state * state, struct memory_map * map, const uint32_t m); +/* LDS Rm,MACH */ +void lds__transfer_to_mach(struct architectural_state * state, struct memory_map * map, const uint32_t m); +/* LDS Rm,MACL */ +void lds__transfer_to_macl(struct architectural_state * state, struct memory_map * map, const uint32_t m); +/* LDS Rm,PR */ +void lds__transfer_to_pr(struct architectural_state * state, struct memory_map * map, const uint32_t m); +/* LDS.L @Rm+,MACH */ +void lds_l__load_to_mach(struct architectural_state * state, struct memory_map * map, const uint32_t m); +/* LDS.L @Rm+,MACL */ +void lds_l__load_to_macl(struct architectural_state * state, struct memory_map * map, const uint32_t m); +/* LDS.L @Rm+,PR */ +void lds_l__load_to_pr(struct architectural_state * state, struct memory_map * map, const uint32_t m); +/* NOP */ +void nop__no_operand(struct architectural_state * state, struct memory_map * map); +/* RTE */ +void rte__no_operand(struct architectural_state * state, struct memory_map * map); +/* SETS */ +void 
sets__no_operand(struct architectural_state * state, struct memory_map * map); +/* SETT */ +void sett__no_operand(struct architectural_state * state, struct memory_map * map); +/* SLEEP */ +void sleep__no_operand(struct architectural_state * state, struct memory_map * map); +/* STC SR,Rn */ +void stc__transfer_from_sr(struct architectural_state * state, struct memory_map * map, const uint32_t n); +/* STC GBR,Rn */ +void stc__transfer_from_gbr(struct architectural_state * state, struct memory_map * map, const uint32_t n); +/* STC VBR,Rn */ +void stc__transfer_from_vbr(struct architectural_state * state, struct memory_map * map, const uint32_t n); +/* STC.L SR,@-Rn */ +void stc_l__store_from_sr(struct architectural_state * state, struct memory_map * map, const uint32_t n); +/* STC.L GBR,@-Rn */ +void stc_l__store_from_gbr(struct architectural_state * state, struct memory_map * map, const uint32_t n); +/* STC.L VBR,@-Rn */ +void stc_l__store_from_vbr(struct architectural_state * state, struct memory_map * map, const uint32_t n); +/* STS MACH,Rn */ +void sts__transfer_from_mach(struct architectural_state * state, struct memory_map * map, const uint32_t n); +/* STS MACL,Rn */ +void sts__transfer_from_macl(struct architectural_state * state, struct memory_map * map, const uint32_t n); +/* STS PR,Rn */ +void sts__transfer_from_pr(struct architectural_state * state, struct memory_map * map, const uint32_t n); +/* STS.L MACH,@-Rn */ +void sts_l__store_from_mach(struct architectural_state * state, struct memory_map * map, const uint32_t n); +/* STS.L MACL,@-Rn */ +void sts_l__store_from_macl(struct architectural_state * state, struct memory_map * map, const uint32_t n); +/* STS.L PR,@-Rn */ +void sts_l__store_from_pr(struct architectural_state * state, struct memory_map * map, const uint32_t n); +/* TRAPA #imm */ +void trapa__immediate(struct architectural_state * state, struct memory_map * map, const uint32_t i); \ No newline at end of file diff --git a/c/main.c b/c/main.c new file mode 100644 index 0000000..ba87e78 --- /dev/null +++ b/c/main.c @@ -0,0 +1,60 @@ +#include <assert.h> +#include <stdio.h> + +#include "memory_map.h" +#include "ram.h" +#include "state.h" +#include "exception.h" +#include "execute.h" +#include "decode_print.h" + +int main(int argc, char *argv[]) +{ + assert(argc > 1); + FILE * f = fopen(argv[1], "r"); + if (f == NULL) { + fprintf(stderr, "fopen %s\n", argv[1]); + return -1; + } + int ret = fseek(f, 0, SEEK_END); + assert(ret == 0); + uint32_t read_size = ftell(f); + ret = fseek(f, 0, SEEK_SET); + assert(ret == 0); + + uint32_t rom_size = ((read_size + 3) & ~3); + uint32_t buf[rom_size / 4]; + uint32_t ret_size = fread(buf, 1, read_size, f); + assert(ret_size == read_size); + + ret = fclose(f); + assert(ret == 0); + + struct memory_map map = { .length = 0 }; + map.entry[map.length++] = (struct memory_map_entry){ + .start = 0x0000'0000, + .size = rom_size, + .mem = (void *)buf, + .access = ram__memory_access, + }; + + struct architectural_state state = { 0 }; + POWERON(&state); + + char const * instruction_buf; + char operand_buf[128]; + + while (1) { + uint32_t instruction_code = fetch(&state, &map); + decode_and_print_instruction(&state, &map, instruction_code, &instruction_buf, operand_buf, 128); + printf("pc %08x %-7s %-20s\n", state.pc[0], instruction_buf, operand_buf); + + if (physical_address(state.pc[0]) == 0x38) + break; + + step(&state, &map); + } + + printf("part1 %d\n", state.gbr); + printf("part2 %d %d\n", state.mach, state.macl); +}
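/* Illustrative sketch, not part of the original patch: a second region can be
   mapped the same way main() maps the ROM, e.g. to give guest code some scratch
   RAM. The name, base address 0x0C00'0000 and size below are hypothetical, and
   the snippet assumes "memory_map.h" and "ram.h" are included. */
static uint32_t scratch_ram[0x1000 / 4];

static void add_scratch_ram(struct memory_map * map)
{
    // append an entry exactly like main() does for the ROM; find_entry() will
    // then match physical addresses in [0x0C000000, 0x0C001000) against it
    map->entry[map->length++] = (struct memory_map_entry){
        .start  = 0x0C00'0000,
        .size   = sizeof(scratch_ram),
        .mem    = (void *)scratch_ram,
        .access = ram__memory_access,
    };
}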
diff --git a/c/memory_map.h b/c/memory_map.h new file mode 100644 index 0000000..c3d9bec --- /dev/null +++ b/c/memory_map.h @@ -0,0 +1,53 @@ +#pragma once + +#include <stdint.h> +#include <assert.h> + +// 0x1F000000 - 0x1FFFFFFF +// 0xFF000000 - 0xFFFFFFFF + +struct memory_access { + uint8_t (* read_memory8 )(void * mem, uint32_t address); + uint16_t (* read_memory16 )(void * mem, uint32_t address); + uint32_t (* read_memory32 )(void * mem, uint32_t address); + void (* write_memory8 )(void * mem, uint32_t address, uint8_t value); + void (* write_memory16)(void * mem, uint32_t address, uint16_t value); + void (* write_memory32)(void * mem, uint32_t address, uint32_t value); +}; + +struct memory_map_entry { + uint32_t start; + uint32_t size; + void * mem; + struct memory_access access; +}; + +#define memory_map_max_length 16 + +struct memory_map { + uint32_t length; + struct memory_map_entry entry[memory_map_max_length]; +}; + +static inline uint32_t physical_address(uint32_t address) +{ + if (address < 0xe0000000) + return address & (~(0b111 << 29)); // P0 P1 P2 P3 region + else + return address; // P4 region +} + +static inline struct memory_map_entry * find_entry(struct memory_map * map, uint32_t address) +{ + uint32_t physical = physical_address(address); + assert(map->length <= memory_map_max_length); + for (int i = 0; i < map->length; i++) { + uint32_t entry_start = map->entry[i].start; + uint32_t entry_end = map->entry[i].start + map->entry[i].size; + if (physical >= entry_start && physical < entry_end) + return &map->entry[i]; + } + return (struct memory_map_entry *)0; +} + +#undef memory_map_max_length diff --git a/c/operations.h b/c/operations.h new file mode 100644 index 0000000..6c9d181 --- /dev/null +++ b/c/operations.h @@ -0,0 +1,131 @@ +#pragma once +#include <stdint.h> +#include <assert.h> + +// +// sign_extend +// + +static inline int32_t sign_extend(uint32_t x, uint32_t b) +{ + const uint32_t m = 1UL << (b - 1); + x = x & ((1UL << b) - 1); + const int32_t r = (x ^ m) - m; + return r; +} + +static inline int64_t sign_extend64(uint64_t x) +{ + return (int64_t)x; +} + +static inline int32_t sign_extend32(uint32_t x) +{ + return (int32_t)x; +} + +static inline int32_t sign_extend16(uint32_t x) +{ + return sign_extend(x, 16); +} + +static inline int32_t sign_extend12(uint32_t x) +{ + return sign_extend(x, 12); +} + +static inline int32_t sign_extend8(uint32_t x) +{ + return sign_extend(x, 8); +} + +// +// zero_extend +// + +static inline uint32_t zero_extend(uint32_t x, uint32_t b) +{ + x = x & ((1ULL << b) - 1); + return x; +} + +static inline uint32_t zero_extend32(uint32_t x) +{ + return (uint32_t)x; +} + +static inline uint32_t zero_extend16(uint32_t x) +{ + return zero_extend(x, 16); +} + +static inline uint32_t zero_extend8(uint32_t x) +{ + return zero_extend(x, 8); +} + +static inline uint32_t zero_extend4(uint32_t x) +{ + return zero_extend(x, 4); +} + +static inline uint32_t zero_extend1(uint32_t x) +{ + return zero_extend(x, 1); +} + +// +// signed_saturate +// + +static inline uint64_t signed_saturate(uint64_t x, uint32_t b) +{ + const int64_t upper = (1LL << (b - 1)) - 1; + const int64_t lower = -(1LL << (b - 1)); + static_assert(-(1LL << (48 - 1)) < 0); + + if (x > upper) + return upper; + else if (x < lower) + return lower; + else + return x; +} + +static inline uint64_t signed_saturate48(uint64_t x) +{ + return signed_saturate(x, 48); +} + +static inline uint32_t signed_saturate32(uint32_t x) +{ + return signed_saturate(x, 32); +} + +// +// "convenience" functions +// + +static inline uint32_t _register(uint32_t x) +{ + return zero_extend(x, 32); +} +
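/* Illustrative only, not part of the original patch: the (x ^ m) - m step in
   sign_extend() above maps a b-bit field onto its two's-complement value, which
   is what the 8- and 12-bit branch displacements in impl.c rely on. A few spot
   checks of the helpers defined above: */
static inline void extend_spot_checks(void)
{
    assert(sign_extend12(0x7ff) ==  2047); // largest positive 12-bit displacement
    assert(sign_extend12(0x800) == -2048); // most negative 12-bit displacement
    assert(sign_extend8(0xff)   ==    -1); // MOV #imm,Rn sign-extends its byte
    assert(zero_extend8(0x1ff)  ==  0xff); // zero_extend only masks to b bits
}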
+static inline uint32_t bit(uint32_t x) +{ + return zero_extend(x, 1); +} + +// +// "operations" +// + +static inline int32_t unary_int(int64_t x) +{ + return x != 0; +} + +static inline int64_t bit_extract(int64_t n, int64_t b, int64_t m) +{ + return (n >> b) & ((1 << m) - 1); +} diff --git a/c/ram.c b/c/ram.c new file mode 100644 index 0000000..32b3251 --- /dev/null +++ b/c/ram.c @@ -0,0 +1,64 @@ +#include "ram.h" + +#define TARGET_ENDIAN __ORDER_LITTLE_ENDIAN__ + +uint8_t ram__read_memory8(void * data, uint32_t address) +{ + uint8_t * mem = (uint8_t *)data; + return mem[address]; +} + +uint16_t ram__read_memory16(void * data, uint32_t address) +{ + uint16_t * mem = (uint16_t *)data; + uint16_t value = mem[address >> 1]; +#if __BYTE_ORDER__ == TARGET_ENDIAN + return value; +#else + return __builtin_bswap16(value); +#endif +} + +uint32_t ram__read_memory32(void * data, uint32_t address) +{ + uint32_t * mem = (uint32_t *)data; + uint32_t value = mem[address >> 2]; +#if __BYTE_ORDER__ == TARGET_ENDIAN + return value; +#else + return __builtin_bswap32(value); +#endif +} + +void ram__write_memory8(void * data, uint32_t address, uint8_t value) +{ + uint8_t * mem = (uint8_t *)data; + mem[address] = value; +} + +void ram__write_memory16(void * data, uint32_t address, uint16_t value) +{ + uint16_t * mem = (uint16_t *)data; +#if __BYTE_ORDER__ != TARGET_ENDIAN + value = __builtin_bswap16(value); +#endif + mem[address >> 1] = value; +} + +void ram__write_memory32(void * data, uint32_t address, uint32_t value) +{ + uint32_t * mem = (uint32_t *)data; +#if __BYTE_ORDER__ != TARGET_ENDIAN + value = __builtin_bswap32(value); +#endif + mem[address >> 2] = value; +} + +struct memory_access ram__memory_access = { + .read_memory8 = &ram__read_memory8, + .read_memory16 = &ram__read_memory16, + .read_memory32 = &ram__read_memory32, + .write_memory8 = &ram__write_memory8, + .write_memory16 = &ram__write_memory16, + .write_memory32 = &ram__write_memory32 +}; diff --git a/c/ram.h b/c/ram.h new file mode 100644 index 0000000..4658884 --- /dev/null +++ b/c/ram.h @@ -0,0 +1,14 @@ +#pragma once + +#include + +#include "memory_map.h" + +uint8_t ram__read_memory8(void * data, uint32_t address); +uint16_t ram__read_memory16(void * data, uint32_t address); +uint32_t ram__read_memory32(void * data, uint32_t address); +void ram__write_memory8(void * data, uint32_t address, uint8_t value); +void ram__write_memory16(void * data, uint32_t address, uint16_t value); +void ram__write_memory32(void * data, uint32_t address, uint32_t value); + +extern struct memory_access ram__memory_access; diff --git a/c/sr_bits.h b/c/sr_bits.h new file mode 100644 index 0000000..8ed6eef --- /dev/null +++ b/c/sr_bits.h @@ -0,0 +1,39 @@ +#pragma once + +#include + +struct sr_bits { +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ + uint32_t t : 1; + uint32_t s : 1; + uint32_t _res0 : 2; + uint32_t imask : 4; + uint32_t _res1 : 3; + uint32_t q : 1; + uint32_t m : 1; + uint32_t _res2 : 5; + uint32_t fd : 1; + uint32_t _res3 : 12; + uint32_t bl : 1; + uint32_t rb : 1; + uint32_t md : 1; + uint32_t _res4 : 1; +#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ + uint32_t _res4 : 1; + uint32_t md : 1; + uint32_t rb : 1; + uint32_t bl : 1; + uint32_t _res3 : 12; + uint32_t fd : 1; + uint32_t _res2 : 5; + uint32_t m : 1; + uint32_t q : 1; + uint32_t _res1 : 3; + uint32_t imask : 4; + uint32_t _res0 : 2; + uint32_t s : 1; + uint32_t t : 1; +#else +# error "unsupported endianness" +#endif +}; diff --git a/c/state.h b/c/state.h new file mode 100644 index 
0000000..4fab23f --- /dev/null +++ b/c/state.h @@ -0,0 +1,66 @@ +#pragma once + +#include +#include +#include + +#include "sr_bits.h" + +#define SR__RB (1 << 29) +#define SR__MD (1 << 30) + +#define BANK(sr) (((sr) & SR__RB) && ((sr) & SR__MD)) +#define REGN(sr, x) (((x) & (1 << 3)) ? (x) : ((x) ^ (BANK(sr) << 4))) +#define REG(state, x) ((state)->general_register[REGN((state)->sr.value, (x))]) +#define REGN_BANK(sr, x) (((x) & (1 << 3)) ? (x) : ((x) ^ ((!BANK(sr)) << 4))) +#define REG_BANK(state, x) ((state)->general_register[REGN_BANK((state)->sr.value, (x))]) + +static_assert(REGN(0, 0 ) == 0 ); +static_assert(REGN(0, 7 ) == 7 ); +static_assert(REGN(0, 8 ) == 8 ); +static_assert(REGN(0, 15) == 15); +static_assert(REGN(SR__MD | SR__RB, 0 ) == 16); +static_assert(REGN(SR__MD | SR__RB, 7 ) == 23); +static_assert(REGN(SR__MD | SR__RB, 8 ) == 8 ); +static_assert(REGN(SR__MD | SR__RB, 15) == 15); + +static_assert(REGN_BANK(0, 0 ) == 16); +static_assert(REGN_BANK(0, 7 ) == 23); +static_assert(REGN_BANK(0, 8 ) == 8 ); +static_assert(REGN_BANK(0, 15) == 15); +static_assert(REGN_BANK(SR__MD | SR__RB, 0 ) == 0 ); +static_assert(REGN_BANK(SR__MD | SR__RB, 7 ) == 7 ); +static_assert(REGN_BANK(SR__MD | SR__RB, 8 ) == 8 ); +static_assert(REGN_BANK(SR__MD | SR__RB, 15) == 15); + +struct architectural_state { + uint32_t general_register[24]; + + // system_register + uint32_t mach; + uint32_t macl; + uint32_t pr[3]; + uint32_t pc[3]; + uint32_t fpscr; + uint32_t fpul; + // + + // control_register + union { + struct sr_bits bits; + uint32_t value; + } sr; + uint32_t ssr; + uint32_t spc; + uint32_t gbr; + uint32_t vbr; + uint32_t sgr; + uint32_t dbr; + // + + uint32_t floating_point_register[32]; + + bool is_delay_slot; +}; + +// diff --git a/c/state_helpers.h b/c/state_helpers.h new file mode 100644 index 0000000..ab0958d --- /dev/null +++ b/c/state_helpers.h @@ -0,0 +1,72 @@ +#pragma once + +#include +#include + +#include "state.h" +#include "memory_map.h" + +static inline bool is_delay_slot(struct architectural_state * state) +{ + return state->is_delay_slot; +} + +static inline void sleep(struct architectural_state * state) +{ +} + +static inline void ocbp(struct architectural_state * state, uint32_t address) +{ +} + +static inline uint8_t read_memory8(struct memory_map * map, uint32_t address) +{ + struct memory_map_entry * entry = find_entry(map, address); + if (entry == NULL) return 0; + uint32_t relative_address = physical_address(address) - entry->start; + return entry->access.read_memory8(entry->mem, relative_address); +} + +static inline uint16_t read_memory16(struct memory_map * map, uint32_t address) +{ + assert((address & 0b1) == 0); + struct memory_map_entry * entry = find_entry(map, address); + if (entry == NULL) return 0; + uint32_t relative_address = physical_address(address) - entry->start; + return entry->access.read_memory16(entry->mem, relative_address); +} + +static inline uint32_t read_memory32(struct memory_map * map, uint32_t address) +{ + assert((address & 0b11) == 0); + struct memory_map_entry * entry = find_entry(map, address); + if (entry == NULL) return 0; + uint32_t relative_address = physical_address(address) - entry->start; + return entry->access.read_memory32(entry->mem, relative_address); +} + +static inline void write_memory8(struct memory_map * map, uint32_t address, uint8_t value) +{ + struct memory_map_entry * entry = find_entry(map, address); + if (entry == NULL) return; + uint32_t relative_address = physical_address(address) - entry->start; + 
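/* Editorial note, not part of the original patch: find_entry() matches on physical
   addresses, so the incoming P0-P3 virtual address is folded to its physical form
   and rebased against entry->start before being handed to the backend accessor. */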
entry->access.write_memory8(entry->mem, relative_address, value); +} + +static inline void write_memory16(struct memory_map * map, uint32_t address, uint16_t value) +{ + assert((address & 0b1) == 0); + struct memory_map_entry * entry = find_entry(map, address); + if (entry == NULL) return; + uint32_t relative_address = physical_address(address) - entry->start; + entry->access.write_memory16(entry->mem, relative_address, value); +} + +static inline void write_memory32(struct memory_map * map, uint32_t address, uint32_t value) +{ + assert((address & 0b11) == 0); + struct memory_map_entry * entry = find_entry(map, address); + if (entry == NULL) return; + uint32_t relative_address = physical_address(address) - entry->start; + entry->access.write_memory32(entry->mem, relative_address, value); +} diff --git a/exception.txt b/exception.txt new file mode 100644 index 0000000..d545685 --- /dev/null +++ b/exception.txt @@ -0,0 +1,23 @@ +POWERON 1 1 0xA0000000 - 0x000 +MANRESET 1 2 0xA0000000 - 0x020 +HUDIRESET 1 1 0xA0000000 - 0x000 +ITLBMULTIHIT 1 3 0xA0000000 - 0x140 +OTLBMULTIHIT 1 4 0xA0000000 - 0x140 +UBRKBEFORE 2 0 (VBR/DBR) 0x100/- 0x1E0 +IADDERR 2 1 (VBR) 0x100 0x0E0 +ITLBMISS 2 2 (VBR) 0x400 0x040 +EXECPROT 2 3 (VBR) 0x100 0x0A0 +RESINST 2 4 (VBR) 0x100 0x180 +ILLSLOT 2 4 (VBR) 0x100 0x1A0 +FPUDIS 2 4 (VBR) 0x100 0x800 +SLOTFPUDIS 2 4 (lVBR) 0x100 0x820 +RADDERR 2 5 (VBR) 0x100 0x0E0 +WADDERR 2 5 (VBR) 0x100 0x100 +RTLBMISS 2 6 (VBR) 0x400 0x040 +WTLBMISS 2 6 (VBR) 0x400 0x060 +READPROT 2 7 (VBR) 0x100 0x0A0 +WRITEPROT 2 7 (VBR) 0x100 0x0C0 +FPUEXC 2 8 (VBR) 0x100 0x120 +FIRSTWRITE 2 9 (VBR) 0x100 0x080 +TRAP 2 4 (VBR) 0x100 0x160 +UBRKAFTER 2 10 (VBR/DBR) 0x100/- 0x1E0 diff --git a/generate.py b/generate.py new file mode 100644 index 0000000..2871189 --- /dev/null +++ b/generate.py @@ -0,0 +1,35 @@ +import io + +def should_autonewline(line): + return ( + "static_assert" not in line + and "extern" not in line + and (len(line.split()) < 2 or line.split()[1] != '=') # hacky; meh + ) + +def _render(out, lines): + indent = " " + level = 0 + for l in lines: + if l and (l[0] == "}" or l[0] == ")"): + level -= 2 + assert level >= 0, out.getvalue() + + if len(l) == 0: + out.write("\n") + else: + out.write(indent * level + l + "\n") + + if l and (l[-1] == "{" or l[-1] == "("): + level += 2 + + if level == 0 and l and l[-1] == ";": + if should_autonewline(l): + out.write("\n") + return out + +def renderer(): + out = io.StringIO() + def render(lines): + return _render(out, lines) + return render, out diff --git a/generate_decoder.py b/generate_decoder.py new file mode 100644 index 0000000..1ece323 --- /dev/null +++ b/generate_decoder.py @@ -0,0 +1,108 @@ +import sys + +from collections import defaultdict +from instruction_table import untabulate_instructions_sh4 +from instruction_table import untabulate_instructions_sh2 +from instruction_function_name import instruction_function_name +from generate import renderer + +def b16(n): + bits = [] + for i in range(16): + bits.append((n >> i) & 1) + return '0b' + ''.join(map(str, reversed(bits))) + +def format_variables(ins): + s = ins.operands + for i, name in enumerate(ins.variables): + if 'disp' in s and name == 'd': + name = 'disp' + if 'imm' in s and name == 'i': + name = 'imm' + if 'label' in s and name == 'd': + name = 'label' + s = s.replace(name, "%d") + return s + +def render_print(ins): + variable_args = ", " + ", ".join(ins.variables) if ins.variables else "" + operands = format_variables(ins) + if operands: + yield f'snprintf(operand_buf, size, 
"{operands}"{variable_args});' + else: + yield "operand_buf[0] = 0;"; + + yield f'*instruction_buf = "{ins.instruction}";' + +def render_execute(ins): + function_name = instruction_function_name(ins) + variable_args = ", " + ", ".join(ins.variables) if ins.variables else "" + yield f"{function_name}(state, map{variable_args});" + +def render_mask_switch(body_func, mask, instructions): + yield f"switch (code & {b16(mask)}) {{" + for ins in instructions: + yield f"case {b16(ins.code.code_bits)}: // {ins.instruction} {ins.operands}" + yield "{" + for variable in ins.variables: + operand_var = ins.code.operands[variable] + yield f"uint32_t {variable} = (code >> {operand_var.lsb}) & ((1 << {operand_var.length}) - 1);" + yield from body_func(ins) + yield f"return DECODE__DEFINED;" + yield "}" + yield "}" + +def render_mask_switches(body_func, by_mask): + yield "{" + for mask, instructions in by_mask.items(): + instructions = sorted(instructions, key=lambda i: i.code.code_bits) + yield from render_mask_switch(body_func, mask, instructions) + yield "return DECODE__UNDEFINED;" + yield "}" + +def render_decode_and_execute(by_mask): + yield f"enum decode_status decode_and_execute_instruction(struct architectural_state * state, struct memory_map * map, uint16_t code)" + yield from render_mask_switches(render_execute, by_mask) + +def render_decode_and_print(by_mask): + yield f"enum decode_status decode_and_print_instruction(struct architectural_state * state, struct memory_map * map, uint16_t code, char const ** instruction_buf, char * operand_buf, uint32_t size)" + yield from render_mask_switches(render_print, by_mask) + +def header_execute(): + yield '#include "decode_execute.h"' + yield '#include "impl.h"' + yield "" + +def header_print(): + yield '#include ' + yield "" + yield '#include "decode_print.h"' + yield "" + +def main(): + by_mask = defaultdict(list) + masks = [] + + sh2_instructions = set((ins.instruction, ins.operands) for ins in untabulate_instructions_sh2()) + for ins in untabulate_instructions_sh4(): + if (ins.instruction, ins.operands) not in sh2_instructions: + continue + + if ins.code.mask_bits not in masks: + masks.append(ins.code.mask_bits) + by_mask[ins.code.mask_bits].append(ins) + + render, out = renderer() + render(header_execute()) + render(render_decode_and_execute(by_mask)) + with open(sys.argv[1], 'w') as f: + f.write(out.getvalue()) + + render, out = renderer() + render(header_print()) + render(render_decode_and_print(by_mask)) + with open(sys.argv[2], 'w') as f: + f.write(out.getvalue()) + +if __name__ == "__main__": + main() diff --git a/generator.py b/generator.py new file mode 100644 index 0000000..acb4a9f --- /dev/null +++ b/generator.py @@ -0,0 +1,301 @@ +from dataclasses import dataclass + +from parser import Tree +from lexer import Identifier, Punctuator, IntegerConstant +import identifier_substitution + +@dataclass +class CTX: + identifiers: set + +def argument_list(ctx, tree): + yield from generate(ctx, tree.children[0]) + yield ", " + yield from generate(ctx, tree.children[1]) + +def _is_new_temporary(ctx, tree): + return ( + type(tree) is Identifier + and tree.token not in identifier_substitution.mapping + and tree.token not in ctx.identifiers + ) + +def assignment(ctx, tree): + lhs = tree.children[0] + rhs = tree.children[1] + if _is_new_temporary(ctx, lhs): + ctx.identifiers.add(lhs.token) + yield "int64_t " + + yield from generate(ctx, tree.children[0]) + yield " = " + yield from generate(ctx, tree.children[1]) + +def bit_extraction(ctx, tree): + yield 
"bit_extract(" + yield from generate(ctx, tree.children[0]) + yield ", " + yield from generate(ctx, tree.children[1]) + yield ")" + +def bitwise_and(ctx, tree): + yield from generate(ctx, tree.children[0]) + yield " & " + yield from generate(ctx, tree.children[1]) + +def bitwise_or(ctx, tree): + yield from generate(ctx, tree.children[0]) + yield " | " + yield from generate(ctx, tree.children[1]) + +def bitwise_xor(ctx, tree): + yield from generate(ctx, tree.children[0]) + yield " ^ " + yield from generate(ctx, tree.children[1]) + +def block_item_list(ctx, tree): + yield from generate(ctx, tree.children[0]) + yield from generate(ctx, tree.children[1]) + +def compound_statement(ctx, tree): + yield "\n{\n" + if tree.children: + yield from generate(ctx, tree.children[0]) + yield "}\n" + +def expression_statement(ctx, tree): + yield from generate(ctx, tree.children[0]) + yield ";\n" + +def for_expression(ctx, tree): + yield from generate(ctx, tree.children[0]) + yield ", " + if len(tree.children) > 1: + yield from generate(ctx, tree.children[1]) + else: + yield "1" + +def helper_arguments(tree): + if type(tree.children[0]) is not Identifier: + return + name = tree.children[0].token + mapping = { + "IsDelaySlot": "state", + "SLEEP": "state", + "OCBP" : "state", + "WriteMemory8" : "map", + "WriteMemory16": "map", + "WriteMemory32": "map", + "ReadMemory8" : "map", + "ReadMemory16" : "map", + "ReadMemory32" : "map", + } + return mapping.get(name, None) + +def function_call(ctx, tree): + yield from generate(ctx, tree.children[0]) + yield "(" + if len(tree.children) >= 2: + if helper_arguments(tree): + yield helper_arguments(tree) + yield ", " + yield from generate(ctx, tree.children[1]) + else: + if helper_arguments(tree): + yield helper_arguments(tree) + yield ")" + +def grouping(ctx, tree): + yield "(" + yield from generate(ctx, tree.children[0]) + yield ")" + +def _if(ctx, tree): + yield "if (" + yield from generate(ctx, tree.children[0]) + yield ") " + yield from generate(ctx, tree.children[1]) + +def if_else(ctx, tree): + yield "if (" + yield from generate(ctx, tree.children[0]) + yield ") " + yield from generate(ctx, tree.children[1]) + yield "else " + yield from generate(ctx, tree.children[2]) + +def logical_and(ctx, tree): + yield from generate(ctx, tree.children[0]) + yield " && " + yield from generate(ctx, tree.children[1]) + +def logical_or(ctx, tree): + yield from generate(ctx, tree.children[0]) + yield " || " + yield from generate(ctx, tree.children[1]) + +def member(ctx, tree): + yield from generate(ctx, tree.children[0]) + yield "." + yield from generate(ctx, tree.children[1]) + +def subscript(ctx, tree): + yield from generate(ctx, tree.children[0]) + yield "[" + yield from generate(ctx, tree.children[1]) + yield "]" + +def throw(ctx, tree): + yield "return " + yield from generate(ctx, tree.children[0]) + yield "(state);\n" + +def throw_arg(ctx, tree): + yield "return " + yield from generate(ctx, tree.children[0]) + yield "(state, " + yield from generate(ctx, tree.children[1]) + yield ");\n" + +def unary_complement(ctx, tree): + yield from "~" + yield from generate(ctx, tree.children[0]) + +def unary_int(ctx, tree): + yield "unary_int(" + yield from generate(ctx, tree.children[0]) + yield ")" + +def unary_negation(ctx, tree): + yield "-" + yield from generate(ctx, tree.children[0]) + +def unary_not(ctx, tree): + yield "!" 
+ yield from generate(ctx, tree.children[0]) + +# +def addition(ctx, tree): + yield from generate(ctx, tree.children[0]) + yield " + " + yield from generate(ctx, tree.children[1]) + +def division(ctx, tree): + yield from generate(ctx, tree.children[0]) + yield " / " + yield from generate(ctx, tree.children[1]) + +def equality_equal(ctx, tree): + yield from generate(ctx, tree.children[0]) + yield " == " + yield from generate(ctx, tree.children[1]) + +def equality_not_equal(ctx, tree): + yield from generate(ctx, tree.children[0]) + yield " != " + yield from generate(ctx, tree.children[1]) + +def greater_than(ctx, tree): + yield from generate(ctx, tree.children[0]) + yield " > " + yield from generate(ctx, tree.children[1]) + +def greater_than_equal(ctx, tree): + yield from generate(ctx, tree.children[0]) + yield " >= " + yield from generate(ctx, tree.children[1]) + +def left_shift(ctx, tree): + yield from generate(ctx, tree.children[0]) + # hack for left shift by LHS constant + if type(tree.children[0]) is IntegerConstant: + yield "LL" + yield " << " + yield from generate(ctx, tree.children[1]) + +def less_than(ctx, tree): + yield from generate(ctx, tree.children[0]) + yield " < " + yield from generate(ctx, tree.children[1]) + +def less_than_equal(ctx, tree): + yield from generate(ctx, tree.children[0]) + yield " <= " + yield from generate(ctx, tree.children[1]) + +def multiplication(ctx, tree): + yield from generate(ctx, tree.children[0]) + yield " * " + yield from generate(ctx, tree.children[1]) + +def right_shift(ctx, tree): + yield from generate(ctx, tree.children[0]) + yield " >> " + yield from generate(ctx, tree.children[1]) + +def subtraction(ctx, tree): + yield from generate(ctx, tree.children[0]) + yield " - " + yield from generate(ctx, tree.children[1]) + +def identifier(ctx, token): + if token.token in identifier_substitution.mapping: + yield identifier_substitution.mapping[token.token] + else: + assert token.token.lower() == token.token, token + if token.token not in {'m', 'n', 'i', 'd'}: + assert token.token in ctx.identifiers, (token, ctx.identifiers) + yield token.token + +def constant(ctx, elem): + yield elem.token.lower() + +def generate(ctx, elem): + mapping = { + "argument_list": argument_list, + "assignment": assignment, + "bit_extraction": bit_extraction, + "bitwise_and": bitwise_and, + "bitwise_or": bitwise_or, + "bitwise_xor": bitwise_xor, + "block_item_list": block_item_list, + "compound_statement": compound_statement, + "expression_statement": expression_statement, + "for_expression": for_expression, + "function_call": function_call, + "grouping": grouping, + "if": _if, + "if_else": if_else, + "logical_and": logical_and, + "logical_or": logical_or, + "member": member, + "subscript": subscript, + "throw": throw, + "throw_arg": throw_arg, + "unary_complement": unary_complement, + "unary_int": unary_int, + "unary_negation": unary_negation, + "unary_not": unary_not, + # + "addition": addition, + "division": division, + "equality_equal": equality_equal, + "equality_not_equal": equality_not_equal, + "greater_than": greater_than, + "greater_than_equal": greater_than_equal, + "left_shift": left_shift, + "less_than": less_than, + "less_than_equal": less_than_equal, + "multiplication": multiplication, + "right_shift": right_shift, + "subtraction": subtraction, + # + } + if type(elem) is Tree: + yield from mapping[elem.operation](ctx, elem) + elif type(elem) is Identifier: + yield from identifier(ctx, elem) + elif type(elem) is IntegerConstant: + yield from constant(ctx, elem) + 
else: + assert False, type(elem) diff --git a/identifier_substitution.py b/identifier_substitution.py new file mode 100644 index 0000000..677c5d9 --- /dev/null +++ b/identifier_substitution.py @@ -0,0 +1,107 @@ +mapping = { + "ReadMemory8" : "read_memory8", + "ReadMemory16" : "read_memory16", + "ReadMemory32" : "read_memory32", + "WriteMemory8" : "write_memory8", + "WriteMemory16" : "write_memory16", + "WriteMemory32" : "write_memory32", + "ZeroExtend1" : "zero_extend1", + "ZeroExtend4" : "zero_extend4", + "ZeroExtend5" : "zero_extend5", + "ZeroExtend8" : "zero_extend8", + "ZeroExtend16" : "zero_extend16", + "ZeroExtend32" : "zero_extend32", + "SignedSaturate32": "signed_saturate32", + "SignedSaturate48": "signed_saturate48", + "SignExtend8" : "sign_extend8", + "SignExtend12" : "sign_extend12", + "SignExtend16" : "sign_extend16", + "SignExtend32" : "sign_extend32", + "Register" : "_register", + "Bit" : "bit", + + "MACH" : "state->mach", + "MACL" : "state->macl", + "PR" : "state->pr[0]", + "PR’" : "state->pr[1]", + "PR’’" : "state->pr[2]", + "PC" : "state->pc[0]", + "PC’" : "state->pc[1]", + "PC’’" : "state->pc[2]", + "FPSCR" : "state->fpscr", + "FPUL" : "state->fpul", + + "SR" : "state->sr.value", + "SSR" : "state->ssr", + "SPC" : "state->spc", + "GBR" : "state->gbr", + "VBR" : "state->vbr", + "SGR" : "state->sgr", + "DBR" : "state->dbr", + + "R0" : "REG(state, 0)", + "Rm" : "REG(state, m)", + "Rm_BANK" : "REG_BANK(state, m)", + "Rn" : "REG(state, n)", + "Rn_BANK" : "REG_BANK(state, n)", + + "T" : "state->sr.bits.t", + "S" : "state->sr.bits.s", + "Q" : "state->sr.bits.q", + "M" : "state->sr.bits.m", + "MD" : "state->sr.bits.md", + + "NOP" : "", + + "BREAK" : "BREAK", + "FIRSTWRITE" : "FIRSTWRITE", + "FPUDIS" : "FPUDIS", + "FPUEXC" : "FPUEXC", + "ILLSLOT" : "ILLSLOT", + "RADDERR" : "RADDERR", + "READPROT" : "READPROT", + "RESINST" : "RESINST", + "RTLBMISS" : "RTLBMISS", + "SLOTFPUDIS" : "SLOTFPUDIS", + "TRAP" : "TRAP", + "WADDERR" : "WADDERR", + "WRITEPROT" : "WRITEPROT", + "WTLBMISS" : "WTLBMISS", + + "IsDelaySlot" : "is_delay_slot", + "SLEEP" : "sleep", + "OCBP" : "ocbp", +} + +""" +ASID +VPN +PPN +SZ +SZ0 +SZ1 +SH +PR +WT +C +D +V + +AddressUnavailable +ALLOCO +DataAccessMiss +DirtyBit +FpuIsDisabled +IsDelaySlot +MMU +MMUCR +OCBI +OCBWB +PREF +PTEH +PTEL +ReadProhibited +URC +UTLB +WriteProhibited +""" diff --git a/lexer.py b/lexer.py new file mode 100644 index 0000000..d0ebd98 --- /dev/null +++ b/lexer.py @@ -0,0 +1,171 @@ +from dataclasses import dataclass +""" +token: + keyword + identifier + constant + punctuator +""" + +def is_nondigit(c): + return c in { + "_", + "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", + "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", + "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", + "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", + "’", + } + +def is_digit(c): + return c in { + "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", + } + +def is_hexadecimal_digit(c): + return c in { + "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", + "a", "b", "c", "d", "e", "f", + "A", "B", "C", "D", "E", "F", + } + +def is_punctuator(c): + return c in { + "[", "]", "(", ")", "{", "}", ".", + "+", "-", "~", "!", + "<<", ">>", + "<", ">", + "≤", "≥", + "≠", "=", + "∨", "∧", "⊕", + "×", "/", + "|", ";", ",", + "←", + } + +@dataclass +class Identifier: + line: int + token: str + +@dataclass +class IntegerConstant: + line: int + token: str + value: int + +@dataclass +class Punctuator: + line: int + token: str 
+
+class Lexer:
+    def __init__(self, buf):
+        self.buf = buf
+        self.start = 0
+        self.end = 0
+        self.line = 0
+
+    def peek(self):
+        return self.buf[self.end]
+
+    def match(self, c):
+        if self.buf[self.end] == c:
+            self.end += 1
+            return True
+        else:
+            return False
+
+    def slice(self, offset=0):
+        return self.buf[self.start:self.end+offset]
+
+    def advance(self):
+        c = self.buf[self.end]
+        self.end += 1
+        return c
+
+    def advance_whitespace(self):
+        if self.match('\n'):
+            self.line += 1
+            return True
+        elif self.match('\r'):  # carriage return (e.g. CRLF input)
+            return True
+        elif self.match('\t'):
+            return True
+        elif self.match(' '):
+            return True
+        return False
+
+    def identifier(self):
+        while True:
+            c = self.peek()
+            if is_digit(c) or is_nondigit(c):
+                self.advance()
+            else:
+                return Identifier(self.line, self.slice())
+
+    def hexadecimal_constant(self):
+        n = 0
+        while True:
+            c = self.peek()
+            if is_hexadecimal_digit(c):
+                self.advance()
+                n *= 16
+                i = ord(c)
+                if i >= ord('0') and i <= ord('9'):
+                    n += i - ord('0')
+                elif i >= ord('a') and i <= ord('f'):
+                    n += 10 + (i - ord('a'))
+                elif i >= ord('A') and i <= ord('F'):
+                    n += 10 + (i - ord('A'))
+                else:
+                    assert False
+            else:
+                return IntegerConstant(self.line, self.slice(), n)
+
+    def decimal_constant(self):
+        n = 0
+        while True:
+            c = self.peek()
+            if is_digit(c):
+                self.advance()
+                n *= 10
+                i = ord(c)
+                if i >= ord('0') and i <= ord('9'):
+                    n += i - ord('0')
+                else:
+                    assert False
+            else:
+                return IntegerConstant(self.line, self.slice(), n)
+
+    def punctuator(self):
+        while True:
+            if self.end < len(self.buf) and is_punctuator(self.slice(1)):
+                self.advance()
+            else:
+                assert is_punctuator(self.slice())
+                return Punctuator(self.line, self.slice())
+
+    def integer_constant(self):
+        if self.buf[self.start] == '0' and (self.match('x') or self.match('X')):
+            return self.hexadecimal_constant()
+        else:
+            self.end -= 1  # put the first digit back for decimal_constant
+            return self.decimal_constant()
+
+    def next_token(self):
+        while self.advance_whitespace():
+            pass
+
+        self.start = self.end
+
+        c = self.advance()
+
+        if is_nondigit(c):
+            return self.identifier()
+        elif is_digit(c):
+            return self.integer_constant()
+        elif is_punctuator(c):
+            return self.punctuator()
+        else:
+            raise ValueError(c)
diff --git a/parser.py b/parser.py
new file mode 100644
index 0000000..9b1f366
--- /dev/null
+++ b/parser.py
@@ -0,0 +1,459 @@
+from dataclasses import dataclass
+
+from lexer import Identifier, Punctuator, IntegerConstant
+
+@dataclass
+class Tree:
+    operation: str
+    children: list
+
+class Parser:
+    def __init__(self, tokens):
+        self.pos = 0
+        self.tokens = tokens
+
+    def advance(self, offset=1):
+        token = self.tokens[self.pos]
+        self.pos += offset
+        return token
+
+    def check(self, t, offset=0):
+        if self.pos + offset < len(self.tokens):
+            token = self.tokens[self.pos + offset]
+            return type(token) is t
+        else:
+            return False
+
+    def check_punctuator(self, p, offset=0):
+        if self.pos + offset < len(self.tokens):
+            token = self.tokens[self.pos + offset]
+            return type(token) is Punctuator and token.token == p
+        else:
+            return False
+
+    def check_identifier(self, p, offset=0):
+        if self.pos + offset < len(self.tokens):
+            token = self.tokens[self.pos + offset]
+            return type(token) is Identifier and token.token == p
+        else:
+            return False
+
+    def match(self, t):
+        ret = self.check(t)
+        if ret:
+            self.advance()
+        return ret
+
+    def match_punctuator(self, p):
+        ret = self.check_punctuator(p)
+        if ret:
+            self.advance()
+        return ret
+
+    def match_identifier(self, p):
+        ret = self.check_identifier(p)
+        if ret:
+            self.advance()
+
return ret + + def previous(self): + return self.tokens[self.pos - 1] + + def primary_expression(self): + aa = ' '.join(t.token for t in self.tokens[self.pos:]) + if self.match(Identifier): + return self.previous() + elif self.match(IntegerConstant): + return self.previous() + elif self.match_punctuator("("): + expr = self.expression() + assert self.match_punctuator(")"), self.advance() + return Tree( + operation="grouping", + children=[expr] + ) + else: + assert False, self.advance() + + def argument_expression_list(self): + expr = self.assignment_expression() + while True: + if self.match_punctuator(","): + right = self.assignment_expression() + expr = Tree( + operation="argument_list", + children=[expr, right] + ) + else: + break + return expr + + def for_expression(self): + left = self.primary_expression() + assert self.match_identifier("FOR"), self.advance() + right = self.primary_expression() + return Tree( + operation="for_expression", + children=[left, right] + ) + + def postfix_expression(self): + expr = self.primary_expression() + while True: + if self.match_punctuator("["): + right = self.expression() + assert self.match_punctuator("]"), self.advance() + expr = Tree( + operation="subscript", + children=[expr, right] + ) + elif self.match_punctuator("("): + if self.match_punctuator(")"): + right = [] + else: + right = [self.argument_expression_list()] + assert self.match_punctuator(")"), self.advance() + expr = Tree( + operation="function_call", + children=[expr, *right] + ) + elif self.match_punctuator("<"): + backtrack = self.pos - 1 + try: + right = self.for_expression() + except AssertionError: + self.pos = backtrack + break + assert self.match_punctuator(">"), self.advance() + expr = Tree( + operation="bit_extraction", + children=[expr, right] + ) + elif self.match_punctuator("."): + assert self.match(Identifier), self.advance() + right = self.previous() + expr = Tree( + operation="member", + children=[expr, right] + ) + else: + break + return expr + + def unary_expression(self): + if self.match_identifier("NOT"): + return Tree( + operation="unary_not", + children = [ + self.unary_expression() + ] + ) + elif self.match_identifier("INT"): + return Tree( + operation="unary_int", + children = [ + self.unary_expression() + ] + ) + elif self.match_punctuator("~"): + return Tree( + operation="unary_complement", + children = [ + self.unary_expression() + ] + ) + elif self.match_punctuator("-"): + return Tree( + operation="unary_negation", + children = [ + self.unary_expression() + ] + ) + # elif self.match_punctuator("|"): + # expr = self.unary_expression() + # assert self.match_punctuator("|"), self.advance() + # return Tree( + # operation="unary_absolute_value", + # children = [ + # expr + # ] + # ) + else: + return self.postfix_expression() + + def multiplicative_expression(self): + expr = self.unary_expression() + while self.match_punctuator("×") or self.match_punctuator("/"): + operation = { + "×": "multiplication", + "/": "division", + }[self.previous().token] + expr = Tree( + operation=operation, + children=[ + expr, + self.unary_expression() + ] + ) + return expr + + def additive_expression(self): + expr = self.multiplicative_expression() + while self.match_punctuator("+") or self.match_punctuator("-"): + operation = { + "+": "addition", + "-": "subtraction", + }[self.previous().token] + expr = Tree( + operation=operation, + children=[ + expr, + self.multiplicative_expression() + ] + ) + return expr + + def shift_expression(self): + expr = self.additive_expression() + while 
self.match_punctuator("<<") or self.match_punctuator(">>"): + operation = { + "<<": "left_shift", + ">>": "right_shift", + }[self.previous().token] + expr = Tree( + operation=operation, + children=[ + expr, + self.additive_expression() + ] + ) + return expr + + def relational_expression(self): + expr = self.shift_expression() + while self.match_punctuator("<") or self.match_punctuator(">") \ + or self.match_punctuator("≤") or self.match_punctuator("≥"): + operation = { + "<": "less_than", + ">": "greater_than", + "≤": "less_than_equal", + "≥": "greater_than_equal", + }[self.previous().token] + expr = Tree( + operation=operation, + children=[ + expr, + self.shift_expression() + ] + ) + return expr + + def equality_expression(self): + expr = self.relational_expression() + while self.match_punctuator("=") or self.match_punctuator("≠"): + operation = { + "≠": "equality_not_equal", + "=": "equality_equal", + }[self.previous().token] + expr = Tree( + operation=operation, + children=[ + expr, + self.relational_expression() + ] + ) + return expr + + def bitwise_and_expression(self): + expr = self.equality_expression() + while self.match_punctuator("∧"): + expr = Tree( + operation="bitwise_and", + children=[ + expr, + self.equality_expression() + ] + ) + return expr + + def bitwise_xor_expression(self): + expr = self.bitwise_and_expression() + while self.match_punctuator("⊕"): + expr = Tree( + operation="bitwise_xor", + children=[ + expr, + self.bitwise_and_expression() + ] + ) + return expr + + def bitwise_or_expression(self): + expr = self.bitwise_xor_expression() + while self.match_punctuator("∨"): + expr = Tree( + operation="bitwise_or", + children=[ + expr, + self.bitwise_xor_expression() + ] + ) + return expr + + def logical_and_expression(self): + expr = self.bitwise_or_expression() + while self.match_identifier('AND'): + expr = Tree( + operation="logical_and", + children=[ + expr, + self.bitwise_or_expression() + ] + ) + return expr + + def logical_xor_expression(self): + # expr = self.logical_and_expression() + # while self.match_identifier('XOR'): + # expr = Tree( + # operation="logical_xor", + # children=[ + # expr, + # self.logical_and_expression() + # ] + # ) + # return expr + return self.logical_and_expression() + + def logical_or_expression(self): + expr = self.logical_xor_expression() + while self.match_identifier('OR'): + expr = Tree( + operation="logical_or", + children=[ + expr, + self.logical_xor_expression() + ] + ) + return expr + + def assignment_expression(self): + backtrack = self.pos + try: + left = self.unary_expression() + except AssertionError: + self.pos = backtrack + return self.logical_or_expression() + + if self.match_punctuator("←"): + right = self.assignment_expression() + return Tree( + operation="assignment", + children=[left, right], + ) + else: + self.pos = backtrack + return self.logical_or_expression() + + def expression(self): + return self.assignment_expression() + + def statement(self): + return self.unlabeled_statement() + + def unlabeled_statement(self): + backtrack = self.pos + try: + return self.expression_statement() + except AssertionError: + self.pos = backtrack + return self.primary_block() + + def expression_statement(self): + expr = self.expression() + assert self.match_punctuator(";"), self.advance() + return Tree( + operation="expression_statement", + children=[expr] + ) + + def primary_block(self): + if self.check_punctuator("{"): + return self.compound_statement() + elif self.check_identifier("IF"): + return self.selection_statement() + elif 
self.check_identifier("THROW"): + return self.throw_statement() + else: + assert False, self.advance() + + def secondary_block(self): + return self.statement() + + def compound_statement(self): + assert self.match_punctuator("{"), self.advance() + if self.match_punctuator("}"): + return Tree( + operation="compound_statement", + children=[], + ) + else: + expr = self.block_item_list() + assert self.match_punctuator("}"), self.advance() + return Tree( + operation="compound_statement", + children=[expr] + ) + + def block_item_list(self): + expr = self.block_item() + while True: + backtrack = self.pos + try: + right = self.block_item() + expr = Tree( + operation="block_item_list", + children=[expr, right] + ) + except AssertionError: + self.pos = backtrack + break + return expr + + def block_item(self): + return self.unlabeled_statement() + + def selection_statement(self): + assert self.match_identifier("IF"), self.advance() + assert self.match_punctuator("("), self.advance() + cond_expr = self.expression() + assert self.match_punctuator(")"), self.advance() + true_block = self.secondary_block() + if self.match_identifier("ELSE"): + else_block = self.secondary_block() + return Tree( + operation="if_else", + children=[cond_expr, true_block, else_block] + ) + else: + return Tree( + operation="if", + children=[cond_expr, true_block] + ) + + def throw_statement(self): + assert self.match_identifier("THROW"), self.advance() + block = self.primary_expression() + if self.match_punctuator(","): + arg = self.primary_expression() + assert self.match_punctuator(";"), self.advance() + return Tree( + operation="throw_arg", + children=[block, arg] + ) + else: + assert self.match_punctuator(";"), self.advance() + return Tree( + operation="throw", + children=[block] + ) diff --git a/python/elf.py b/python/elf.py deleted file mode 100644 index 88c32e5..0000000 --- a/python/elf.py +++ /dev/null @@ -1,82 +0,0 @@ -""" - #define EI_NIDENT 16 - - typedef struct { - unsigned char e_ident[EI_NIDENT]; - uint16_t e_type; - uint16_t e_machine; - uint32_t e_version; - ElfN_Addr e_entry; - ElfN_Off e_phoff; - ElfN_Off e_shoff; - uint32_t e_flags; - uint16_t e_ehsize; - uint16_t e_phentsize; - uint16_t e_phnum; - uint16_t e_shentsize; - uint16_t e_shnum; - uint16_t e_shstrndx; - } ElfN_Ehdr; -""" -@dataclass -class ElfEhdr: - e_ident: bytes - e_type: int - e_machine: int - e_version: int - e_entry: int - e_phoff: int - e_shoff: int - e_flags: int - e_ehsize: int - e_phentsize: int - e_phnum: int - e_shentsize: int - e_shnum: int - e_shstrndx: int - -ELFMAG0 = 0x7f -ELFMAG1 = ord('E') -ELFMAG2 = ord('L') -ELFMAG3 = ord('F') - -EI_NIDENT = 16 - -elf_ident_fields = [ - ("EI_MAG0", 0), - ("EI_MAG1", 1), - ("EI_MAG2", 2), - ("EI_MAG3", 3), - ("EI_CLASS", 4), - ("EI_DATA", 5), - ("EI_VERSION", 6), - ("EI_OSABI", 7), - ("EI_ABIVERSION", 8), - ("EI_PAD", 9), -] - -class ElfClass(IntEnum): - NONE = 0 - CLASS32 = 1 - CLASS64 = 2 - -class ElfData(IntEnum): - NONE = 0 - LSB = 1 - MSB = 2 - -class OsABI(IntEnum): - NONE = 0 - HPUX = 1 - NETBSD = 2 - GNU = 3 - SOLARIS = 6 - AIX = 7 - IRIX = 8 - FREEBSD = 9 - TRU64 = 10 - MODESTO = 11 - OPENBSD = 12 - ARM_AEABI = 64 - ARM = 97 - STANDALONE = 255 diff --git a/python/execute.py b/python/emulator/execute.py similarity index 91% rename from python/execute.py rename to python/emulator/execute.py index 78a52fe..d8ecece 100644 --- a/python/execute.py +++ b/python/emulator/execute.py @@ -1,6 +1,5 @@ -import instruction_properties -import impl2 -from operations import zero_extend32, sign_extend32 
+from emulator import impl +from emulator.operations import zero_extend32, sign_extend32 from decode import decode_instruction, decode_variables def delay_slot_state(cpu, ins): @@ -37,7 +36,7 @@ def step(cpu, mem): # slot. ins = decode_instruction(instruction) variables = decode_variables(instruction, ins) - func = impl2.lookup[(ins.instruction, ins.operands)] + func = impl.lookup[(ins.instruction, ins.operands)] func(cpu, mem, *variables) delay_slot_state(cpu, ins) diff --git a/python/impl2.py b/python/emulator/impl.py similarity index 99% rename from python/impl2.py rename to python/emulator/impl.py index 7bf8db4..5ddeef3 100644 --- a/python/impl2.py +++ b/python/emulator/impl.py @@ -1,5 +1,4 @@ -from operations import * -from log import log +from emulator.operations import * class ILLSLOT(Exception): pass @@ -661,7 +660,6 @@ def mac_w__multiply_and_accumulate_operation(cpu, mem, m, n): if s == 1: macl = sign_extend32(macl) + mul temp = signed_saturate32(macl) - log(value1, value2, macl) if macl == temp: result = (mach << 32) | zero_extend32(macl) else: diff --git a/python/log.py b/python/emulator/log.py similarity index 100% rename from python/log.py rename to python/emulator/log.py diff --git a/python/mem.py b/python/emulator/mem.py similarity index 100% rename from python/mem.py rename to python/emulator/mem.py diff --git a/python/operations.py b/python/emulator/operations.py similarity index 98% rename from python/operations.py rename to python/emulator/operations.py index ed9be68..2b1a996 100644 --- a/python/operations.py +++ b/python/emulator/operations.py @@ -22,7 +22,7 @@ __all__ = [ def sign_extend(x, b): m = 1 << (b - 1) x = x & ((1 << b) - 1) - r = (x ^ m) - m; + r = (x ^ m) - m return r def sign_extend64(x): diff --git a/python/sh2.py b/python/emulator/sh2.py similarity index 100% rename from python/sh2.py rename to python/emulator/sh2.py diff --git a/python/simulate.py b/python/emulator/simulate.py similarity index 97% rename from python/simulate.py rename to python/emulator/simulate.py index 51b25a3..5fa1828 100644 --- a/python/simulate.py +++ b/python/emulator/simulate.py @@ -3,16 +3,16 @@ import curses.panel from curses.textpad import Textbox, rectangle import time from dataclasses import dataclass - -from sh2 import SH2 -from impl2 import ILLSLOT, TRAP -from mem import Memory import sys -from execute import step -from decode import decode_instruction, decode_variables -from operations import sign_extend32 -from log import get_log, log, raw_log +from emulator.sh2 import SH2 +from emulator.impl import ILLSLOT, TRAP +from emulator.mem import Memory +from emulator.execute import step +from emulator.operations import sign_extend32 +from emulator.log import get_log, log, raw_log + +from decode import decode_instruction, decode_variables last_seen1 = {} last_seen = {} diff --git a/python/test_impl.py b/python/emulator/test_impl.py similarity index 100% rename from python/test_impl.py rename to python/emulator/test_impl.py diff --git a/python/generate.py b/python/generate.py deleted file mode 100644 index 8f896f6..0000000 --- a/python/generate.py +++ /dev/null @@ -1,88 +0,0 @@ -from instruction_table import untabulate_instructions - -mode_name = { - '': 'no_operand', - 'Rn': 'destination_operand_only', - 'Rm': 'destination_operand_only', - 'Rm,Rn': 'source_and_destination_operands', - 'Rm,SR': 'transfer_to_sr', - 'Rm,GBR': 'transfer_to_gbr', - 'Rm,VBR': 'transfer_to_vbr', - 'Rm,MACH': 'transfer_to_mach', - 'Rm,MACL': 'transfer_to_macl', - 'Rm,PR': 'transfer_to_pr', - 'SR,Rn': 
'transfer_from_sr', - 'GBR,Rn': 'transfer_from_gbr', - 'VBR,Rn': 'transfer_from_vbr', - 'MACH,Rn': 'transfer_from_mach', - 'MACL,Rn': 'transfer_from_macl', - 'PR,Rn': 'transfer_from_pr', - '@Rn': 'destination_operand_only', - 'Rm,@Rn': 'store_register_direct_data_transfer', - '@Rm,Rn': 'load_register_direct_data_transfer', - '@Rm+,@Rn+': 'multiply_and_accumulate_operation', - '@Rm+,Rn': 'load_direct_data_transfer_from_register', - '@Rm+,SR': 'load_to_sr', - '@Rm+,GBR': 'load_to_gbr', - '@Rm+,VBR': 'load_to_vbr', - '@Rm+,MACH': 'load_to_mach', - '@Rm+,MACL': 'load_to_macl', - '@Rm+,PR': 'load_to_pr', - 'Rm,@–Rn': 'store_direct_data_transfer_from_register', - 'SR,@–Rn': 'store_from_sr', - 'GBR,@–Rn': 'store_from_gbr', - 'VBR,@–Rn': 'store_from_vbr', - 'MACH,@–Rn': 'store_from_mach', - 'MACL,@–Rn': 'store_from_macl', - 'PR,@–Rn': 'store_from_pr', - 'R0,@(disp,Rn)': 'store_register_indirect_with_displacement', - 'Rm,@(disp,Rn)': 'store_register_indirect_with_displacement', - '@(disp,Rm),R0': 'load_register_indirect_with_displacement', - '@(disp,Rm),Rn': 'load_register_indirect_with_displacement', - 'Rm,@(R0,Rn)': 'store_indexed_register_indirect', - '@(R0,Rm),Rn': 'load_indexed_register_indirect', - 'R0,@(disp,GBR)': 'store_gbr_indirect_with_displacement', - '@(disp,GBR),R0': 'load_gbr_indirect_with_displacement', - '#imm,@(R0,GBR)': 'store_indexed_gbr_indirect', - '@(R0,GBR),#imm': 'load_indexed_gbr_indirect', - '@(disp,PC),Rn': 'pc_relative_with_displacement', - '@(disp,PC),R0': 'pc_relative_with_displacement', - 'label': 'pc_relative', - '#imm,Rn': 'immediate', - '#imm,R0': 'immediate', - '#imm': 'immediate', -} - -def sanitize_instruction(ins): - name = ins.instruction.replace('.', '_').replace('/', '_').lower() - assert ins.operands in mode_name, (ins.instruction, ins.operands) - mode = mode_name[ins.operands] - return '__'.join([name, mode]) - -def main(): - function_names = {} - instruction_table = untabulate_instructions() - for ins in instruction_table: - function_name = sanitize_instruction(ins) - instruction_operands = (ins.instruction, ins.operands) - assert function_name not in function_names, (instruction_operands, names[name]) - function_names[function_name] = instruction_operands - if ins.variables: - args = f', {", ".join(ins.variables)}' - else: - args = '' - print(f"def {function_name}(cpu, mem{args}):") - print(f" # {ins.instruction} {ins.operands}") - print(f" raise NotImplementedError()") - print() - - print("lookup = {") - for ins in instruction_table: - function_name = sanitize_instruction(ins) - i_space = ' ' * (len('CMP/STR') - len(ins.instruction)) - o_space = ' ' * (len('@(disp,GBR),R0') - len(ins.operands)) - print(f" ('{ins.instruction}'{i_space}, '{ins.operands}'{o_space}): {function_name},") - print("}") - -if __name__ == "__main__": - main() diff --git a/python/generate_python.py b/python/generate_python.py new file mode 100644 index 0000000..750e516 --- /dev/null +++ b/python/generate_python.py @@ -0,0 +1,30 @@ +from instruction_table import untabulate_instructions_sh2 +from instruction_function_name import instruction_function_name + +def main(): + function_names = {} + instruction_table = untabulate_instructions_sh2() + for ins in instruction_table: + function_name = instruction_function_name(ins) + instruction_operands = (ins.instruction, ins.operands) + assert function_name not in function_names, (instruction_operands, function_names[function_name]) + function_names[function_name] = instruction_operands + if ins.variables: + args = f', {", 
".join(ins.variables)}' + else: + args = '' + print(f"def {function_name}(cpu, mem{args}):") + print(f" # {ins.instruction} {ins.operands}") + print(f" raise NotImplementedError()") + print() + + print("lookup = {") + for ins in instruction_table: + function_name = instruction_function_name(ins) + i_space = ' ' * (len('CMP/STR') - len(ins.instruction)) + o_space = ' ' * (len('@(disp,GBR),R0') - len(ins.operands)) + print(f" ('{ins.instruction}'{i_space}, '{ins.operands}'{o_space}): {function_name},") + print("}") + +if __name__ == "__main__": + main() diff --git a/python/generate_sh4_directory.py b/python/generate_sh4_directory.py new file mode 100644 index 0000000..098fc38 --- /dev/null +++ b/python/generate_sh4_directory.py @@ -0,0 +1,14 @@ +from instruction_table import untabulate_instructions_sh4 +from instruction_file_name import instruction_file_name + +for ins in untabulate_instructions_sh4(): + code = list(f'{ins.code.code_bits:016b}') + for operand in ins.code.operands.values(): + for i in range(operand.lsb, operand.lsb + operand.length): + code[15 - i] = operand.operand + + file_name = instruction_file_name(ins) + path = os.path.join("sh4", file_name) + assert not os.path.exists(path) + with open(path, 'w') as f: + f.write(''.join(code) + '\n') diff --git a/python/instruction_file_name.py b/python/instruction_file_name.py new file mode 100644 index 0000000..8a9b78e --- /dev/null +++ b/python/instruction_file_name.py @@ -0,0 +1,7 @@ +def instruction_file_name(ins): + if ins.operands: + file_name = ' '.join([ins.instruction, ins.operands]) + else: + file_name = ins.instruction + file_name = file_name.replace('/', '_') + return file_name diff --git a/python/instruction_function_name.py b/python/instruction_function_name.py new file mode 100644 index 0000000..273d9cc --- /dev/null +++ b/python/instruction_function_name.py @@ -0,0 +1,118 @@ +mode_name = { + '': 'no_operand', + 'Rn': 'destination_operand_only', + 'Rm': 'destination_operand_only', + 'Rm,Rn': 'source_and_destination_operands', + 'Rm,SR': 'transfer_to_sr', + 'Rm,SSR': 'transfer_to_ssr', + 'Rm,SPC': 'transfer_to_spc', + 'Rm,GBR': 'transfer_to_gbr', + 'Rm,VBR': 'transfer_to_vbr', + 'Rm,DBR': 'transfer_to_dbr', + 'Rm,MACH': 'transfer_to_mach', + 'Rm,MACL': 'transfer_to_macl', + 'Rm,Rn_BANK': 'transfer_to_rn_bank', + 'Rm,PR': 'transfer_to_pr', + 'SR,Rn': 'transfer_from_sr', + 'SSR,Rn': 'transfer_from_ssr', + 'SPC,Rn': 'transfer_from_spc', + 'GBR,Rn': 'transfer_from_gbr', + 'VBR,Rn': 'transfer_from_vbr', + 'DBR,Rn': 'transfer_from_dbr', + 'SGR,Rn': 'transfer_from_sgr', + 'MACH,Rn': 'transfer_from_mach', + 'MACL,Rn': 'transfer_from_macl', + 'Rm_BANK,Rn': 'transfer_from_rm_bank', + 'PR,Rn': 'transfer_from_pr', + '@Rn': 'destination_operand_only', + 'Rm,@Rn': 'store_register_direct_data_transfer', + 'R0,@Rn': 'r0_store_register_direct_data_transfer', + '@Rm,Rn': 'load_register_direct_data_transfer', + '@Rm+,@Rn+': 'multiply_and_accumulate_operation', + '@Rm+,Rn': 'load_direct_data_transfer_from_register', + '@Rm+,SR': 'load_to_sr', + '@Rm+,SSR': 'load_to_ssr', + '@Rm+,SPC': 'load_to_spc', + '@Rm+,GBR': 'load_to_gbr', + '@Rm+,VBR': 'load_to_vbr', + '@Rm+,DBR': 'load_to_dbr', + '@Rm+,MACH': 'load_to_mach', + '@Rm+,MACL': 'load_to_macl', + '@Rm+,Rn_BANK': 'load_to_rn_bank', + '@Rm+,PR': 'load_to_pr', + 'Rm,@-Rn': 'store_direct_data_transfer_from_register', + 'SR,@-Rn': 'store_from_sr', + 'SSR,@-Rn': 'store_from_ssr', + 'SPC,@-Rn': 'store_from_spc', + 'GBR,@-Rn': 'store_from_gbr', + 'VBR,@-Rn': 'store_from_vbr', + 'DBR,@-Rn': 
'store_from_dbr', + 'SGR,@-Rn': 'store_from_sgr', + 'MACH,@-Rn': 'store_from_mach', + 'MACL,@-Rn': 'store_from_macl', + 'Rm_BANK,@-Rn': 'store_from_rm_bank', + 'PR,@-Rn': 'store_from_pr', + 'R0,@(disp,Rn)': 'store_register_indirect_with_displacement', + 'Rm,@(disp,Rn)': 'store_register_indirect_with_displacement', + '@(disp,Rm),R0': 'load_register_indirect_with_displacement', + '@(disp,Rm),Rn': 'load_register_indirect_with_displacement', + 'Rm,@(R0,Rn)': 'store_indexed_register_indirect', + '@(R0,Rm),Rn': 'load_indexed_register_indirect', + 'R0,@(disp,GBR)': 'store_gbr_indirect_with_displacement', + '@(disp,GBR),R0': 'load_gbr_indirect_with_displacement', + '#imm,@(R0,GBR)': 'store_indexed_gbr_indirect', + '@(R0,GBR),#imm': 'load_indexed_gbr_indirect', + '@(disp,PC),Rn': 'pc_relative_with_displacement', + '@(disp,PC),R0': 'pc_relative_with_displacement', + 'label': 'pc_relative', + '#imm,Rn': 'immediate', + '#imm,R0': 'immediate', + '#imm': 'immediate', + # floating point + 'FRn': 'destination_operand_only', + 'DRn': 'destination_operand_only_double', + 'FRm,FRn': 'source_and_destination_operands', + 'DRm,DRn': 'source_and_destination_operands_double', + 'XDm,XDn': 'source_and_destination_operands_bank', + 'DRm,XDn': 'double_to_bank', + 'XDm,DRn': 'bank_to_double', + '@Rm,FRn': 'load_register_direct_data_transfer', + '@Rm,DRn': 'load_register_direct_data_transfer_double', + '@Rm,XDn': 'load_register_direct_data_transfer_bank', + '@(R0,Rm),FRn': 'load_indexed_register_indirect', + '@(R0,Rm),DRn': 'load_indexed_register_indirect_double', + '@(R0,Rm),XDn': 'load_indexed_register_indirect_bank', + '@Rm+,FRn': 'load_direct_data_transfer_from_register', + '@Rm+,DRn': 'load_direct_data_transfer_from_register_double', + '@Rm+,XDn': 'load_direct_data_transfer_from_register_bank', + 'FRm,@Rn': 'store_register_direct_data_transfer', + 'DRm,@Rn': 'store_register_direct_data_transfer_double', + 'XDm,@Rn': 'store_register_direct_data_transfer_bank', + 'FRm,@-Rn': 'store_direct_data_transfer_from_register', + 'DRm,@-Rn': 'store_direct_data_transfer_from_register_double', + 'XDm,@-Rn': 'store_direct_data_transfer_from_register_bank', + 'FRm,@(R0,Rn)': 'store_indexed_register_indirect', + 'DRm,@(R0,Rn)': 'store_indexed_register_indirect_double', + 'XDm,@(R0,Rn)': 'store_indexed_register_indirect_bank', + 'FRm,FPUL': 'frm_to_fpul', + 'DRm,FPUL': 'drm_to_fpul', + 'FPUL,FRn': 'fpul_to_frn', + 'FPUL,DRn': 'fpul_to_drn', + 'FR0,FRm,FRn': 'fr0_frm_frn', + 'Rm,FPSCR': 'transfer_to_fpscr', + 'Rm,FPUL': 'transfer_to_fpul', + '@Rm+,FPSCR': 'load_to_fpscr', + '@Rm+,FPUL': 'load_to_fpul', + 'FPSCR,Rn': 'transfer_from_fpscr', + 'FPUL,Rn': 'transfer_from_fpul', + 'FPUL,@-Rn': 'store_from_fpul', + 'FPSCR,@-Rn': 'store_from_fpscr', + 'FVm,FVn': 'fvm_fvn', + 'XMTRX,FVn': 'xmtrx_fvn', +} + +def instruction_function_name(ins): + name = ins.instruction.replace('.', '_').replace('/', '_').lower() + assert ins.operands in mode_name, (ins.instruction, ins.operands) + mode = mode_name[ins.operands] + return '__'.join([name, mode]) diff --git a/python/instruction_table.py b/python/instruction_table.py index 3a576d2..c10e842 100644 --- a/python/instruction_table.py +++ b/python/instruction_table.py @@ -223,7 +223,7 @@ def untabulate_instructions_sh2(): ("t_bit" , 102) ])) - return untabulate_instructions(os.path.join(directory, "sh2.txt"), columns) + return untabulate_instructions(os.path.join(directory, "..", "sh2.txt"), columns) def untabulate_instructions_sh4(): columns = dict(column_bounds([ @@ -235,20 +235,4 @@ def 
untabulate_instructions_sh4(): ("t_bit" , 116) ])) - return untabulate_instructions(os.path.join(directory, "sh4.txt"), columns) - -l = untabulate_instructions_sh4() -from pprint import pprint -for ins in list(l): - if ins.operands: - fn = ' '.join([ins.instruction, ins.operands]) - else: - fn = ins.instruction - fn = fn.replace('/', '_') - code = list(f'{ins.code.code_bits:016b}') - for operand in ins.code.operands.values(): - for i in range(operand.lsb, operand.lsb + operand.length): - code[15 - i] = operand.operand - - with open(os.path.join(directory, "sh4", fn), 'w') as f: - f.write(''.join(code) + '\n') + return untabulate_instructions(os.path.join(directory, "..", "sh4.txt"), columns) diff --git a/test_lexer.py b/test_lexer.py new file mode 100644 index 0000000..1bb2a2b --- /dev/null +++ b/test_lexer.py @@ -0,0 +1,40 @@ +import os +import os.path + +from lexer import Lexer, IntegerConstant, Punctuator, Identifier + +def is_instruction_descriptor(s): + return all(c in {'0', '1', 'n', 'm', 'i', 'd'} for c in s) + +def parse_file(path): + with open(path) as f: + buf = f.read() + + lines = buf.split('\n', maxsplit=2) + assert len(lines[0]) == 16 and is_instruction_descriptor(lines[0]), lines[0] + + if lines[1].startswith('Available only when'): + buf = lines[2] + else: + if len(lines) >= 3: + buf = '\n'.join([lines[1], lines[2]]) + else: + buf = lines[1] + + lexer = Lexer(buf) + + while True: + try: + token = lexer.next_token() + except IndexError: + break + yield token + +files = os.listdir('sh4') +for filename in files: + if filename.startswith('F'): + continue + path = os.path.join('sh4', filename) + for token in parse_file(path): + if type(token) is Identifier: + print(token.token) diff --git a/test_parser.py b/test_parser.py new file mode 100644 index 0000000..49db307 --- /dev/null +++ b/test_parser.py @@ -0,0 +1,39 @@ +from lexer import Lexer +from parser import Parser +from generator import generate + +sources = [ + "op1 ← FloatValue64(DR2n);", + "UTLB[MMUCR.URC].ASID ← PTEH.ASID;", + + "IF (n_field = m_field)" + "{" + "m_address ← m_address + 4;" + "n_address ← n_address + 4;" + "}", + + """ + IF (op1 ≥ 0) + op2 ← op2 << shift_amount; + ELSE IF (shift_amount ≠ 0) + op2 ← op2 >> (32 - shift_amount); + """ +] + +def all_tokens(lexer): + while True: + try: + token = lexer.next_token() + except IndexError: + break + yield token + +for source in sources: + lexer = Lexer(source) + tokens = list(all_tokens(lexer)) + parser = Parser(tokens) + from pprint import pprint + root = parser.statement() + s = "".join(generate(root)) + print(s, end='') + print() diff --git a/transform.py b/transform.py new file mode 100644 index 0000000..5a8681f --- /dev/null +++ b/transform.py @@ -0,0 +1,118 @@ +import sys +import os + +from lexer import Lexer +from parser import Parser +from generator import generate, CTX + +from instruction_table import untabulate_instructions_sh4 +from instruction_table import untabulate_instructions_sh2 +from instruction_function_name import instruction_function_name +from instruction_file_name import instruction_file_name +from instruction_properties import has_delay_slot + +def is_instruction_descriptor(s): + return all(c in {'0', '1', 'n', 'm', 'i', 'd'} for c in s) + +def parse_file(path): + with open(path) as f: + buf = f.read() + + lines = buf.split('\n', maxsplit=2) + assert len(lines[0]) == 16 and is_instruction_descriptor(lines[0]), lines[0] + + if lines[1].startswith('Available only when'): + buf = lines[2] + else: + if len(lines) >= 3: + buf = 
'\n'.join([lines[1], lines[2]]) + else: + buf = lines[1] + + lexer = Lexer(buf) + + while True: + try: + token = lexer.next_token() + except IndexError: + break + yield token + +def generate_function_declaration(instruction_name, function_name, variables): + args = ", ".join([ + "struct architectural_state * state", + "struct memory_map * map", + *[ + f"const uint32_t {x}" + for x in variables + ] + ]) + + yield f"/* {instruction_name} */" + yield f"void {function_name}({args})" + +def generate_file(instruction_name, function_name, variables, delay_slot, src_path): + tokens = list(parse_file(src_path)) + parser = Parser(tokens) + + ctx = CTX(identifiers=set()) + + yield from generate_function_declaration(instruction_name, function_name, variables) + yield "{" + output = [] + while parser.tokens[parser.pos:]: + stmt = parser.statement() + src = "".join(generate(ctx, stmt)) + output.append(src) + + for line in "".join(output).rstrip().split('\n'): + yield f" {line}" + yield "" + yield f" state->is_delay_slot = {delay_slot};" + yield "}" + yield "" + yield "" + +def main(): + output = [ + '#include "impl.h"', + '#include "operations.h"', + '#include "exception.h"', + '#include "state_helpers.h"', + '', + ] + + header_output = [ + '#pragma once', + '', + '#include "state.h"', + '#include "memory_map.h"', + '', + ] + + sh2_instructions = set((ins.instruction, ins.operands) for ins in untabulate_instructions_sh2()) + for ins in untabulate_instructions_sh4(): + if (ins.instruction, ins.operands) not in sh2_instructions: + continue + _name = [ins.instruction, ins.operands] if ins.operands else [ins.instruction] + instruction_name = " ".join(_name) + function_name = instruction_function_name(ins) + file_name = instruction_file_name(ins) + src_path = os.path.join("sh4", file_name) + delay_slot = "true" if has_delay_slot(ins) else "false" + gen = generate_file(instruction_name, function_name, ins.variables, delay_slot, src_path) + output.extend(gen) + + gen = generate_function_declaration(instruction_name, function_name, ins.variables) + lgen = list(gen) + lgen[-1] += ';' + header_output.extend(lgen) + + with open(sys.argv[1], "w") as f: + f.write("\n".join(output)) + + with open(sys.argv[2], "w") as f: + f.write("\n".join(header_output)) + +if __name__ == '__main__': + main()
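
For orientation: the C that transform.py emits for each SH-2/SH-4 shared instruction has roughly the following shape. This example assumes an "ADD Rm,Rn" description file that reduces to "Rn ← Rn + Rm;"; the exact statement text and indentation depend on the sh4/ description files and on the assignment/expression rules defined earlier in generator.py, so this is a sketch rather than verbatim output. The declaration shape follows generate_function_declaration() and the register substitutions follow identifier_substitution.py.

    /* ADD Rm,Rn */
    void add__source_and_destination_operands(struct architectural_state * state, struct memory_map * map, const uint32_t m, const uint32_t n)
    {
        /* generated from the assumed description "Rn ← Rn + Rm;" */
        REG(state, n) = REG(state, n) + REG(state, m);

        state->is_delay_slot = false;
    }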