00001
00012
00013
00014 #ifndef _MACRO68_H_
00015 #define _MACRO68_H_
00016
00017 #include "emu68/srdef68.h"
00018 #include "emu68/excep68.h"
00019
00020 #ifdef __cplusplus
00021 extern "C" {
00022 #endif
00023
/* Cycle counting support.
** When the build defines EMU68CYCLE, ADDCYCLE/SETCYCLE maintain the
** emulated cycle counter in reg68.cycle; otherwise they expand to
** nothing, so instruction code may use them unconditionally.
*/
#ifndef EMU68CYCLE
# define ADDCYCLE(N)
# define SETCYCLE(N)
#else
# define ADDCYCLE(N) reg68.cycle += (N)
# define SETCYCLE(N) reg68.cycle = (N)
#endif
00046
/* Enter 68000 exception processing.
** Pushes the current PC (long) then SR (word), clears the trace bit
** and the old interrupt level (sr &= 0x70FF), sets supervisor mode
** plus the new interrupt level (0x2000 + LVL<<SR_IPL_BIT), and loads
** the new PC from the long word at address VECTOR.
*/
#define EXCEPTION(VECTOR,LVL) \
{ \
pushl(reg68.pc); pushw(reg68.sr); \
reg68.sr &= 0x70FF; \
reg68.sr |= (0x2000+((LVL)<<SR_IPL_BIT)); \
reg68.pc = read_L(VECTOR); \
}
00060
/* Illegal instruction: log the faulting PC, then raise the illegal
** instruction exception.
*/
#define ILLEGAL \
{\
EMU68error_add("Illegal pc:%06x",reg68.pc); \
EXCEPTION(ILLEGAL_VECTOR,ILLEGAL_LVL); \
}
00067
/* Bus error: log PC, faulting address and access direction (MODE
** non-zero means a write, 'W'; zero a read, 'R'), then raise the
** bus-error exception.
*/
#define BUSERROR(ADDR,MODE) \
{\
EMU68error_add("bus error pc:%06x addr:%06x (%c)",\
reg68.pc,ADDR,MODE?'W':'R');\
EXCEPTION(BUSERROR_VECTOR,BUSERROR_LVL) \
}
00075
/* Line-A (1010) emulator trap. */
#define LINEA EXCEPTION(LINEA_VECTOR,LINEA_LVL)

/* Line-F (1111) emulator trap. */
#define LINEF EXCEPTION(LINEF_VECTOR,LINEF_LVL)

/* TRAPV: raise the overflow exception when the V flag is set.
** NOTE(review): expands to a bare `if`, so it is not safe inside an
** un-braced if/else body.
*/
#define TRAPV if(reg68.sr&SR_V) EXCEPTION(TRAPV_VECTOR,TRAPV_LVL)

/* TRAP #n: raise the exception for trap number TRAP_N. */
#define TRAP(TRAP_N) EXCEPTION(TRAP_VECTOR(TRAP_N),TRAP_LVL)

/* Raise the CHK exception unconditionally. */
#define CHK EXCEPTION(CHK_VECTOR,CHK_LVL)

/* CHK.W: trap when the tested value is out of bounds
** (CHK_B < 0 or CHK_B > CHK_A); otherwise do nothing.
*/
#define CHKW(CHK_A,CHK_B) if((CHK_B)<0 || (CHK_B)>(CHK_A)){ CHK; }
00093
/* NOP: no operation. */
#define NOP

/* RESET: delegate to the emulator's reset handler. */
#define RESET EMU68_reset()

/* STOP: load SR from the immediate word in the instruction stream
** and flag the processor as stopped (reg68.status = 1).
** NOTE(review): multi-statement macro without do/while(0) -- not
** safe inside an un-braced if body.
*/
#define STOP reg68.sr = (u16)get_nextw(); reg68.status = 1

/* RTS: pop the return address (long) into PC. */
#define RTS reg68.pc = popl()

/* RTE: pop SR (word) from the stack, then pop the return PC. */
#define RTE reg68.sr = popw(); RTS

/* RTR: restore only the CCR byte from the stacked word (the system
** byte of SR is preserved), then pop the return PC.
*/
#define RTR reg68.sr = (reg68.sr&0xFF00) | (u8)popw(); RTS
00121
/* NBCD (byte).
** NOTE(review): this is a stub -- it merely copies the operand and
** performs no BCD negation and no flag update; confirm whether this
** is intentional.
*/
#define NBCDB(NBCD_S,NBCD_A) (NBCD_S)=(NBCD_A)
/* EXG: exchange the contents of two registers. Condition codes are
** not affected.
** Implemented with a temporary instead of the previous XOR-swap:
** the XOR trick zeroes the destination when both arguments designate
** the same lvalue (EXG Dn,Dn must leave the register unchanged on a
** real 68000), and the do/while(0) wrapper makes the macro
** statement-safe inside un-braced if/else bodies.
*/
#define EXG(A,B) do { __typeof__(A) ZOB68 = (A); (A)=(B); (B)=ZOB68; } while(0)
00134
/* EXT.W: sign-extend the low byte into the low word; the upper
** 16 bits of the register are preserved.
*/
#define EXTW(D) (D) = ((D)&0xFFFF0000) | ((u16)(s32)(s8)(D))

/* EXT.L: sign-extend the low word to the full 32 bits. */
#define EXTL(D) (D) = (s32)(s16)(D)

/* TAS: test the operand (sets N/Z via TSTB), then set bit 31 --
** bit 7 of the high-justified byte operand.
*/
#define TAS(TAS_A) { TSTB(TAS_A,TAS_A); (TAS_A) |= 0x80000000; }
/* CLR: clear the destination CLR_S; set Z, clear N/V/C (X is left
** untouched).
** The `(CLR_A) = (CLR_A);` self-assignment forces an evaluation of
** the operand -- presumably to mimic the 68000's read cycle before
** clearing / preserve addressing-mode side effects; TODO confirm.
*/
#define CLR(CLR_S,CLR_A) \
{\
(CLR_A) = (CLR_A); \
reg68.sr =(reg68.sr&~(SR_N|SR_V|SR_C)) | SR_Z;\
CLR_S = 0;\
}

/* CLR.B -- all sizes share the 32-bit implementation above. */
#define CLRB(A,B) CLR(A,B)

/* CLR.W */
#define CLRW(A,B) CLR(A,B)

/* CLR.L */
#define CLRL(A,B) CLR(A,B)
00160
/* LINK: push An, copy the stack pointer into An, then add the
** displacement word fetched from the instruction stream to SP.
** NOTE(review): multi-statement macro without do/while(0) -- not
** safe inside an un-braced if body.
*/
#define LINK(R_LNK) \
pushl(reg68.a[R_LNK]); \
reg68.a[R_LNK] = reg68.a[7]; \
reg68.a[7] += get_nextw()

/* UNLK: restore SP from An, then pop the saved An value.
** Same multi-statement caveat as LINK.
*/
#define UNLK(R_LNK) \
reg68.a[7]=reg68.a[R_LNK]; \
reg68.a[R_LNK]=popl()
00171
/* SWAP: exchange the two 16-bit halves of the register; set Z when
** the result is zero and N from bit 31; clear V and C (X untouched).
*/
#define SWAP(SWP_A) \
{ \
(SWP_A) = ((u32)(SWP_A)>>16) | ((SWP_A)<<16); \
reg68.sr = (reg68.sr&~(SR_V|SR_C|SR_Z|SR_N)) | \
((!(SWP_A))<<SR_Z_BIT) | \
(((s32)(SWP_A)>>31)&SR_N); \
}
00180
/* Older implementations of the bit-operation macros, kept disabled
** for reference; superseded by the definitions below. A stray `*/
/* ` comment terminator left over from an old block comment has been
** removed from the end of this region.
*/
#if 0

#define BTST(V,BIT) \
reg68.sr = (reg68.sr&(~SR_Z)) | ((((V)&(1<<(BIT)))==0)<<SR_Z_BIT)

#define BSET(V,BIT) BTST(V,BIT); (V) |= (1<<(BIT));

#define BCLR(V,BIT) BTST(V,BIT); (V) &= ~(1<<(BIT));

#define BCHG(V,BIT) BTST(V,BIT); (V) ^= (1<<(BIT));

#endif
00203
/* Bit operations on the 32-bit internal operand; BIT is the bit
** number within that operand. Z is set when the tested bit is zero
** (state before any modification); other flags are untouched.
** NOTE(review): BSET/BCLR/BCHG expand to bare if/else statements and
** are not safe inside un-braced if/else bodies.
*/

/* BTST: Z = !bit, operand unchanged. */
#define BTST(V,BIT) \
reg68.sr = (reg68.sr&(~SR_Z)) | (((((V)>>(BIT))&1)^1)<<SR_Z_BIT)

/* BSET: test then set the bit. */
#define BSET(V,BIT) \
if( (V)&(1<<(BIT)) ) { reg68.sr &= ~SR_Z; }\
else { (V) |= 1<<(BIT); reg68.sr |= SR_Z; }

/* BCLR: test then clear the bit. */
#define BCLR(V,BIT) \
if( (V)&(1<<(BIT)) ) { (V) &= ~(1<<(BIT)); reg68.sr &= ~SR_Z; }\
else { reg68.sr |= SR_Z; }

/* BCHG: test then toggle the bit. */
#define BCHG(V,BIT) \
if( (V)&(1<<(BIT)) ) { (V) &= ~(1<<(BIT)); reg68.sr &= ~SR_Z; }\
else { (V) |= 1<<(BIT); reg68.sr |= SR_Z; }
00222
/* MOVE flag update: set N from bit 31 and Z when the 32-bit value is
** zero; clear V and C; preserve X and the system byte of SR.
** NOTE(review): the expansion ends in a semicolon, so `MOVE(x);`
** produces an extra empty statement.
*/
#define MOVE(MOV_A) reg68.sr = (reg68.sr&(0xFF00 | SR_X)) \
| (((MOV_A)==0)<<SR_Z_BIT) | (((s32)(MOV_A)>>31)&SR_N);
/* TST performs the same flag update as MOVE. */
#define TST(TST_V) MOVE(TST_V)
/* TSTx: store the operand into TST_S, then update the flags from the
** stored 32-bit value (operands are kept high-justified, so one
** implementation serves all sizes).
*/
#define TSTB(TST_S,TST_A) { TST_S=TST_A; TST(TST_S); }
#define TSTW(TST_S,TST_A) { TST_S=TST_A; TST(TST_S); }
#define TSTL(TST_S,TST_A) { TST_S=TST_A; TST(TST_S); }
00236
/* Multiply/divide: delegate to the helper functions; presumably the
** helpers perform the condition-code updates -- see muls68(),
** mulu68(), divs68() and divu68().
*/

/* MULS.W: signed 16x16 -> 32 multiply. */
#define MULSW(MUL_S, MUL_A, MUL_B) MUL_S = muls68(MUL_A, MUL_B)

/* MULU.W: unsigned 16x16 -> 32 multiply. */
#define MULUW(MUL_S, MUL_A, MUL_B) MUL_S = mulu68(MUL_A, MUL_B)

/* DIVS.W: signed 32/16 divide. */
#define DIVSW(DIV_S, DIV_A, DIV_B) DIV_S = divs68(DIV_A, DIV_B)

/* DIVU.W: unsigned 32/16 divide. */
#define DIVUW(DIV_S, DIV_A, DIV_B) DIV_S = divu68(DIV_A, DIV_B)
00255
/* Bitwise logic. All sizes share one 32-bit implementation because
** operands are kept high-justified; flag handling is delegated to
** the and68()/orr68()/eor68()/not68() helpers.
*/
#define AND(AND_S, AND_A, AND_B) AND_S = and68(AND_A, AND_B)

#define ANDB(AND_S, AND_A, AND_B) AND(AND_S, AND_A, AND_B)

#define ANDW(AND_S, AND_A, AND_B) AND(AND_S, AND_A, AND_B)

#define ANDL(AND_S, AND_A, AND_B) AND(AND_S, AND_A, AND_B)


#define ORR(ORR_S, ORR_A, ORR_B) ORR_S = orr68(ORR_A, ORR_B)

#define ORB(ORR_S, ORR_A, ORR_B) ORR(ORR_S, ORR_A, ORR_B)

#define ORW(ORR_S, ORR_A, ORR_B) ORR(ORR_S, ORR_A, ORR_B)

#define ORL(ORR_S, ORR_A, ORR_B) ORR(ORR_S, ORR_A, ORR_B)


#define EOR(EOR_S, EOR_A, EOR_B) EOR_S = eor68(EOR_A, EOR_B)

#define EORB(EOR_S, EOR_A, EOR_B) EOR(EOR_S, EOR_A, EOR_B)

#define EORW(EOR_S, EOR_A, EOR_B) EOR(EOR_S, EOR_A, EOR_B)

#define EORL(EOR_S, EOR_A, EOR_B) EOR(EOR_S, EOR_A, EOR_B)


#define NOT(NOT_S,NOT_A) NOT_S = not68(NOT_A)

#define NOTB(A,B) NOT(A,B)

#define NOTW(A,B) NOT(A,B)

#define NOTL(A,B) NOT(A,B)
00313
/* Add/sub/compare: delegate to add68()/sub68(), which presumably
** update the condition codes. Note the operand swap in SUB and CMP:
** sub68(SUB_B,SUB_A,...) so the result is B - A; CMP discards the
** result and keeps only the flags.
** The ADDX variants inject the X flag shifted into the carry-in
** position of the high-justified operand for each size (bit 24 for
** bytes, bit 16 for words, bit 0 for longs).
*/
#define ADD(ADD_S,ADD_A,ADD_B,ADD_X) ADD_S=add68(ADD_A,ADD_B,ADD_X)
#define SUB(SUB_S,SUB_A,SUB_B,SUB_X) SUB_S=sub68(SUB_B,SUB_A,SUB_X)
#define CMP(SUB_A,SUB_B) sub68(SUB_B,SUB_A,0)

#define ADDB(ADD_S, ADD_A, ADD_B) ADD(ADD_S, ADD_A, ADD_B,0)
#define ADDW(ADD_S, ADD_A, ADD_B) ADD(ADD_S, ADD_A, ADD_B,0)
#define ADDL(ADD_S, ADD_A, ADD_B) ADD(ADD_S, ADD_A, ADD_B,0)
#define ADDXB(ADD_S, ADD_A, ADD_B) \
ADD(ADD_S, ADD_A, ADD_B, (reg68.sr&SR_X)<<(24-SR_X_BIT))
#define ADDXW(ADD_S, ADD_A, ADD_B) \
ADD(ADD_S, ADD_A, ADD_B, (reg68.sr&SR_X)<<(16-SR_X_BIT))
#define ADDXL(ADD_S, ADD_A, ADD_B) \
ADD(ADD_S, ADD_A, ADD_B, (reg68.sr&SR_X)>>SR_X_BIT )
00331
/* ADDA: add to an address register. ADDA does not affect the
** condition codes on the 68000, hence no SR update here.
*/
#define ADDA(ADD_S, ADD_A, ADD_B) (ADD_S) = (ADD_A) + (ADD_B)
/* ADDA.W: the word-sized source lives in the high 16 bits of ADD_A
** (internal high-justified operand convention); shift it down before
** adding. (ADD_A) is parenthesized so that expression arguments with
** operators of lower precedence than >> (e.g. `x | y`) are not
** mis-grouped by the expansion -- this mirrors the guarded operand
** handling in SUBAW.
** NOTE(review): unlike SUBAW, no signed (s32) widening is applied,
** so sign extension of the word depends on the argument's type --
** confirm against SUBAW.
*/
#define ADDAW(ADD_S, ADD_A, ADD_B) ADDA(ADD_S, (ADD_A)>>16, ADD_B)
/* ADDA.L: plain 32-bit add. */
#define ADDAL(ADD_S, ADD_A, ADD_B) ADDA(ADD_S, ADD_A, ADD_B)
00335
/* SUB family: delegate to sub68() via SUB (result is B - A, see the
** SUB macro above). SUBX injects the X flag at the size-appropriate
** carry-in position of the high-justified operand.
*/
#define SUBB(SUB_S, SUB_A, SUB_B) SUB(SUB_S, SUB_A, SUB_B,0)
#define SUBW(SUB_S, SUB_A, SUB_B) SUB(SUB_S, SUB_A, SUB_B,0)
#define SUBL(SUB_S, SUB_A, SUB_B) SUB(SUB_S, SUB_A, SUB_B,0)

#define SUBXB(SUB_S, SUB_A, SUB_B) \
SUB(SUB_S, SUB_A, SUB_B, (reg68.sr&SR_X)<<(24-SR_X_BIT))
#define SUBXW(SUB_S, SUB_A, SUB_B) \
SUB(SUB_S, SUB_A, SUB_B, (reg68.sr&SR_X)<<(16-SR_X_BIT))
#define SUBXL(SUB_S, SUB_A, SUB_B) \
SUB(SUB_S, SUB_A, SUB_B, (reg68.sr&SR_X)>>SR_X_BIT)

/* SUBA: subtract from an address register; condition codes are not
** affected. SUBAW shifts the high-justified word source down through
** an s32 local before subtracting.
*/
#define SUBA(SUB_S, SUB_A, SUB_B) (SUB_S) = (SUB_B) - (SUB_A)
#define SUBAW(SUB_S, SUB_A, SUB_B) \
{\
s32 ZOB = (SUB_A)>>16;\
SUBA(SUB_S, ZOB, SUB_B);\
}
#define SUBAL(SUB_S, SUB_A, SUB_B) SUBA(SUB_S, SUB_A, SUB_B)

/* CMP family: flags only, result discarded. CMPAW shifts the
** high-justified word operand down like SUBAW.
*/
#define CMPB(CMP_A, CMP_B) CMP(CMP_A, CMP_B)
#define CMPW(CMP_A, CMP_B) CMP(CMP_A, CMP_B)
#define CMPL(CMP_A, CMP_B) CMP(CMP_A, CMP_B)
#define CMPA(CMP_A, CMP_B) CMP(CMP_A, CMP_B)
#define CMPAW(CMP_A, CMP_B) \
{\
s32 ZOB = (CMP_A)>>16;\
CMPA( ZOB, CMP_B);\
}
#define CMPAL(CMP_A, CMP_B) CMP(CMP_A, CMP_B)

/* NEG: implemented as subtraction from zero (0 - operand). */
#define NEGB(NEG_S,NEG_A) SUBB(NEG_S,NEG_A,0)
#define NEGW(NEG_S,NEG_A) SUBW(NEG_S,NEG_A,0)
#define NEGL(NEG_S,NEG_A) SUBL(NEG_S,NEG_A,0)

/* NEGX: like NEG but the X flag is also subtracted. */
#define NEGXB(NEG_S,NEG_A) SUBXB(NEG_S,NEG_A,0)
#define NEGXW(NEG_S,NEG_A) SUBXW(NEG_S,NEG_A,0)
#define NEGXL(NEG_S,NEG_A) SUBXL(NEG_S,NEG_A,0)
00373
/* Logical shift right of the high-justified operand.
** Shifts by count-1, samples the last bit to be shifted out (mask
** LSR_C) into both X and C, then does the final shift; the result is
** masked back to the operand size and N/Z are set from it.
** NOTE(review): a zero count clears X too (reg68.sr &= 0xFF00),
** whereas the 68000 leaves X unchanged for a zero count -- confirm.
** NOTE(review): shift counts >= 32 make the C shifts undefined
** behaviour.
*/
#define LSR(LSR_A,LSR_D,LSR_MSK,LSR_C) \
{\
reg68.sr &= 0xFF00;\
if((LSR_D)!=0) \
{\
ADDCYCLE(2*(LSR_D));\
(LSR_A) >>= (LSR_D)-1;\
if((LSR_A)&(LSR_C)) reg68.sr |= SR_X | SR_C;\
(LSR_A)>>=1;\
}\
(LSR_A) &= (LSR_MSK);\
reg68.sr |= (((LSR_A)==0)<<SR_Z_BIT) | (((s32)(LSR_A)<0)<<SR_N_BIT);\
}

/* LSR.B: byte lives in bits 24-31; last bit out is bit 24. */
#define LSRB(LSR_A,LSR_B) LSR(LSR_A,LSR_B,0xFF000000,(1<<24))

/* LSR.W: word lives in bits 16-31; last bit out is bit 16. */
#define LSRW(LSR_A,LSR_B) LSR(LSR_A,LSR_B,0xFFFF0000,(1<<16))

/* LSR.L */
#define LSRL(LSR_A,LSR_B) LSR(LSR_A,LSR_B,0xFFFFFFFF,(1<<0))

/* NOTE(review): ASRx simply aliases LSRx, so no sign bit replication
** is performed on arithmetic shifts and V is never set -- confirm
** whether this inaccuracy is intended.
*/
#define ASRB(LSR_A,LSR_B) LSR(LSR_A,LSR_B,0xFF000000,(1<<24))

#define ASRW(LSR_A,LSR_B) LSR(LSR_A,LSR_B,0xFFFF0000,(1<<16))

#define ASRL(LSR_A,LSR_B) LSR(LSR_A,LSR_B,0xFFFFFFFF,(1<<0))
00413
/* Logical shift left of the high-justified operand.
** Shifts by count-1, samples bit 31 (the next bit to be shifted out)
** into X and C, then does the final shift; the result is masked to
** the operand size and N/Z are set from it.
** NOTE(review): as with LSR, a zero count clears X -- the 68000
** leaves X unchanged in that case; and counts >= 32 are undefined
** behaviour in C.
*/
#define LSL(LSL_A,LSL_D,LSL_MSK) \
{\
reg68.sr &= 0xFF00;\
if((LSL_D)!=0) \
{\
ADDCYCLE(2*(LSL_D));\
(LSL_A) <<= (LSL_D)-1;\
if((LSL_A)&0x80000000) reg68.sr |= SR_X | SR_C;\
(LSL_A)<<=1;\
}\
(LSL_A) &= (LSL_MSK);\
reg68.sr |= (((LSL_A)==0)<<SR_Z_BIT) | (((s32)(LSL_A)<0)<<SR_N_BIT);\
}

/* LSL.B */
#define LSLB(LSL_A,LSL_B) LSL(LSL_A,LSL_B,0xFF000000)

/* LSL.W */
#define LSLW(LSL_A,LSL_B) LSL(LSL_A,LSL_B,0xFFFF0000)

/* LSL.L */
#define LSLL(LSL_A,LSL_B) LSL(LSL_A,LSL_B,0xFFFFFFFF)

/* NOTE(review): ASLx aliases LSLx; the 68000's ASL additionally sets
** V when the sign changes during the shift -- not modelled here.
*/
#define ASLB(LSL_A,LSL_B) LSL(LSL_A,LSL_B,0xFF000000)

#define ASLW(LSL_A,LSL_B) LSL(LSL_A,LSL_B,0xFFFF0000)

#define ASLL(LSL_A,LSL_B) LSL(LSL_A,LSL_B,0xFFFFFFFF)
00446
/* Rotate right (without extend) of the high-justified operand.
** X is preserved (sr &= 0xFF00|SR_X); the count is reduced modulo
** the operand size, the bit rotated into the carry is sampled, then
** the rotate is done as two opposing shifts on the masked operand.
** NOTE(review): ROR_D is written to (`ROR_D &= ...`), so the count
** argument must be a modifiable lvalue and is clobbered.
** NOTE(review): a non-zero count that is a multiple of ROR_SZ is
** masked to 0 after the zero test, producing full-width C shifts
** (undefined behaviour) -- confirm.
*/
#define ROR(ROR_A,ROR_D,ROR_MSK,ROR_SZ) \
{\
reg68.sr &= 0xFF00 | SR_X;\
if((ROR_D)!=0) \
{\
ADDCYCLE(2*(ROR_D));\
ROR_D &= (ROR_SZ)-1;\
if((ROR_A)&(1<<((ROR_D)-1+32-(ROR_SZ)))) reg68.sr |= SR_C;\
(ROR_A) &= (ROR_MSK);\
(ROR_A) = ((ROR_A)>>(ROR_D)) + ((ROR_A)<<((ROR_SZ)-(ROR_D)));\
}\
(ROR_A) &= (ROR_MSK);\
reg68.sr |= (((ROR_A)==0)<<SR_Z_BIT) | (((s32)(ROR_A)<0)<<SR_N_BIT);\
}

/* Rotate left (without extend); mirror image of ROR, carry sampled
** from the bit about to rotate out of the top. Same caveats as ROR.
*/
#define ROL(ROR_A,ROR_D,ROR_MSK,ROR_SZ) \
{\
reg68.sr &= 0xFF00 | SR_X;\
if((ROR_D)!=0) \
{\
ADDCYCLE(2*(ROR_D));\
ROR_D &= (ROR_SZ)-1;\
if((ROR_A)&(1<<(32-(ROR_D)))) reg68.sr |= SR_C;\
(ROR_A) &= (ROR_MSK);\
(ROR_A) = ((ROR_A)<<(ROR_D)) + ((ROR_A)>>((ROR_SZ)-(ROR_D)));\
}\
(ROR_A) &= (ROR_MSK);\
reg68.sr |= (((ROR_A)==0)<<SR_Z_BIT) | (((s32)(ROR_A)<0)<<SR_N_BIT);\
}

/* Size-specific instantiations (byte/word/long, high-justified). */
#define RORB(ROR_A,ROR_B) ROR(ROR_A,ROR_B,0xFF000000,8)
#define RORW(ROR_A,ROR_B) ROR(ROR_A,ROR_B,0xFFFF0000,16)
#define RORL(ROR_A,ROR_B) ROR(ROR_A,ROR_B,0xFFFFFFFF,32)
#define ROLB(ROR_A,ROR_B) ROL(ROR_A,ROR_B,0xFF000000,8)
#define ROLW(ROR_A,ROR_B) ROL(ROR_A,ROR_B,0xFFFF0000,16)
#define ROLL(ROR_A,ROR_B) ROL(ROR_A,ROR_B,0xFFFFFFFF,32)
00485
/* Rotate right through the X flag.
** The old X is saved first, the CCR is cleared, the bit rotated out
** goes to both C and X, and the saved X is inserted into the vacated
** top position.
** Same caveats as ROR: the count argument is clobbered, and counts
** that are non-zero multiples of the size lead to full-width shifts
** (undefined behaviour) -- NOTE(review), confirm.
*/
#define ROXR(ROR_A,ROR_D,ROR_MSK,ROR_SZ) \
{\
u32 ROR_X = (reg68.sr>>SR_X_BIT)&1;\
reg68.sr &= 0xFF00;\
if((ROR_D)!=0) \
{\
ADDCYCLE(2*(ROR_D));\
ROR_D &= (ROR_SZ)-1;\
if((ROR_A)&(1<<((ROR_D)-1+32-(ROR_SZ)))) reg68.sr |= SR_C | SR_X;\
(ROR_A) &= (ROR_MSK);\
(ROR_A) = ((ROR_A)>>(ROR_D)) + ((ROR_A)<<((ROR_SZ)-(ROR_D)+1));\
(ROR_A) |= (ROR_X)<<(32-(ROR_D));\
}\
(ROR_A) &= (ROR_MSK);\
reg68.sr |= (((ROR_A)==0)<<SR_Z_BIT) | (((s32)(ROR_A)<0)<<SR_N_BIT);\
}

/* Rotate left through the X flag; mirror image of ROXR, with the
** saved X inserted at the vacated bottom position of the operand.
** Same caveats as ROXR.
*/
#define ROXL(ROR_A,ROR_D,ROR_MSK,ROR_SZ) \
{\
u32 ROR_X = (reg68.sr>>SR_X_BIT)&1;\
reg68.sr &= 0xFF00;\
if((ROR_D)!=0) \
{\
ADDCYCLE(2*(ROR_D));\
ROR_D &= (ROR_SZ)-1;\
if((ROR_A)&(1<<(32-(ROR_D)))) reg68.sr |= SR_C | SR_X ;\
(ROR_A) &= (ROR_MSK);\
(ROR_A) = ((ROR_A)<<(ROR_D)) + ((ROR_A)>>((ROR_SZ)-(ROR_D)+1));\
(ROR_A) |= (ROR_X)<<((ROR_D)-1+(32-(ROR_SZ)));\
}\
(ROR_A) &= (ROR_MSK);\
reg68.sr |= (((ROR_A)==0)<<SR_Z_BIT) | (((s32)(ROR_A)<0)<<SR_N_BIT);\
}


/* Size-specific instantiations (byte/word/long, high-justified). */
#define ROXRB(ROR_A,ROR_B) ROXR(ROR_A,ROR_B,0xFF000000,8)
#define ROXRW(ROR_A,ROR_B) ROXR(ROR_A,ROR_B,0xFFFF0000,16)
#define ROXRL(ROR_A,ROR_B) ROXR(ROR_A,ROR_B,0xFFFFFFFF,32)
#define ROXLB(ROR_A,ROR_B) ROXL(ROR_A,ROR_B,0xFF000000,8)
#define ROXLW(ROR_A,ROR_B) ROXL(ROR_A,ROR_B,0xFFFF0000,16)
#define ROXLL(ROR_A,ROR_B) ROXL(ROR_A,ROR_B,0xFFFFFFFF,32)
00529
00536 #ifdef __cplusplus
00537 }
00538 #endif
00539
00540 #endif