//#define __AVR_ENHANCED__ 0	/* uncomment to force the no-MUL code path */

#include "fp32def.h"
#include "asmdef.h"

#define	HI40_PIO2	0x3FC90FDA	/* high 4 bytes Pi/2 */
#define	LO40_PIO2	0xA2		/* low (extension) byte of Pi/2 */

FUNCTION __fp_sinus

; ZL selects the flavour: with bit 0 set, Pi/2 is subtracted from the
; argument first; bit 1 of ZL+1 negates the result.
ENTRY __fp_sinus
	push	ZL
	sbrs	ZL, 0
	rjmp	1f
	ldi	rBE, LO40_PIO2
	ldi	rB0, lo8(HI40_PIO2)
	ldi	rB1, hi8(HI40_PIO2)
	ldi	rB2, hlo8(HI40_PIO2)
	ldi	rB3, hhi8(HI40_PIO2 | 0x80000000)
	rcall	_U(__addsf3x)		; subtract Pi/2
1:	pop	r0
	inc	r0
	sbrc	r0, 1
	subi	rA3, 0x80		; flip the sign bit
	rcall	_U(__fp_splitA)
	brcs	.LP0			; NaN or Inf
	sbrs	rA2, 7			; skip if normal
.LP0:	rjmp	_U(__fp_mpack)		; return NAN or subnormal
	cpi	rA3, 0x73
	brcs	2f			; SIN(X)=X for X < 2^-12
	ldi	ZL, lo8(.L_table)
	ldi	ZH, hi8(.L_table)
	rcall	_U(__fp_powsodd2)
2:	cpi	rAE, 0x82	;0x84	; better rounding properties than 0x80
	sbci	rA0, 0xff		; round up, rippling the carry
	sbci	rA1, 0xff
	sbci	rA2, 0xff
	sbci	rA3, 0xff
	lsl	rA2			; drop the implicit mantissa bit
	lsr	rA3			; pack exponent...
	ror	rA2
	bld	rA3, 7			; ...and sign (kept in T)
	ret

#define	RM0	R2
#define	RM1	R3
#define	RM2	R4
#define	RM3	R5
#define	SQX0	R6
#define	SQX1	R7
#define	SQX2	R8
#define	SQX3	R9
#define	WR0	R14
#define	WR1	R15
#define	WR2	R16
#define	WR3	R17

; At this point the argument is split !!! in rA3.rA2:rA1:rA0:rAE,
; Abs(Arg) <= Pi/2.  Evaluates mant * (1 - X^2/(2*3) + X^4/(2*3*4*5) - ...)
; = mant * sin(X)/X in 4-byte fixed point.  Z points to the coefficient
; table; X^2 is carried scaled by 1/4, the table entries are scaled by 4.
__fp_powsodd2:
	push	RM0
	push	RM1
	push	RM2
	push	RM3
	push	SQX0
	push	SQX1
	push	SQX2
	push	SQX3
	push	WR0
	push	WR1
	push	WR2
	push	WR3
	mov	RM0, rAE
	mov	RM1, rA0
	mov	RM2, rA1
	mov	RM3, rA2
	mov	rBE, rA3
	subi	rBE, 0x7f
	breq	2f
1:	lsr	RM3			; align RM to X/2 as a 0.32 fraction
	ror	RM2
	ror	RM1
	ror	RM0
	inc	rBE
	brne	1b			; falls through with rBE == 0
2:	X_movw	rB0, RM0
	X_movw	rB2, RM2
	rcall	_mulab			; get X^2/4 (RM*rB)
	X_movw	SQX0, rB0
	X_movw	SQX2, rB2		; store in SQX
	mov	RM0, rAE		; term = mantissa of X again
	mov	RM1, rA0
	mov	RM2, rA1
	mov	RM3, rA2
	rjmp	0f			; first pass: rB already holds X^2/4
.loop:
	X_movw	rB0, SQX0
	X_movw	rB2, SQX2
0:	rcall	_mulab			; rB = term * X^2/4
	X_movw	RM0, rB0
	X_movw	RM2, rB2
	X_lpm	rB0, Z+			; fetch 4/((2k)*(2k+1)) from the table
	X_lpm	rB1, Z+
	X_lpm	rB2, Z+
	X_lpm	rB3, Z+
	rcall	_mulab			; term = term * X^2/((2k)*(2k+1))
	X_movw	RM0, rB0
	X_movw	RM2, rB2
	sbrs	ZL, 2			; entries are 4 bytes and the table is
	rjmp	1f			;  8-byte aligned: bit 2 set = odd entry
	sub	rAE, rB0		; odd term: subtract
	sbc	rA0, rB1
	sbc	rA1, rB2
	sbc	rA2, rB3
	rjmp	2f
1:	add	rAE, rB0		; even term: add
	adc	rA0, rB1
	adc	rA1, rB2
	adc	rA2, rB3
2:	or	WR3, rB2		; _mulab returns with WR3 == rB3
	brne	.loop
	or	WR3, rB1
	brne	.loop
	cpi	rB0, 0x80
	brcc	.loop			; loop while term >= 2^-25
	sbrc	rA2, 7			; renormalize if the sum dropped below 0.5
	rjmp	3f
	lsl	rAE
	rol	rA0
	rol	rA1
	rol	rA2
	dec	rA3
3:	pop	WR3
	pop	WR2
	pop	WR1
	pop	WR0
	pop	SQX3
	pop	SQX2
	pop	SQX1
	pop	SQX0
	pop	RM3
	pop	RM2
	pop	RM1
	pop	RM0
	ret

#if defined(__AVR_ENHANCED__) && __AVR_ENHANCED__

; rB = RM*rB, upper 4 bytes of the result, completely ignore lower 4 bytes!!
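
/* Reference model of _mulab (C, illustrative only; "mulab_model" is a
   made-up name): the upper 4 bytes of the 8-byte product of two 0.32
   fixed-point fractions,

	#include <stdint.h>

	static uint32_t mulab_model(uint32_t rm, uint32_t rb)
	{
	    // keep only bytes 4..7 of the 64-bit product
	    return (uint32_t)(((uint64_t)rm * (uint64_t)rb) >> 32);
	}

   except that both implementations truncate: partial products that can
   only reach bytes 0..2 of the full product are never formed (byte 3 is
   kept as a carry guard), so the result can come out a few ulp below the
   model.  The enhanced version also expects rBE == 0 on entry and uses
   it as a zero register in the carry chains; __fp_powsodd2 guarantees
   that, since its alignment loop exits with rBE == 0. */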
_mulab:	clr	WR1
	mul	RM3, rB0		; partials reaching byte 3 (guard) and byte 4
	mov	WR0, r0
	mov	rB0, r1
	mul	RM1, rB2
	add	WR0, r0
	adc	rB0, r1
	adc	WR1, rBE		; rBE == 0: collects the carry
	mul	RM2, rB1
	add	WR0, r0
	adc	rB0, r1
	adc	WR1, rBE
	mul	RM3, rB3
	movw	WR2, r0			; upper bytes (MOVW keeps the Z flag of MUL)
	breq	2f			; if RM3*rB3 == 0 (rB3 == 0 happens a lot)
	mul	RM0, rB3
	add	WR0, r0
	adc	rB0, r1
	adc	WR1, rBE
	mul	RM2, rB3
	add	WR1, r0
	adc	WR2, r1
	adc	WR3, rBE
	mul	RM1, rB3
	add	rB0, r0
	adc	WR1, r1
	adc	WR2, rBE
	adc	WR3, rBE
2:	mul	RM3, rB1
	add	rB0, r0
	adc	WR1, r1
	adc	WR2, rBE
	adc	WR3, rBE
	mul	RM2, rB2
	add	rB0, r0
	adc	WR1, r1
	adc	WR2, rBE
	adc	WR3, rBE
	mul	RM3, rB2
	add	WR1, r0
	adc	WR2, r1
	adc	WR3, rBE
	clr	r1			; restore the GCC zero register
	mov	rB1, WR1
	movw	rB2, WR2
	ret

#else // not __AVR_ENHANCED__

_mulab:	clr	r0			; byte 4 of the accumulator
	clr	WR1
	clr	rBE			; byte 3 of the accumulator (guard)
; * rB0
	mov	WR3, RM3
1:	lsr	WR3
	ror	WR1
	sbrs	rB0, 7
	rjmp	2f
	add	rBE, WR1
	adc	r0, WR3
2:	lsl	rB0
	brne	1b
	clr	WR1
; * rB1 * RM3.2
	mov	WR3, RM3
	mov	WR2, RM2
1:	lsr	WR3
	ror	WR2
	ror	rB0			; rB0 ran down to zero above
	sbrs	rB1, 7
	rjmp	2f
	add	rBE, rB0
	adc	r0, WR2
	adc	WR1, WR3
2:	lsl	rB1
	brne	1b
; * rB2 * RM3.2.1
	clr	rB0
	mov	WR3, RM3
	mov	WR2, RM2
	mov	rB1, RM1
1:	lsr	WR3
	ror	WR2
	ror	rB1
	ror	r1			; r1 (zero reg) takes the shifted-out bits
	sbrs	rB2, 7
	rjmp	2f
	add	rBE, r1
	adc	r0, rB1
	adc	WR1, WR2
	adc	rB0, WR3
2:	lsl	rB2
	brne	1b
; * rB3 * RM3.2.1.0
	mov	rB2, RM0		; keep RM0 intact
	clr	WR3
	clr	rB1
1:	lsr	RM3
	ror	RM2
	ror	RM1
	ror	rB2
	ror	rB1
	sbrs	rB3, 7
	rjmp	2f
	add	rBE, rB1
	adc	r0, rB2
	adc	WR1, RM1
	adc	rB0, RM2
	adc	WR3, RM3
2:	lsl	rB3
	brne	1b
	clr	r1
	mov	rB2, rB0
	mov	rB3, WR3
	mov	rB0, r0
	mov	rB1, WR1
	ret

#endif // __AVR_ENHANCED__

PGM_SECTION

	.balign	8			; the sub/add dispatch tests ZL bit 2
.L_table:
	.byte	0xab,0xaa,0xaa,0xaa	; 4/(2*3) as a 0.32 fraction, odd, sub
	.byte	0x32,0x33,0x33,0x33	; 4/(4*5), even, add
	.byte	0x19,0x86,0x61,0x18	; 4/(6*7), odd, sub
	.byte	0x8e,0xe3,0x38,0x0e	; 4/(8*9), even, add
	.byte	0x95,0x20,0x4f,0x09	; 4/(10*11), odd, sub
	.byte	0x06,0x69,0x90,0x06	; 4/(12*13), even, add

ENDFUNC
	.end
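
/* Reference model of the series evaluated by __fp_powsodd2 (C,
   illustrative only; "sin_model" is a made-up name).  The assembly
   computes the same sum in 4-byte fixed point, with X^2 carried
   pre-scaled by 1/4 and the table entries pre-scaled by 4 to
   compensate:

	static double sin_model(double x)	// |x| <= Pi/2
	{
	    static const double c[6] = {	// 1/((2k)*(2k+1))
	        1.0/(2*3), 1.0/(4*5), 1.0/(6*7),
	        1.0/(8*9), 1.0/(10*11), 1.0/(12*13),
	    };
	    double term = x, sum = x;
	    for (int k = 0; k < 6; k++) {
	        term *= x * x * c[k];		// now x^(2k+3)/(2k+3)!
	        sum += (k & 1) ? term : -term;	// odd entries subtract
	    }
	    return sum;
	}

   The real code also stops early, as soon as a term drops below the
   rounding guard (about 2^-25 of the mantissa). */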