/* mc68020 __mpn_rshift -- Shift right a low-level natural-number integer.

Copyright (C) 1996, 1998 Free Software Foundation, Inc.

This file is part of the GNU MP Library.

The GNU MP Library is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version.

The GNU MP Library is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
License for more details.

You should have received a copy of the GNU Lesser General Public License
along with the GNU MP Library; see the file COPYING.LIB.  If not, write to
the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
MA 02111-1307, USA. */

/*
  INPUT PARAMETERS
  res_ptr	(sp + 4)
  s_ptr		(sp + 8)
  s_size	(sp + 12)
  cnt		(sp + 16)
*/

#include "sysdep.h"
#include "asm-syntax.h"

#define res_ptr a1
#define s_ptr a0
#define s_size d6
#define cnt d4

	TEXT
ENTRY(__mpn_rshift)
/* Save used registers on the stack.  */
	moveml	R(d2)-R(d6)/R(a2),MEM_PREDEC(sp)

/* Copy the arguments to registers.  */
	movel	MEM_DISP(sp,28),R(res_ptr)
	movel	MEM_DISP(sp,32),R(s_ptr)
	movel	MEM_DISP(sp,36),R(s_size)
	movel	MEM_DISP(sp,40),R(cnt)

	moveql	#1,R(d5)
	cmpl	R(d5),R(cnt)
	bne	L(Lnormal)
	cmpl	R(res_ptr),R(s_ptr)
	bls	L(Lspecial)		/* jump if res_ptr >= s_ptr */
#if (defined (__mc68020__) || defined (__NeXT__) || defined(mc68020))
	lea	MEM_INDX1(res_ptr,s_size,l,4),R(a2)
#else /* not mc68020 */
	movel	R(s_size),R(d0)
	asll	#2,R(d0)
	lea	MEM_INDX(res_ptr,d0,l),R(a2)
#endif
	cmpl	R(s_ptr),R(a2)
	bls	L(Lspecial)		/* jump if s_ptr >= res_ptr + s_size */

L(Lnormal:)
	moveql	#32,R(d5)
	subl	R(cnt),R(d5)
	movel	MEM_POSTINC(s_ptr),R(d2)
	movel	R(d2),R(d0)
	lsll	R(d5),R(d0)		/* compute carry limb */

	lsrl	R(cnt),R(d2)
	movel	R(d2),R(d1)
	subql	#1,R(s_size)
	beq	L(Lend)			/* single-limb operand */
	lsrl	#1,R(s_size)		/* halve the count for the unrolled loop */
	bcs	L(L1)			/* odd count: enter at the loop's second half */
	subql	#1,R(s_size)

L(Loop:)
	movel	MEM_POSTINC(s_ptr),R(d2)
	movel	R(d2),R(d3)
	lsll	R(d5),R(d3)
	orl	R(d3),R(d1)
	movel	R(d1),MEM_POSTINC(res_ptr)
	lsrl	R(cnt),R(d2)
L(L1:)
	movel	MEM_POSTINC(s_ptr),R(d1)
	movel	R(d1),R(d3)
	lsll	R(d5),R(d3)
	orl	R(d3),R(d2)
	movel	R(d2),MEM_POSTINC(res_ptr)
	lsrl	R(cnt),R(d1)

	dbf	R(s_size),L(Loop)
	subl	#0x10000,R(s_size)	/* dbf counts only the low 16 bits */
	bcc	L(Loop)

L(Lend:)
	movel	R(d1),MEM(res_ptr)	/* store most significant limb */

/* Restore used registers from stack frame.  */
	moveml	MEM_POSTINC(sp),R(d2)-R(d6)/R(a2)
	rts

/* The cnt == 1 code below loops from the most significant end of the
   arrays.  That order is permissible only when the destination does not
   start below the source, or when the operands do not overlap at all;
   the tests above select this path exactly in those cases, since the
   function is documented to work for overlapping source and destination.  */
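
/* A rough C sketch of the cnt == 1 path below -- an editor's illustration,
   not part of the original source.  It assumes 32-bit limbs and s_size >= 1,
   and rshift1_sketch is a hypothetical name.  The loop walks downwards from
   the most significant limb, feeding each limb's shifted-out low bit into
   the top of the limb below it, and returns the final shifted-out bit
   left-justified in the carry limb (the value this routine leaves in d0).

   static unsigned long
   rshift1_sketch (unsigned long *res_ptr, const unsigned long *s_ptr,
                   long s_size)
   {
     unsigned long carry_bit = 0;
     long i;
     for (i = s_size - 1; i >= 0; i--)
       {
         unsigned long limb = s_ptr[i];
         res_ptr[i] = (limb >> 1) | (carry_bit << 31);
         carry_bit = limb & 1;
       }
     return carry_bit << 31;
   }
*/
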
L(Lspecial:)
#if (defined (__mc68020__) || defined (__NeXT__) || defined(mc68020))
	lea	MEM_INDX1(s_ptr,s_size,l,4),R(s_ptr)
	lea	MEM_INDX1(res_ptr,s_size,l,4),R(res_ptr)
#else /* not mc68020 */
	movel	R(s_size),R(d0)
	asll	#2,R(d0)
	addl	R(d0),R(s_ptr)
	addl	R(d0),R(res_ptr)
#endif

	clrl	R(d0)			/* initialize carry */
/* Convert the limb count into the dbf counter for the 2-way unrolled loop.
   Either path through the next instructions also leaves the X flag clear,
   as the roxrl below requires.  */
	eorw	#1,R(s_size)
	lsrl	#1,R(s_size)
	bcc	L(LL1)			/* odd count: enter at the loop's second half */
	subql	#1,R(s_size)

L(LLoop:)
	movel	MEM_PREDEC(s_ptr),R(d2)
	roxrl	#1,R(d2)
	movel	R(d2),MEM_PREDEC(res_ptr)
L(LL1:)
	movel	MEM_PREDEC(s_ptr),R(d2)
	roxrl	#1,R(d2)
	movel	R(d2),MEM_PREDEC(res_ptr)

	dbf	R(s_size),L(LLoop)
	roxrl	#1,R(d0)		/* save cy in msb */
	subl	#0x10000,R(s_size)	/* dbf counts only the low 16 bits */
	bcs	L(LLend)
	addl	R(d0),R(d0)		/* restore cy */
	bra	L(LLoop)

L(LLend:)
/* Restore used registers from stack frame.  */
	moveml	MEM_POSTINC(sp),R(d2)-R(d6)/R(a2)
	rts
END(__mpn_rshift)
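
/* For reference, a rough C equivalent of the general path above -- an
   editor's sketch, not part of the original source.  It assumes 32-bit
   limbs, s_size >= 1 and 1 <= cnt < 32, uses the hypothetical name
   rshift_sketch, and permits overlap only as described in the comments
   above.  The return value is the carry limb left in d0.

   static unsigned long
   rshift_sketch (unsigned long *res_ptr, const unsigned long *s_ptr,
                  long s_size, unsigned int cnt)
   {
     unsigned long carry = s_ptr[0] << (32 - cnt);
     long i;
     for (i = 0; i < s_size - 1; i++)
       res_ptr[i] = (s_ptr[i] >> cnt) | (s_ptr[i + 1] << (32 - cnt));
     res_ptr[s_size - 1] = s_ptr[s_size - 1] >> cnt;
     return carry;
   }
*/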