|
| quickmove(char *dst, char *src, long nbytes):
| quickly copy "nbytes" bytes from src to dst. Assumes that both
| src and dst are word aligned. Copies in ascending order, so the
| regions must not overlap (not safe as a general memmove).
|
	.text
	.globl	_quickmove
_quickmove:
	movel	sp@(4), a0		| a0 = dst
	movel	sp@(8), a1		| a1 = src
	movel	sp@(12), d0		| d0 = nbytes
	lsrl	#8, d0			| immediate shift count is limited to 8,
	lsrl	#1, d0			| so d0 = nbytes / 512 takes two shifts
	subql	#1, d0			| block count - 1 for the subql/bge loop
	bmi	Leftover		| fewer than 512 bytes: skip block loop
	moveml	d1-d7/a2-a6, sp@-	| save the 12 regs used as a copy buffer
L1:
	moveml	a1@+, d1-d7/a2-a6	| load 12*4 = 48 bytes, a1 += 48
	moveml	d1-d7/a2-a6, a0@	| store bytes 0..47 of this block
	moveml	a1@+, d1-d7/a2-a6	| 2nd 48-byte burst
	moveml	d1-d7/a2-a6, a0@(48)	|
	moveml	a1@+, d1-d7/a2-a6	| 3rd
	moveml	d1-d7/a2-a6, a0@(96)	|
	moveml	a1@+, d1-d7/a2-a6	| 4th
	moveml	d1-d7/a2-a6, a0@(144)	|
	moveml	a1@+, d1-d7/a2-a6	| 5th
	moveml	d1-d7/a2-a6, a0@(192)	|
	moveml	a1@+, d1-d7/a2-a6	| 6th
	moveml	d1-d7/a2-a6, a0@(240)	|
	moveml	a1@+, d1-d7/a2-a6	| 7th
	moveml	d1-d7/a2-a6, a0@(288)	|
	moveml	a1@+, d1-d7/a2-a6	| 8th
	moveml	d1-d7/a2-a6, a0@(336)	|
	moveml	a1@+, d1-d7/a2-a6	| 9th
	moveml	d1-d7/a2-a6, a0@(384)	|
	moveml	a1@+, d1-d7/a2-a6	| 10th
	moveml	d1-d7/a2-a6, a0@(432)	| 480 bytes of this block done
	moveml	a1@+, d1-d7/a2		| final burst: 8 regs = 32 bytes
	moveml	d1-d7/a2, a0@(480)	| for a 512-byte block total

	lea	a0@(512), a0		| advance dst (src advanced via a1@+)
	subql	#1, d0			| count is 32-bit and dbra is 16-bit only,
	bge	L1			| so loop with subql/bge instead

	moveml	sp@+, d1-d7/a2-a6	| restore saved registers

Leftover:				| copy the remaining nbytes % 512 bytes
	movel	sp@(12), d1		| reload nbytes from the argument
	andl	#0x01ff, d1		| d1 = nbytes % 512 (0..511, fits dbra)
	subql	#1, d1			| dbra counts down to -1
	bmi	Ldone			| nothing left to copy
L2:
	moveb	a1@+, a0@+		| byte-at-a-time tail copy
	dbra	d1, L2
Ldone:
	rts				| return
