;
; Some practical assembler utility routines. See sutils.h for corresponding C prototypes.
; These are mostly called from within video.c as most of them are kinda copy
; routines which handle cropping and adding together different-sized channels.
;
; Michael Rausch  14-4-94  1:14:30
;

	SECTION	text,CODE

ER	EQU	-1

OK	EQU	1
UNDERFLOW EQU	-2

	XREF	@correct_underflow
	XREF	_bitBuffer
	XREF	_bufLength
	XREF	_bitBuffer
	XREF	_bitOffset
;	XREF	_bitCount	; ANALYSIS
;	XREF	_mbCoeffPtr	; ANALYSIS

	XREF	_dct_coeff_first
	XREF	_dct_coeff_next
	XREF	_dct_coeff_tbl_0
	XREF	_dct_coeff_tbl_1
	XREF	_dct_coeff_tbl_2
	XREF	_dct_coeff_tbl_3


	XREF	_clamp


; ***************************************************************


	XDEF	@next_start_code
; ---------------------------------------------------------------
; int next_start_code(void)   (register convention, a4 = globals base)
;
; Globals (all a4-relative): _bitBuffer = longword-aligned read
; pointer, _bitOffset = bit position in the current longword (0..31),
; _bufLength = remaining length in longwords.
;
; Scans forward byte-by-byte for a 24-bit window equal to $000001 (an
; MPEG start-code prefix; an all-zero window also stops the scan, see
; the loop), then rewrites _bitBuffer/_bitOffset/_bufLength so the
; stream points at the match.  Returns OK in d0, or UNDERFLOW if the
; buffer ran dry.  Clobbers d0/d1/a0/a1; d2 is preserved via a1.
; Needs a 68020+ (the word prefetch below may hit an odd address).
; ---------------------------------------------------------------
@next_start_code:
	moveq	#120,d1			; was 2 -- demands a much larger lookahead than the other routines before scanning; TODO confirm why 120 longwords
	cmp.l	_bufLength(a4),d1
	ble.s	nsc_nc			; enough buffered: skip the refill
	bsr	@correct_underflow
nsc_nc:
	move.l	d2,a1			; save caller's d2 in a1 (no stack used)

	move.l	_bitOffset(a4),d1
	move.l	_bitBuffer(a4),a0
	move.l	_bufLength(a4),d2

	; round the bit offset up to the next byte boundary, stepping
	; to the next longword if that wraps past bit 31
	moveq	#7,d0
	and.l	d1,d0			; d0 = bits past the byte boundary
	beq.s	nsc_is_byte_border
	addq.l	#8,d1
	sub.l	d0,d1			; d1 = offset rounded up to a multiple of 8
	moveq	#32,d0
	cmp.l	d0,d1
	blt.s	nsc_is_byte_border
	sub.l	d0,d1			; wrapped: advance one longword
	addq.l	#4,a0
	subq.l	#1,d2
nsc_is_byte_border:

	lsr.l	#3,d1			; bits -> bytes
	add.l	d1,a0			; a0 is on a longword bound, so align a0 to the byte boundary

	lsl.l	#2,d2			; longwords -> bytes
	add.l	a0,d2			; end of the buffer
					; NOTE(review): computed from the already byte-advanced a0, so
					; this end pointer can overrun the true end by up to 3 bytes
					; and the scan may read slightly past the buffer -- confirm
					; that the buffer has slack after it

	move.w	(a0)+,d0		; prefetch 16 bits

	moveq	#1,d1			; pattern $000001 in the low 24 bits
nsc_loop:
	cmp.l	d2,a0
	bge.s	nsc_underflow		; ran off the end of the buffer
	ext.l	d0			; this one is critical!  (discards stale high bits, so only
					; the previous 16 bits + the new byte take part in the compare)
	lsl.l	#8,d0			; shift the window up ...
	move.b	(a0)+,d0		; ... and pull in the next byte
	tst.l	d0
	beq.s	nsc_found		; 24 zero bits also stop the scan
	cmp.l	d1,d0
	bne.s	nsc_loop		; loop until the window is $000001
nsc_found:

	; a0 points just past the 24-bit match; convert back into the
	; (longword pointer, bit offset, longword count) triple
	subq.l	#3,a0			; last 24 bits are the new startcode
	move.l	a0,d1
	and.l	#3,d1			; byte offset within the longword
	sub.l	d1,a0			; new bitBuffer
	lsl.l	#3,d1			; new bitOffset
	sub.l	a0,d2			; new bufLength
	lsr.l	#2,d2			; bytes -> longwords

	move.l	a0,_bitBuffer(a4)
	move.l	d1,_bitOffset(a4)
	move.l	d2,_bufLength(a4)
	move.l	a1,d2			; restore caller's d2
	moveq	#OK,d0
	rts
nsc_underflow:
	move.l	a0,_bitBuffer(a4)	; i.e. exact end of buffer
	clr.l	_bitOffset(a4)		; no data available anymore
	clr.l	_bufLength(a4)
	move.l	a1,d2			; restore caller's d2
	moveq	#UNDERFLOW,d0
	rts

; ***************************************************************


	XDEF	@s_DecodeCBP
; ---------------------------------------------------------------
; Table-driven huffman byte decoders.
;
; @s_DecodeCBP / @sn_DecodeCBP decode the coded_block_pattern symbol
; by dispatching into the generic routine below with a 9-bit
; lookahead and the _coded_block_pattern table.
;
; @s_get_byte_huff / @sn_get_byte_huff:
;   in:  d0 = number of lookahead bits
;        a0 = table of (value, code length) byte pairs, indexed by
;             the lookahead value
;   out: d0 = decoded value, sign-extended (table entry ER = $FF
;             comes back as -1 to flag an invalid code); the code's
;             bits are consumed from the stream
; The "s_" entry refills via @correct_underflow when fewer than 2
; longwords remain; "sn_" skips that check.  d2 is saved/restored;
; d1/a1 are clobbered.  bfextu/extb.l require a 68020+.
; ---------------------------------------------------------------
@s_DecodeCBP:
	moveq	#9,d0			; 9 bits of lookahead
	lea	_coded_block_pattern,a0
	bra.s	@s_get_byte_huff

	XDEF	@sn_DecodeCBP
@sn_DecodeCBP:
	moveq	#9,d0
	lea	_coded_block_pattern,a0
	bra.s	@sn_get_byte_huff


; out-of-line refill path: caller's d2 goes on the stack (popped at
; the common exit); d0 survives @correct_underflow in d2, a0 on the
; stack
gsh_x:	move.l	d2,-(sp)
	move.l	a0,-(sp)
	move.l	d0,d2
	bsr	@correct_underflow
	move.l	d2,d0
	move.l	(sp)+,a0
	bra.s	sngsh

	XDEF	@s_get_byte_huff
@s_get_byte_huff:
	moveq	#2,d1
	cmp.l	_bufLength(a4),d1
	bgt.s	gsh_x			; fewer than 2 longwords left: refill first

	XDEF	@sn_get_byte_huff
@sn_get_byte_huff:
	move.l	d2,-(sp)
sngsh:

	move.l	_bitOffset(a4),d1
	move.l	_bitBuffer(a4),a1
	bfextu	(a1){d1:d0},d2		; d2 = next d0 bits of lookahead

	move.w	(a0,d2.w*2),d0		; fetch (value<<8)|length from the table

	moveq	#0,d2
	move.b	d0,d2			; d2 = code length in bits

;	add.l	d2,_bitCount(a4)	; ANALYSIS

	; consume the code: advance bitOffset, stepping the buffer
	; pointer when the offset wraps past 32
	add.l	d1,d2
	moveq	#32,d1
	cmp.l	d1,d2
	blt.s	sgdhw
	sub.l	d1,d2
	addq.l	#4,_bitBuffer(a4)
	subq.l	#1,_bufLength(a4)
sgdhw:
	move.l	d2,_bitOffset(a4)

	lsr.w	#8,d0			; d0 = decoded value
	extb.l	d0			; sign-extend, so ER ($FF) becomes -1

	move.l	(sp)+,d2
	rts

; ***************************************************************

	XDEF	@s_DecodeDCTDCSizeLum
; ---------------------------------------------------------------
; @s_DecodeDCTDCSize{Lum,Chrom} -- decode a dct_dc_size huffman code
; and the following size-bit DC differential in one pass.
;
;   in (set up by the two entry stubs):
;        d1 = table index width (7 bits luminance, 8 bits chrominance)
;        a1 = table of (size, code length) byte pairs
;   out: d0 = the decoded DC differential (0 when size == 0);
;        code + differential bits are consumed from the stream
; d2/d3 are saved; d1/a0 clobbered.  No underflow check here --
; presumably the callers guarantee enough buffered data; confirm.
; ---------------------------------------------------------------
@s_DecodeDCTDCSizeLum:
	moveq	#7,d1
	lea	_dct_dc_size_luminance,a1
	bra.s	sdlc

	XDEF	@s_DecodeDCTDCSizeChrom
@s_DecodeDCTDCSizeChrom:
	moveq	#8,d1
	lea	_dct_dc_size_chrominance,a1
sdlc:	move.l	d2,-(sp)
	move.l	d3,-(sp)

	move.l	_bitOffset(a4),d0
	move.l	_bitBuffer(a4),a0
	moveq	#16,d3
	bfextu	(a0){d0:d3},d2		; d2 = 16 bits of lookahead

	move.l	d2,d0
	sub.l	d1,d3			; d3 = 16 - index width
	lsr.l	d3,d0			; d0 = top d1 bits = table index
	move.w	(a1,d0.w*2),d0		; macroval = (size<<8)|codelength

	move.b	d0,d3			; flushed.l  (bits consumed so far = code length)
	lsl.w	d3,d2
	lsr.w	d3,d2			; mask off the header (keep the bits after the size code)

	lsr.w	#8,d0			; size
	tst.w	d0			; necessary ?
	beq.s	cdlflush		; size 0: differential is 0 (d0 is already 0)

	add.w	d0,d3			; total bits to flush = code + size
	moveq	#16,d1
	sub.l	d3,d1
	lsr.l	d1,d2			; trash unused lower bits

	; the size-bit field is stored sign-magnitude style: if its top
	; bit is clear the value is negative, formed by filling the
	; high bits with ones and adding 1
	subq.l	#1,d0			; MPEG has some really strange sign extension, just like JPEG! Weirdo!
	btst	d0,d2
	bne	cdlsign			; top bit set: value is positive as-is
	moveq	#-2,d1
	lsl.l	d0,d1
	or.l	d1,d2			; fill the high bits with ones ...
	addq.l	#1,d2			; ... and add 1 -> negative differential
cdlsign: move.l	d2,d0			; the desired result
cdlflush:

;	add.l	d3,_bitCount(a4)	; ANALYSIS

	; consume d3 bits: advance bitOffset, wrapping past 32
	add.l	_bitOffset(a4),d3
	moveq	#32,d1
	cmp.l	d1,d3
	blt.s	cdlXw
	sub.l	d1,d3
	addq.l	#4,_bitBuffer(a4)
	subq.l	#1,_bufLength(a4)
cdlXw:
	move.l	d3,_bitOffset(a4)
	move.l	(sp)+,d3
	move.l	(sp)+,d2
	rts


; ***************************************************************

; ---------------------------------------------------------------
; @s_get_bits1 / @sn_get_bits1 -- read one bit from the stream.
;   out: d0 = the bit (0/1); the bit is consumed
; "s_" refills via @correct_underflow when fewer than 2 longwords
; remain; "sn_" does not.  Clobbers d1/a0.
; ---------------------------------------------------------------
sgb1:	bsr	@correct_underflow
	bra.s	@sn_get_bits1

	XDEF	@s_get_bits1
@s_get_bits1:
	moveq	#2,d1
	cmp.l	_bufLength(a4),d1
	bgt.s	sgb1			; buffer low: refill first

	XDEF	@sn_get_bits1
@sn_get_bits1:
;	addq.l	#1,_bitCount(a4)	; ANALYSIS
	move.l	_bitBuffer(a4),a0
	move.l	_bitOffset(a4),d1
	moveq	#1,d0			; hmm ?!  (bfextu field width = 1 bit)
	bfextu	(a0){d1:d0},d0		; d0 = the bit
	addq.l	#1,d1
	btst	#5,d1			; offset reached 32?
	beq.s	gb1r
	addq.l	#4,a0			; yes: step to the next longword
	moveq	#0,d1
	move.l	a0,_bitBuffer(a4)
	subq.l	#1,_bufLength(a4)
gb1r:	move.l	d1,_bitOffset(A4)
	rts



; ---------------------------------------------------------------
; @s_get_bitsX / @sn_get_bitsX -- read d0 bits from the stream.
;   in:  d0 = bit count
;   out: d0 = the bits, right-justified, zero-extended; bits consumed
; "s_" refills when fewer than 2 longwords remain.  Clobbers d1/a0
; and a1 (a1 is scratch for the new bit offset).
; ---------------------------------------------------------------
sgbX:	move.l	d0,-(sp)		; d0 must survive the refill call
	bsr	@correct_underflow
	move.l	(sp)+,d0
	bra.s	@sn_get_bitsX

	XDEF	@s_get_bitsX
@s_get_bitsX:
	moveq	#2,d1
	cmp.l	_bufLength(a4),d1
	bgt.s	sgbX

	XDEF	@sn_get_bitsX
@sn_get_bitsX:
;	add.l	d0,_bitCount(a4)	; ANALYSIS
	move.l	_bitOffset(a4),d1
	move.l	_bitBuffer(a4),a0
	move.l	d1,a1
	add.l	d0,a1			; a1 = offset after the read
	bfextu	(a0){d1:d0},d0
	moveq	#32,d1
	cmp.l	d1,a1
	blt.s	gsbXw
	sub.l	d1,a1			; offset wrapped: step one longword
	addq.l	#4,a0
	move.l	a0,_bitBuffer(a4)
	subq.l	#1,_bufLength(a4)
gsbXw:	move.l	a1,_bitOffset(a4)
	rts


; ***************************************************************

; ---------------------------------------------------------------
; @s_show_bits32 / @sn_show_bits32 -- peek at the next 32 bits
; without consuming them.
;   out: d0 = the next 32 bits.  Clobbers d1/a0.
; "s_" refills via @correct_underflow when fewer than 2 longwords
; remain; "sn_" skips the check.
; ---------------------------------------------------------------
ssb32_ofoi:
	bsr	@correct_underflow		; ofoi OPT (refill moved out of the fall-through path)
	bra.s	@sn_show_bits32

	XDEF	@s_show_bits32
@s_show_bits32:
	moveq	#2,d1
	cmp.l	_bufLength(a4),d1
	bgt.s	ssb32_ofoi

	XDEF	@sn_show_bits32
@sn_show_bits32:
	move.l	_bitBuffer(a4),a0
	move.l	_bitOffset(a4),d1
	moveq	#32,d0
	bfextu	(a0){d1:d0},d0
	rts


; ---------------------------------------------------------------
; @s_show_bitsX / @sn_show_bitsX -- peek at the next d0 bits without
; consuming them.
;   in:  d0 = bit count
;   out: d0 = the bits, right-justified.  Clobbers d1/a0.
; "s_" refills when fewer than 2 longwords remain.
; ---------------------------------------------------------------
ssb_x:	move.l	d0,-(sp)		; d0 must survive the refill call
	bsr	@correct_underflow
	move.l	(sp)+,d0
	bra.s	@sn_show_bitsX

	XDEF	@s_show_bitsX
@s_show_bitsX:
	moveq	#2,d1
	cmp.l	_bufLength(a4),d1
	bgt.s	ssb_x

	XDEF	@sn_show_bitsX
@sn_show_bitsX:
	move.l	_bitOffset(a4),d1
	move.l	_bitBuffer(a4),a0
	bfextu	(a0){d1:d0},d0
	rts



; ***************************************************************


; ---------------------------------------------------------------
; @s_flush_bits32 / @sn_flush_bits32 -- skip exactly 32 bits.
; The bit offset is unchanged, so only the buffer pointer and the
; longword count move.  Clobbers d1.
; ---------------------------------------------------------------
sf32_ofoi:
	bsr	@correct_underflow	; moved away
	bra.s	@sn_flush_bits32

	XDEF @s_flush_bits32
@s_flush_bits32:
	moveq	#2,d1
	cmp.l	_bufLength(a4),d1
	bgt.s	sf32_ofoi

	XDEF @sn_flush_bits32
@sn_flush_bits32:
;	add.l	#32,_bitCount(a4)	; ANALYSIS
	addq.l	#4,_bitBuffer(a4)
	subq.l	#1,_bufLength(a4)
	rts


; ---------------------------------------------------------------
; @s_flush_bits / @sn_flush_bits -- skip d0 bits.
; Clobbers d0/d1.  "s_" refills when fewer than 2 longwords remain.
; ---------------------------------------------------------------
sf_outtafoi:
	move.l	d0,-(sp)
	bsr	@correct_underflow	; moved away, out of flow of instructions
	move.l	(sp)+,d0
	bra.s	@sn_flush_bits

	XDEF @s_flush_bits
@s_flush_bits:
	moveq	#2,d1
	cmp.l	_bufLength(a4),d1
	bgt.s	sf_outtafoi

	XDEF @sn_flush_bits
@sn_flush_bits:
;	add.l	d0,_bitCount(a4)	; ANALYSIS
	add.l	_bitOffset(a4),d0	; new offset, wrapped past 32 below
	moveq	#32,d1
	cmp.l	d1,d0
	blt.s	sbf32
	sub.l	d1,d0
	addq.l	#4,_bitBuffer(a4)
	subq.l	#1,_bufLength(a4)
sbf32:	move.l 	d0,_bitOffset(a4)
	rts

; NOTE(review): 'locallevel' appears unreferenced in this file, and it
; places writable storage in the CODE section -- verify it is unused
; before removing
locallevel:	ds.l	1

; ***************************************************************

; ---------------------------------------------------------------
; @s_get_bits_huff / @sn_get_bits_huff -- table-driven huffman decode
; of a value plus a one-bit flag.
;   in:  d0 = number of lookahead bits
;        a1 = table of (value, code length) byte pairs
;        a0 -> longword that receives the flag ("intra")
;   out: d0 = table value >> 1 (value without its low flag bit)
;        (a0) = the value's low bit; the code's bits are consumed
;   error: a negative table entry means an invalid code -- then (a0)
;        is set to 1, d0 = 1, and the stream is NOT advanced;
;        presumably the caller detects this case -- confirm at the
;        call sites
; d1/d2/a0 are clobbered (d2 is NOT saved here, unlike
; @s_get_byte_huff).  "s_" refills when fewer than 2 longwords remain.
; ---------------------------------------------------------------
bgsh_x:	move.l	a0,-(sp)		; a0 stays pushed for bsngsh below
	move.l	d0,-(sp)
	move.l	a1,-(sp)
	bsr	@correct_underflow
	move.l	(sp)+,a1
	move.l	(sp)+,d0
	bra.s	bsngsh

	XDEF	@s_get_bits_huff
@s_get_bits_huff:
	moveq	#2,d1
	cmp.l	_bufLength(a4),d1
	bgt.s	bgsh_x			; buffer low: refill first

	XDEF	@sn_get_bits_huff
@sn_get_bits_huff:
	move.l	a0,-(sp)
bsngsh:

	move.l	_bitOffset(a4),d1
	move.l	_bitBuffer(a4),a0
	bfextu	(a0){d1:d0},d2		; d2 = lookahead bits

	move.l	(sp)+,a0		; a0 -> result flag longword again

	move.w	(a1,d2.w*2),d0		; (value<<8)|length from the table
	bmi.s	gbh_error		; negative entry = invalid code

	moveq	#0,d2
	move.b	d0,d2			; d2 = code length in bits

;	add.l	d2,_bitCount(a4)	; ANALYSIS

	; consume the code bits, wrapping the offset past 32
	add.l	d1,d2
	moveq	#32,d1
	cmp.l	d1,d2
	blt.s	bsgdhw
	sub.l	d1,d2
	addq.l	#4,_bitBuffer(a4)
	subq.l	#1,_bufLength(a4)
bsgdhw:
	move.l	d2,_bitOffset(a4)

	lsr.w	#8,d0			; d0 = table value
	moveq	#0,d1
	lsr.w	#1,d0			; split off the value's low bit ...
	addx.w	d1,d1			; ... into d1 via the X flag
	move.l	d1,(a0)			; save intra
	rts

gbh_error:
	moveq	#1,d0
	move.l	d0,(a0)			; flag the bad code to the caller
	rts



; ***************************************************************

;void PMB2_reconstruct(char *dest, char *dest1, char *source, char *source1, int row_incr);

	XDEF	_PMB2_reconstruct
_PMB2_reconstruct:
; void PMB2_reconstruct(char *dest, char *dest1, char *source, char *source1, int row_incr)
;   a0 = dest, a1 = dest1, a2 = source, a3 = source1,
;   d0 = row_incr (counted in ints)
; Copies two 8x8 byte blocks, each from its own source to its own
; destination; all four pointers advance by the same row increment.
	subq.l	#2,d0			; skip the 2 ints just copied per row ...
	lsl.l	#2,d0			; ... and scale ints -> bytes
	moveq	#7,d1			; 8 rows
pmb2_row:
	move.l	(a2)+,(a0)+		; 8 bytes of this row, block 1 ...
	move.l	(a3)+,(a1)+		; ... interleaved with block 2
	move.l	(a2)+,(a0)+
	move.l	(a3)+,(a1)+
	add.l	d0,a2			; step all four pointers ...
	add.l	d0,a3
	add.l	d0,a0			; ... to the next row
	add.l	d0,a1
	dbra	d1,pmb2_row
	rts

; ***************************************************************

;void PMB1_reconstruct(char *dest, char *source, int row_incr);

	XDEF	_PMB1_reconstruct
_PMB1_reconstruct:
; void PMB1_reconstruct(char *dest, char *source, int row_incr)
;   a0 = dest, a1 = source, d0 = row_incr (counted in ints)
; Copies one 16x16 byte block; both pointers advance by the same row
; increment.
	subq.l	#4,d0			; skip the 4 ints just copied per row ...
	lsl.l	#2,d0			; ... and scale ints -> bytes
	moveq	#15,d1			; 16 rows
pmb1_row:
	move.l	(a1)+,(a0)+		; 16 bytes of one row
	move.l	(a1)+,(a0)+
	move.l	(a1)+,(a0)+
	move.l	(a1)+,(a0)+
	add.l	d0,a1			; step both pointers to the next row
	add.l	d0,a0
	dbra	d1,pmb1_row
	rts

; ***************************************************************

; void PSB4_reconstruct(char *dest, char *dest1, char *source1, char *source2, char *source1a, char *source2a, int row_size);
;

	XDEF	_PSB4_reconstruct
; ---------------------------------------------------------------
; void PSB4_reconstruct(char *dest, char *dest1, char *source1,
;        char *source2, char *source1a, char *source2a, int row_size)
;   a0/a1 = destinations (strided), a2+a3 and a5+a6 = source pairs
;   (read linearly), d0 = row_size in bytes
; Writes the bytewise average of each source pair to its destination,
; four bytes at a time: masking with $fefefefe clears every byte's
; low bit so the two operands can be added without cross-byte
; carries, and roxr pulls the 32-bit add's carry back in as bit 31
; while halving the sum.  Averages are truncated (no rounding).
; The add/roxr pair is order-critical (roxr consumes the X flag the
; add just set).
; ---------------------------------------------------------------
_PSB4_reconstruct:
psb4reg REG	d2-d4
	movem.l	psb4reg,-(sp)

	subq.l	#8,d0			; correct row_size

	move.l	#$fefefefe,d3		; mask: clears bit 0 of every byte

	moveq	#7,d4			; 8 rows
psb4_all_rows:

	REPT	2
	move.l	(a2)+,d1
	move.l	(a3)+,d2
	and.l	d3,d1
	and.l	d3,d2
	add.l	d1,d2
	roxr.l	#1,d2			; tricky! get bit no 32 from the previous addition
	move.l	d2,(a0)+		; this one kicks ass !!!
	ENDR

	REPT	2
	move.l	(a5)+,d1
	move.l	(a6)+,d2
	and.l	d3,d1
	and.l	d3,d2
	add.l	d1,d2
	roxr.l	#1,d2
	move.l	d2,(a1)+
	ENDR

	add.l	d0,a0			; only the destinations are strided
	add.l	d0,a1
	dbra	d4,psb4_all_rows

	movem.l	(sp)+,psb4reg
	rts


; ***************************************************************

; void PSB3_reconstruct(char *dest, char *source1, char *source2, int row_size);
;

	XDEF	_PSB3_reconstruct
; ---------------------------------------------------------------
; void PSB3_reconstruct(char *dest, char *source1, char *source2, int row_size)
;   a0 = dest (strided), a1/a2 = sources (read linearly),
;   d0 = row_size in bytes
; 16 rows of 16 bytes: bytewise average of the two sources, using the
; same $fefefefe-mask / roxr-carry trick as PSB4 (truncating).
; ---------------------------------------------------------------
_PSB3_reconstruct:
psb3reg REG	d2-d4
	movem.l	psb3reg,-(sp)

	sub.l	#16,d0			; correct row_size

	move.l	#$fefefefe,d3		; mask: clears bit 0 of every byte

	moveq	#15,d4			; 16 rows
psb3_all_rows:

	REPT	4
	move.l	(a1)+,d1
	move.l	(a2)+,d2
	and.l	d3,d1
	and.l	d3,d2
	add.l	d1,d2
	roxr.l	#1,d2			; tricky! get bit no 32 from the previous addition
	move.l	d2,(a0)+		; this one kicks ass !!!
	ENDR

	add.l	d0,a0			; only the destination is strided
	dbra	d4,psb3_all_rows

	movem.l	(sp)+,psb3reg
	rts

; ***************************************************************

;void PSB2_reconstruct(char *dest, char *dest1, char *source, char *source1, int row_incr);

	XDEF	_PSB2_reconstruct
_PSB2_reconstruct:
; void PSB2_reconstruct(char *dest, char *dest1, char *source, char *source1, int row_incr)
;   a0 = dest, a1 = dest1, a2 = source, a3 = source1,
;   d0 = row_incr (counted in ints)
; Same copy as PMB2_reconstruct: two 8x8 byte blocks, all four
; pointers strided by the same row increment.
	subq.l	#2,d0			; skip the 2 ints just copied per row ...
	lsl.l	#2,d0			; ... and scale ints -> bytes
	moveq	#7,d1			; 8 rows
psb2_row:
	move.l	(a2)+,(a0)+		; 8 bytes of this row, block 1 ...
	move.l	(a3)+,(a1)+		; ... interleaved with block 2
	move.l	(a2)+,(a0)+
	move.l	(a3)+,(a1)+
	add.l	d0,a2			; step all four pointers ...
	add.l	d0,a3
	add.l	d0,a0			; ... to the next row
	add.l	d0,a1
	dbra	d1,psb2_row
	rts


; ***************************************************************

;void PSB1_reconstruct(char *dest, char *source, int row_incr);

	XDEF	_PSB1_reconstruct
_PSB1_reconstruct:
; void PSB1_reconstruct(char *dest, char *source, int row_incr)
;   a0 = dest (strided), a1 = source (read linearly),
;   d0 = row_incr (counted in ints)
; Scatters 16 packed source rows of 16 bytes each into a strided
; destination; two rows are handled per loop turn.
	subq.l	#4,d0			; skip the 4 ints stored per row ...
	lsl.l	#2,d0			; ... and scale ints -> bytes
	moveq	#7,d1			; 8 iterations, 2 rows each
psb1_pair:
	move.l	(a1)+,(a0)+		; row n
	move.l	(a1)+,(a0)+
	move.l	(a1)+,(a0)+
	move.l	(a1)+,(a0)+
	add.l	d0,a0
	move.l	(a1)+,(a0)+		; row n+1
	move.l	(a1)+,(a0)+
	move.l	(a1)+,(a0)+
	move.l	(a1)+,(a0)+
	add.l	d0,a0
	dbra	d1,psb1_pair
	rts

; ***************************************************************

; void RSB4_reconstruct(char *dest, char *source1, char *source2, int row_size);
;

	XDEF	_RSB4_reconstruct
; ---------------------------------------------------------------
; void RSB4_reconstruct(char *dest, char *source1, char *source2, int row_size)
;   a0 = dest (written linearly), a1/a2 = sources (strided),
;   d0 = row_size in bytes
; 8 rows of 8 bytes: bytewise average of the two sources via the
; $fefefefe-mask / roxr-carry trick (truncating; see PSB4).
; ---------------------------------------------------------------
_RSB4_reconstruct:
rsb4reg REG	d2-d4
	movem.l	rsb4reg,-(sp)

	subq.l	#8,d0			; correct row_size

	move.l	#$fefefefe,d3		; mask: clears bit 0 of every byte

	moveq	#7,d4			; 8 rows
rsb4_all_rows:

	REPT	2
	move.l	(a1)+,d1
	move.l	(a2)+,d2
	and.l	d3,d1
	and.l	d3,d2
	add.l	d1,d2
	roxr.l	#1,d2			; tricky! get bit no 32 from the previous addition
	move.l	d2,(a0)+		; this one kicks ass !!!
	ENDR

	add.l	d0,a1			; only the sources are strided
	add.l	d0,a2
	dbra	d4,rsb4_all_rows

	movem.l	(sp)+,rsb4reg
	rts


; ***************************************************************

;void RSB3_reconstruct(char *dest, char *source, int row_size);

	XDEF	_RSB3_reconstruct
_RSB3_reconstruct:
; void RSB3_reconstruct(char *dest, char *source, int row_size)
;   a0 = dest (written linearly), a1 = source (strided),
;   d0 = row_size in bytes
; Gathers 8 strided source rows of 8 bytes each into a packed
; destination; two rows are handled per loop turn.
	subq.l	#8,d0			; stride from end of one 8-byte row to the next
	moveq	#3,d1			; 4 iterations, 2 rows each
rsb3_pair:
	move.l	(a1)+,(a0)+		; row n
	move.l	(a1)+,(a0)+
	add.l	d0,a1
	move.l	(a1)+,(a0)+		; row n+1
	move.l	(a1)+,(a0)+
	add.l	d0,a1
	dbra	d1,rsb3_pair
	rts

; ***************************************************************

; void RSB2_reconstruct(char *dest, char *source1, char *source2, int row_size);
;

	XDEF	_RSB2_reconstruct
; ---------------------------------------------------------------
; void RSB2_reconstruct(char *dest, char *source1, char *source2, int row_size)
;   a0 = dest (written linearly), a1/a2 = sources (strided),
;   d0 = row_size in bytes
; 16 rows of 16 bytes: bytewise average of the two sources via the
; $fefefefe-mask / roxr-carry trick (truncating; see PSB4).
; ---------------------------------------------------------------
_RSB2_reconstruct:
rsb2reg REG	d2-d4
	movem.l	rsb2reg,-(sp)

	sub.l	#16,d0			; correct row_size

	move.l	#$fefefefe,d3		; mask: clears bit 0 of every byte

	moveq	#15,d4			; 16 rows
rsb2_all_rows:

	REPT	4
	move.l	(a1)+,d1
	move.l	(a2)+,d2
	and.l	d3,d1
	and.l	d3,d2
	add.l	d1,d2
	roxr.l	#1,d2			; tricky! get bit no 32 from the previous addition
	move.l	d2,(a0)+		; this one kicks ass !!!
	ENDR

	add.l	d0,a1			; only the sources are strided
	add.l	d0,a2
	dbra	d4,rsb2_all_rows

	movem.l	(sp)+,rsb2reg
	rts


; ***************************************************************

;void RSB1_reconstruct(char *dest, char *source, int row_size);

	XDEF	_RSB1_reconstruct
_RSB1_reconstruct:
; void RSB1_reconstruct(char *dest, char *source, int row_size)
;   a0 = dest (written linearly), a1 = source (strided),
;   d0 = row_size in bytes
; Gathers 16 strided source rows of 16 bytes each into a packed
; destination; two rows are handled per loop turn.
	sub.l	#16,d0			; stride from end of one 16-byte row to the next
	moveq	#7,d1			; 8 iterations, 2 rows each
rsb1_pair:
	move.l	(a1)+,(a0)+		; row n
	move.l	(a1)+,(a0)+
	move.l	(a1)+,(a0)+
	move.l	(a1)+,(a0)+
	add.l	d0,a1
	move.l	(a1)+,(a0)+		; row n+1
	move.l	(a1)+,(a0)+
	move.l	(a1)+,(a0)+
	move.l	(a1)+,(a0)+
	add.l	d0,a1
	dbra	d1,rsb1_pair
	rts

; ***************************************************************
	ifeq	1

;void BM_reconstruct(char *index, char *rindex1, int row_size);

	XDEF	_BM_reconstruct
; NOTE(review): this routine sits inside the "ifeq 1" region
; (IFEQ assembles only when the expression is 0), so it is
; conditionally assembled OUT.
; void BM_reconstruct(char *index, char *rindex1, int row_size)
;   a0 = dest, a1 = source, d0 = row_size; copies an 8x8 byte block
;   with both pointers strided.
_BM_reconstruct:
	subq.l	#8,d0			; correct row_size
	moveq	#3,d1			; 4 iterations, 2 rows each
bm_all_rows:
	rept	2
	move.l	(a1)+,(a0)+
	move.l	(a1)+,(a0)+
	add.l	d0,a0
	add.l	d0,a1
	endr
	dbra	d1,bm_all_rows
	rts


; ***************************************************************

;void BMcm_reconstruct(char *index, char *rindex1, short *blockvals, int row_size);


	XDEF	_BMcm_reconstruct
; NOTE(review): inside the "ifeq 1" region -- assembled OUT.
; void BMcm_reconstruct(char *index, char *rindex1, short *blockvals, int row_size)
;   a0 = dest, a1 = reference pixels, a2 = 16-bit coefficients,
;   d0 = row_size
; For each of 8 rows: read 4 reference bytes, add four 16-bit
; coefficients, and clamp each sum to a byte through the _clamp
; lookup (indexed by the signed 16-bit sum, *2).  The first two
; sums are formed with a single long add (bytes 0/1 spread into the
; two words of d3); presumably _clamp word entries place the clamped
; value so the word/byte write pair below assembles two pixels --
; confirm against _clamp's definition.
_BMcm_reconstruct:
bmcmreg REG	d2-d5/a4
	movem.l	bmcmreg,-(sp)

	lea	_clamp,a4		; clamp table

	subq.l	#8,d0			; correct row_size

	moveq	#7,d5			; 8 rows
bmcm_all_rows:


	REPT	2
	move.l	(a1)+,d3		; 4 reference bytes b0.b1.b2.b3

	moveq	#0,d1
	move.b	d3,d1			; d1 = b3

	lsr.l	#8,d3
	moveq	#0,d4
	move.b	d3,d4			; d4 = b2

	lsr.w	#8,d3		; *grin evilly*  eadiz, peecee  *strut*  (d3 now = b0:..:b1, one byte per word)
	add.l	(a2)+,d3	; add coefficients c0/c1 to b0/b1 in one long add
	move.w	d3,d2		; d2 = b1+c1
	swap	d3		; d3.w = b0+c0

	move.w	(a4,d3.w*2),d3	; clamp b0+c0 ...
	move.b	(a4,d2.w*2),d3	; ... and b1+c1
	swap	d3

	add.w	(a2)+,d4	; b2+c2
	add.w	(a2)+,d1	; b3+c3
	move.w	(a4,d4.w*2),d3	; clamp both into the low word
	move.b	(a4,d1.w*2),d3

	move.l	d3,(a0)+	; store 4 clamped bytes
	ENDR

	add.l	d0,a0
	add.l	d0,a1
	dbra	d5,bmcm_all_rows

	movem.l	(sp)+,bmcmreg
	rts


; ***************************************************************

;void BIMcm_reconstruct(char *index, char *rindex1, char *bindex1, short *blockvals, int row_size);


	XDEF	_BIMcm_reconstruct
; NOTE(review): inside the "ifeq 1" region -- assembled OUT.
; void BIMcm_reconstruct(char *index, char *rindex1, char *bindex1, short *blockvals, int row_size)
;   a0 = dest, a1/a2 = the two reference blocks, a3 = 16-bit
;   coefficients, d0 = row_size
; For each of 8 rows: average 4 bytes of the two references (same
; $fefefefe/roxr trick as the *SB* averagers), add four 16-bit
; coefficients, and clamp each result to a byte via the _clamp
; table (see _BMcm_reconstruct for the two-pixels-per-long-add
; layout).
_BIMcm_reconstruct:
bimcmreg REG	d2-d6/a4
	movem.l	bimcmreg,-(sp)

	lea	_clamp,a4		; clamp table

	subq.l	#8,d0			; correct row_size

	move.l	#$fefefefe,d4		; mask: clears bit 0 of every byte

	moveq	#7,d5			; 8 rows
bimcm_all_rows:

	REPT	2

	move.l	(a1)+,d1
	move.l	(a2)+,d3
	and.l	d4,d1
	and.l	d4,d3
	add.l	d1,d3
	roxr.l	#1,d3		; tricky! get bit #32 from the previous addition

	moveq	#0,d1
	move.b	d3,d1		; d1 = averaged byte 3

	lsr.l	#8,d3
	moveq	#0,d6
	move.b	d3,d6		; d6 = averaged byte 2

	lsr.w	#8,d3		; *grin evilly*  eadiz, peecee  *strut*
	add.l	(a3)+,d3	; add coefficients c0/c1 to bytes 0/1 in one long add
	move.w	d3,d2
	swap	d3

	move.w	(a4,d3.w*2),d3	; clamp byte 0 ...
	move.b	(a4,d2.w*2),d3	; ... and byte 1
	swap	d3


	add.w	(a3)+,d6	; byte2 + c2
	add.w	(a3)+,d1	; byte3 + c3
	move.w	(a4,d6.w*2),d3
	move.b	(a4,d1.w*2),d3

	move.l	d3,(a0)+	; store 4 clamped bytes
	ENDR

	add.l	d0,a0
	add.l	d0,a1
	add.l	d0,a2
	dbra	d5,bimcm_all_rows

	movem.l	(sp)+,bimcmreg
	rts


; ***************************************************************

;void BIM_reconstruct(char *index, char *rindex1, char *bindex1, int row_size);
;

	XDEF	_BIM_reconstruct
; NOTE(review): inside the "ifeq 1" region -- assembled OUT.
; void BIM_reconstruct(char *index, char *rindex1, char *bindex1, int row_size)
;   a0 = dest, a1/a2 = the two references, d0 = row_size; all three
;   pointers strided.  8 rows of 8 bytes, bytewise average via the
;   $fefefefe-mask / roxr-carry trick (truncating; see PSB4).
_BIM_reconstruct:
bimreg REG	d2-d4
	movem.l	bimreg,-(sp)

	subq.l	#8,d0			; correct row_size

	move.l	#$fefefefe,d3		; mask: clears bit 0 of every byte

	moveq	#7,d4			; 8 rows
bim_all_rows:

	REPT	2
	move.l	(a1)+,d1
	move.l	(a2)+,d2
	and.l	d3,d1
	and.l	d3,d2
	add.l	d1,d2
	roxr.l	#1,d2			; tricky! get bit no 32 from the previous addition
	move.l	d2,(a0)+		; this one kicks ass !!!
	ENDR

	add.l	d0,a0
	add.l	d0,a1
	add.l	d0,a2
	dbra	d4,bim_all_rows

	movem.l	(sp)+,bimreg
	rts


; ***************************************************************

	XDEF	_IM_reconstruct
; NOTE(review): inside the "ifeq 1" region -- assembled OUT.
; _IM_reconstruct -- a0 = dest (strided), a1 = 16-bit values (read
; linearly), d0 = row_size.  No C prototype is given here;
; presumably (char *index, short *blockvals, int row_size) --
; confirm.
; For each of 8 rows, clamp 8 16-bit values to bytes through the
; _clamp table and store them as 8 destination bytes.
_IM_reconstruct:
cdctreg REG	d2-d3/a2
	movem.l	cdctreg,-(sp)

	lea	_clamp,a2		; clamp table, indexed by value*2

	subq.l	#8,d0			; correct row_size

	moveq	#7,d2			; 8 rows
cdct_all_rows:

	REPT	2
	move.w	(a1)+,d1
	move.w	(a2,d1.w*2),d3		; clamp -> assembles the high byte of the word
	move.w	(a1)+,d1
	move.b	(a2,d1.w*2),d3		; clamp -> low byte
	swap	d3
	move.w	(a1)+,d1
	move.w	(a2,d1.w*2),d3
	move.w	(a1)+,d1
	move.b	(a2,d1.w*2),d3
	move.l	d3,(a0)+		; store 4 clamped bytes
	ENDR

	add.l	d0,a0
	dbra	d2,cdct_all_rows

	movem.l	(sp)+,cdctreg
	rts

	endc
; ***************************************************************

	XDEF	@clear64words
@clear64words:
; Zero 128 bytes (64 16-bit words) starting at a0; a0 ends just past
; the cleared area.  Clobbers d0.
	moveq	#15,d0			; 16 iterations, 8 bytes each
czl:	clr.l	(a0)+
	clr.l	(a0)+
	dbra	d0,czl
	rts

; ***************************************************************

	SECTION	__MERGED,DATA


;
; Decoding table for coded_block_pattern
;

;	XDEF	_coded_block_pattern

; Indexed by 9 bits of lookahead (see @s_DecodeCBP); each entry is a
; (value, code length) byte pair, read as one word by
; @s_get_byte_huff.  Every code is replicated 2^(9-length) times so
; any 9-bit lookahead beginning with the code maps to it.  ER marks
; invalid codes.
_coded_block_pattern:
	dc.b	ER,0,ER,0,39,9,27,9,59,9,55,9,47,9,31,9
	dc.b	58,8,58,8,54,8,54,8,46,8,46,8,30,8,30,8
	dc.b	57,8,57,8,53,8,53,8,45,8,45,8,29,8,29,8
	dc.b	38,8,38,8,26,8,26,8,37,8,37,8,25,8,25,8
	dc.b	43,8,43,8,23,8,23,8,51,8,51,8,15,8,15,8
	dc.b	42,8,42,8,22,8,22,8,50,8,50,8,14,8,14,8
	dc.b	41,8,41,8,21,8,21,8,49,8,49,8,13,8,13,8
	dc.b	35,8,35,8,19,8,19,8,11,8,11,8,7,8,7,8
	dc.b	34,7,34,7,34,7,34,7,18,7,18,7,18,7,18,7
	dc.b	10,7,10,7,10,7,10,7,6,7,6,7,6,7,6,7
	dc.b	33,7,33,7,33,7,33,7,17,7,17,7,17,7,17,7
	dc.b	9,7,9,7,9,7,9,7,5,7,5,7,5,7,5,7
	dc.b	63,6,63,6,63,6,63,6,63,6,63,6,63,6,63,6
	dc.b	3,6,3,6,3,6,3,6,3,6,3,6,3,6,3,6
	dc.b	36,6,36,6,36,6,36,6,36,6,36,6,36,6,36,6
	dc.b	24,6,24,6,24,6,24,6,24,6,24,6,24,6,24,6
	dc.b	62,5,62,5,62,5,62,5,62,5,62,5,62,5,62,5
	dc.b	62,5,62,5,62,5,62,5,62,5,62,5,62,5,62,5
	dc.b	2,5,2,5,2,5,2,5,2,5,2,5,2,5,2,5
	dc.b	2,5,2,5,2,5,2,5,2,5,2,5,2,5,2,5
	dc.b	61,5,61,5,61,5,61,5,61,5,61,5,61,5,61,5
	dc.b	61,5,61,5,61,5,61,5,61,5,61,5,61,5,61,5
	dc.b	1,5,1,5,1,5,1,5,1,5,1,5,1,5,1,5
	dc.b	1,5,1,5,1,5,1,5,1,5,1,5,1,5,1,5
	dc.b	56,5,56,5,56,5,56,5,56,5,56,5,56,5,56,5
	dc.b	56,5,56,5,56,5,56,5,56,5,56,5,56,5,56,5
	dc.b	52,5,52,5,52,5,52,5,52,5,52,5,52,5,52,5
	dc.b	52,5,52,5,52,5,52,5,52,5,52,5,52,5,52,5
	dc.b	44,5,44,5,44,5,44,5,44,5,44,5,44,5,44,5
	dc.b	44,5,44,5,44,5,44,5,44,5,44,5,44,5,44,5
	dc.b	28,5,28,5,28,5,28,5,28,5,28,5,28,5,28,5
	dc.b	28,5,28,5,28,5,28,5,28,5,28,5,28,5,28,5
	dc.b	40,5,40,5,40,5,40,5,40,5,40,5,40,5,40,5
	dc.b	40,5,40,5,40,5,40,5,40,5,40,5,40,5,40,5
	dc.b	20,5,20,5,20,5,20,5,20,5,20,5,20,5,20,5
	dc.b	20,5,20,5,20,5,20,5,20,5,20,5,20,5,20,5
	dc.b	48,5,48,5,48,5,48,5,48,5,48,5,48,5,48,5
	dc.b	48,5,48,5,48,5,48,5,48,5,48,5,48,5,48,5
	dc.b	12,5,12,5,12,5,12,5,12,5,12,5,12,5,12,5
	dc.b	12,5,12,5,12,5,12,5,12,5,12,5,12,5,12,5
	dc.b	32,4,32,4,32,4,32,4,32,4,32,4,32,4,32,4
	dc.b	32,4,32,4,32,4,32,4,32,4,32,4,32,4,32,4
	dc.b	32,4,32,4,32,4,32,4,32,4,32,4,32,4,32,4
	dc.b	32,4,32,4,32,4,32,4,32,4,32,4,32,4,32,4
	dc.b	16,4,16,4,16,4,16,4,16,4,16,4,16,4,16,4
	dc.b	16,4,16,4,16,4,16,4,16,4,16,4,16,4,16,4
	dc.b	16,4,16,4,16,4,16,4,16,4,16,4,16,4,16,4
	dc.b	16,4,16,4,16,4,16,4,16,4,16,4,16,4,16,4
	dc.b	8,4,8,4,8,4,8,4,8,4,8,4,8,4,8,4
	dc.b	8,4,8,4,8,4,8,4,8,4,8,4,8,4,8,4
	dc.b	8,4,8,4,8,4,8,4,8,4,8,4,8,4,8,4
	dc.b	8,4,8,4,8,4,8,4,8,4,8,4,8,4,8,4
	dc.b	4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4
	dc.b	4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4
	dc.b	4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4
	dc.b	4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4
	dc.b	60,3,60,3,60,3,60,3,60,3,60,3,60,3,60,3
	dc.b	60,3,60,3,60,3,60,3,60,3,60,3,60,3,60,3
	dc.b	60,3,60,3,60,3,60,3,60,3,60,3,60,3,60,3
	dc.b	60,3,60,3,60,3,60,3,60,3,60,3,60,3,60,3
	dc.b	60,3,60,3,60,3,60,3,60,3,60,3,60,3,60,3
	dc.b	60,3,60,3,60,3,60,3,60,3,60,3,60,3,60,3
	dc.b	60,3,60,3,60,3,60,3,60,3,60,3,60,3,60,3
	dc.b	60,3,60,3,60,3,60,3,60,3,60,3,60,3,60,3


;	XDEF	_dct_dc_size_luminance
;	XDEF	_dct_dc_size_chrominance

; Indexed by 7 bits of lookahead (see @s_DecodeDCTDCSizeLum);
; (dc_size, code length) byte pairs, each code replicated
; 2^(7-length) times.  ER marks the invalid all-ones code.
_dct_dc_size_luminance:
	dc.b	1,2,1,2,1,2,1,2,1,2,1,2,1,2,1,2
	dc.b	1,2,1,2,1,2,1,2,1,2,1,2,1,2,1,2
	dc.b	1,2,1,2,1,2,1,2,1,2,1,2,1,2,1,2
	dc.b	1,2,1,2,1,2,1,2,1,2,1,2,1,2,1,2
	dc.b	2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2
	dc.b	2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2
	dc.b	2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2
	dc.b	2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2
	dc.b	0,3,0,3,0,3,0,3,0,3,0,3,0,3,0,3
	dc.b	0,3,0,3,0,3,0,3,0,3,0,3,0,3,0,3
	dc.b	3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3
	dc.b	3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3
	dc.b	4,3,4,3,4,3,4,3,4,3,4,3,4,3,4,3
	dc.b	4,3,4,3,4,3,4,3,4,3,4,3,4,3,4,3
	dc.b	5,4,5,4,5,4,5,4,5,4,5,4,5,4,5,4
	dc.b	6,5,6,5,6,5,6,5,7,6,7,6,8,7,ER,0


; Indexed by 8 bits of lookahead (see @s_DecodeDCTDCSizeChrom);
; (dc_size, code length) byte pairs, each code replicated
; 2^(8-length) times.  ER marks the invalid all-ones code.
_dct_dc_size_chrominance:
	dc.b	0,2,0,2,0,2,0,2,0,2,0,2,0,2,0,2
	dc.b	0,2,0,2,0,2,0,2,0,2,0,2,0,2,0,2
	dc.b	0,2,0,2,0,2,0,2,0,2,0,2,0,2,0,2
	dc.b	0,2,0,2,0,2,0,2,0,2,0,2,0,2,0,2
	dc.b	0,2,0,2,0,2,0,2,0,2,0,2,0,2,0,2
	dc.b	0,2,0,2,0,2,0,2,0,2,0,2,0,2,0,2
	dc.b	0,2,0,2,0,2,0,2,0,2,0,2,0,2,0,2
	dc.b	0,2,0,2,0,2,0,2,0,2,0,2,0,2,0,2
	dc.b	1,2,1,2,1,2,1,2,1,2,1,2,1,2,1,2
	dc.b	1,2,1,2,1,2,1,2,1,2,1,2,1,2,1,2
	dc.b	1,2,1,2,1,2,1,2,1,2,1,2,1,2,1,2
	dc.b	1,2,1,2,1,2,1,2,1,2,1,2,1,2,1,2
	dc.b	1,2,1,2,1,2,1,2,1,2,1,2,1,2,1,2
	dc.b	1,2,1,2,1,2,1,2,1,2,1,2,1,2,1,2
	dc.b	1,2,1,2,1,2,1,2,1,2,1,2,1,2,1,2
	dc.b	1,2,1,2,1,2,1,2,1,2,1,2,1,2,1,2
	dc.b	2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2
	dc.b	2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2
	dc.b	2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2
	dc.b	2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2
	dc.b	2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2
	dc.b	2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2
	dc.b	2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2
	dc.b	2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2
	dc.b	3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3
	dc.b	3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3
	dc.b	3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3
	dc.b	3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3
	dc.b	4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4
	dc.b	4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4
	dc.b	5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5
	dc.b	6,6,6,6,6,6,6,6,7,7,7,7,8,8,ER,0


	END
