The C code below converts an unsigned 16-bit binary number into its decimal equivalent, in packed BCD format. Does this algorithm extend to other sizes of binary numbers?
/*
 * adjust - double-dabble digit correction for one packed-BCD byte.
 *
 * Before each left shift of the BCD accumulator, any decimal digit
 * (nibble) holding a value >= 5 must have 3 added to it so that the
 * doubling performed by the shift carries correctly into the next digit.
 *
 * Trick: for a nibble in 0..9, adding 3 sets the nibble's top bit
 * (0x08 for the low digit, 0x80 for the high digit) exactly when the
 * original digit was >= 5 — so the adjusted value is kept only then.
 */
void adjust (unsigned char *p)
{
	unsigned char t;

	t = (unsigned char) (*p + 0x03);	/* low digit: keep if digit >= 5  */
	if (t & 0x08)
		*p = t;

	t = (unsigned char) (*p + 0x30);	/* high digit: keep if digit >= 5 */
	if (t & 0x80)
		*p = t;
}

/*
 * binary2bcd - convert a 16-bit unsigned binary value to packed BCD.
 *
 * Classic double-dabble: 16 iterations of "adjust every BCD digit,
 * then shift the whole accumulator (three BCD bytes plus the remaining
 * input bits) left by one".  bcd[0] holds the two least significant
 * decimal digits, bcd[2] the two most significant, e.g.
 * 65535 -> 0x065535.
 *
 * Only bits 15..0 of n are consumed (the loop tests bit 0x8000 and
 * shifts left, so higher bits never reach the tested position); on
 * targets with a wider unsigned int, values above 65535 simply have
 * their upper bits ignored.
 *
 * Returns the six BCD digits packed into the low 24 bits of the
 * unsigned long result.
 */
unsigned long binary2bcd (unsigned int n)
{
	unsigned char bcd[3] = {0, 0, 0};
	int i;

	for (i = 0; i < 16; ++i) {
		/* Prepare every digit for the doubling the shift performs. */
		adjust (&bcd[0]);
		adjust (&bcd[1]);
		adjust (&bcd[2]);

		/* 24-bit left shift across the three BCD bytes...          */
		bcd[2] <<= 1;
		if (bcd[1] & 0x80) ++bcd[2];
		bcd[1] <<= 1;
		if (bcd[0] & 0x80) ++bcd[1];
		bcd[0] <<= 1;

		/* ...pulling in the next input bit, most significant first. */
		if (n & 0x8000) ++bcd[0];
		n <<= 1;
	}

	/*
	 * BUG FIX: the original computed bcd[2] << 16 on the int-promoted
	 * byte and cast to unsigned long only afterwards.  On a 16-bit-int
	 * target that shift is undefined behavior (shift count >= width of
	 * the promoted type, C11 6.5.7) and the high digits are lost.
	 * Widen each byte to unsigned long BEFORE shifting.
	 */
	return ((unsigned long) bcd[2] << 16)
	     | ((unsigned long) bcd[1] << 8)
	     |  (unsigned long) bcd[0];
}
— ADVERTISEMENT —
—Advertise Here—