Index: uspace/lib/softfloat/generic/add.c
===================================================================
--- uspace/lib/softfloat/generic/add.c	(revision 750636ac4a65360c44846b7681af6ae46854dd3a)
+++ uspace/lib/softfloat/generic/add.c	(revision bb285b42d51bf15d56c4d20ab034e93a463a8b7f)
@@ -1,4 +1,5 @@
 /*
  * Copyright (c) 2005 Josef Cejka
+ * Copyright (c) 2011 Petr Koupy
  * All rights reserved.
  *
@@ -30,5 +31,5 @@
  * @{
  */
-/** @file
+/** @file Addition functions.
  */
 
@@ -36,11 +37,17 @@
 #include <add.h>
 #include <comparison.h>
-
-/** Add two Float32 numbers with same signs
+#include <common.h>
+
+/**
+ * Add two single-precision floats with the same signs.
+ *
+ * @param a First input operand.
+ * @param b Second input operand.
+ * @return Result of addition.
  */
 float32 addFloat32(float32 a, float32 b)
 {
 	int expdiff;
-	uint32_t exp1, exp2,frac1, frac2;
+	uint32_t exp1, exp2, frac1, frac2;
 	
 	expdiff = a.parts.exp - b.parts.exp;
@@ -49,8 +56,8 @@
 			/* TODO: fix SigNaN */
 			if (isFloat32SigNaN(b)) {
-			};
-
-			return b;
-		};
+			}
+
+			return b;
+		}
 		
 		if (b.parts.exp == FLOAT32_MAX_EXPONENT) { 
@@ -67,7 +74,7 @@
 			/* TODO: fix SigNaN */
 			if (isFloat32SigNaN(a) || isFloat32SigNaN(b)) {
-			};
-			return (isFloat32NaN(a)?a:b);
-		};
+			}
+			return (isFloat32NaN(a) ? a : b);
+		}
 		
 		if (a.parts.exp == FLOAT32_MAX_EXPONENT) { 
@@ -79,5 +86,5 @@
 		frac2 = b.parts.fraction;
 		exp2 = b.parts.exp;
-	};
+	}
 	
 	if (exp1 == 0) {
@@ -87,8 +94,8 @@
 			/* result is not denormalized */
 			a.parts.exp = 1;
-		};
+		}
 		a.parts.fraction = frac1;
 		return a;
-	};
+	}
 	
 	frac1 |= FLOAT32_HIDDEN_BIT_MASK; /* add hidden bit */
@@ -100,5 +107,5 @@
 		/* add hidden bit to second operand */
 		frac2 |= FLOAT32_HIDDEN_BIT_MASK; 
-	};
+	}
 	
 	/* create some space for rounding */
@@ -118,5 +125,5 @@
 		++exp1;
 		frac1 >>= 1;
-	};
+	}
 	
 	/* rounding - if first bit after fraction is set then round up */
@@ -127,22 +134,26 @@
 		++exp1;
 		frac1 >>= 1;
-	};
-	
+	}
 	
 	if ((exp1 == FLOAT32_MAX_EXPONENT ) || (exp2 > exp1)) {
-			/* overflow - set infinity as result */
-			a.parts.exp = FLOAT32_MAX_EXPONENT;
-			a.parts.fraction = 0;
-			return a;
-			}
+		/* overflow - set infinity as result */
+		a.parts.exp = FLOAT32_MAX_EXPONENT;
+		a.parts.fraction = 0;
+		return a;
+	}
 	
 	a.parts.exp = exp1;
 	
 	/* Clear hidden bit and shift */
-	a.parts.fraction = ((frac1 >> 6) & (~FLOAT32_HIDDEN_BIT_MASK)) ; 
+	a.parts.fraction = ((frac1 >> 6) & (~FLOAT32_HIDDEN_BIT_MASK)); 
 	return a;
 }
 
-/** Add two Float64 numbers with same signs
+/**
+ * Add two double-precision floats with the same signs.
+ *
+ * @param a First input operand.
+ * @param b Second input operand.
+ * @return Result of addition.
  */
 float64 addFloat64(float64 a, float64 b)
@@ -152,16 +163,16 @@
 	uint64_t frac1, frac2;
 	
-	expdiff = ((int )a.parts.exp) - b.parts.exp;
+	expdiff = ((int) a.parts.exp) - b.parts.exp;
 	if (expdiff < 0) {
 		if (isFloat64NaN(b)) {
 			/* TODO: fix SigNaN */
 			if (isFloat64SigNaN(b)) {
-			};
-
-			return b;
-		};
+			}
+
+			return b;
+		}
 		
 		/* b is infinity and a not */	
-		if (b.parts.exp == FLOAT64_MAX_EXPONENT ) { 
+		if (b.parts.exp == FLOAT64_MAX_EXPONENT) { 
 			return b;
 		}
@@ -176,10 +187,10 @@
 			/* TODO: fix SigNaN */
 			if (isFloat64SigNaN(a) || isFloat64SigNaN(b)) {
-			};
+			}
 			return a;
-		};
+		}
 		
 		/* a is infinity and b not */
-		if (a.parts.exp == FLOAT64_MAX_EXPONENT ) { 
+		if (a.parts.exp == FLOAT64_MAX_EXPONENT) { 
 			return a;
 		}
@@ -189,5 +200,5 @@
 		frac2 = b.parts.fraction;
 		exp2 = b.parts.exp;
-	};
+	}
 	
 	if (exp1 == 0) {
@@ -197,8 +208,8 @@
 			/* result is not denormalized */
 			a.parts.exp = 1;
-		};
+		}
 		a.parts.fraction = frac1;
 		return a;
-	};
+	}
 	
 	/* add hidden bit - frac1 is sure not denormalized */
@@ -212,5 +223,5 @@
 		/* is not denormalized */
 		frac2 |= FLOAT64_HIDDEN_BIT_MASK;
-	};
+	}
 	
 	/* create some space for rounding */
@@ -218,5 +229,5 @@
 	frac2 <<= 6;
 	
-	if (expdiff < (FLOAT64_FRACTION_SIZE + 2) ) {
+	if (expdiff < (FLOAT64_FRACTION_SIZE + 2)) {
 		frac2 >>= expdiff;
 		frac1 += frac2;
@@ -227,8 +238,8 @@
 	}
 	
-	if (frac1 & (FLOAT64_HIDDEN_BIT_MASK << 7) ) {
+	if (frac1 & (FLOAT64_HIDDEN_BIT_MASK << 7)) {
 		++exp1;
 		frac1 >>= 1;
-	};
+	}
 	
 	/* rounding - if first bit after fraction is set then round up */
@@ -239,20 +250,170 @@
 		++exp1;
 		frac1 >>= 1;
-	};
+	}
 	
 	if ((exp1 == FLOAT64_MAX_EXPONENT ) || (exp2 > exp1)) {
-			/* overflow - set infinity as result */
-			a.parts.exp = FLOAT64_MAX_EXPONENT;
-			a.parts.fraction = 0;
-			return a;
-			}
+		/* overflow - set infinity as result */
+		a.parts.exp = FLOAT64_MAX_EXPONENT;
+		a.parts.fraction = 0;
+		return a;
+	}
 	
 	a.parts.exp = exp1;
 	/* Clear hidden bit and shift */
-	a.parts.fraction = ( (frac1 >> 6 ) & (~FLOAT64_HIDDEN_BIT_MASK));
-	
+	a.parts.fraction = ((frac1 >> 6 ) & (~FLOAT64_HIDDEN_BIT_MASK));
 	return a;
 }
 
+/**
+ * Add two quadruple-precision floats with the same signs.
+ *
+ * @param a First input operand.
+ * @param b Second input operand.
+ * @return Result of addition.
+ */
+float128 addFloat128(float128 a, float128 b)
+{
+	int expdiff;
+	uint32_t exp1, exp2;
+	uint64_t frac1_hi, frac1_lo, frac2_hi, frac2_lo, tmp_hi, tmp_lo;
+
+	expdiff = ((int) a.parts.exp) - b.parts.exp;
+	if (expdiff < 0) {
+		if (isFloat128NaN(b)) {
+			/* TODO: fix SigNaN */
+			if (isFloat128SigNaN(b)) {
+			}
+
+			return b;
+		}
+
+		/* b is infinity and a not */
+		if (b.parts.exp == FLOAT128_MAX_EXPONENT) {
+			return b;
+		}
+
+		frac1_hi = b.parts.frac_hi;
+		frac1_lo = b.parts.frac_lo;
+		exp1 = b.parts.exp;
+		frac2_hi = a.parts.frac_hi;
+		frac2_lo = a.parts.frac_lo;
+		exp2 = a.parts.exp;
+		expdiff *= -1;
+	} else {
+		if (isFloat128NaN(a)) {
+			/* TODO: fix SigNaN */
+			if (isFloat128SigNaN(a) || isFloat128SigNaN(b)) {
+			}
+			return a;
+		}
+
+		/* a is infinity and b not */
+		if (a.parts.exp == FLOAT128_MAX_EXPONENT) {
+			return a;
+		}
+
+		frac1_hi = a.parts.frac_hi;
+		frac1_lo = a.parts.frac_lo;
+		exp1 = a.parts.exp;
+		frac2_hi = b.parts.frac_hi;
+		frac2_lo = b.parts.frac_lo;
+		exp2 = b.parts.exp;
+	}
+
+	if (exp1 == 0) {
+		/* both are denormalized */
+		add128(frac1_hi, frac1_lo, frac2_hi, frac2_lo, &frac1_hi, &frac1_lo);
+
+		and128(frac1_hi, frac1_lo,
+		    FLOAT128_HIDDEN_BIT_MASK_HI, FLOAT128_HIDDEN_BIT_MASK_LO,
+		    &tmp_hi, &tmp_lo);
+		if (lt128(0x0ll, 0x0ll, tmp_hi, tmp_lo)) {
+			/* result is not denormalized */
+			a.parts.exp = 1;
+		}
+
+		a.parts.frac_hi = frac1_hi;
+		a.parts.frac_lo = frac1_lo;
+		return a;
+	}
+
+	/* add hidden bit - frac1 is sure not denormalized */
+	or128(frac1_hi, frac1_lo,
+	    FLOAT128_HIDDEN_BIT_MASK_HI, FLOAT128_HIDDEN_BIT_MASK_LO,
+	    &frac1_hi, &frac1_lo);
+
+	/* second operand ... */
+	if (exp2 == 0) {
+		/* ... is denormalized */
+		--expdiff;
+	} else {
+		/* is not denormalized */
+		or128(frac2_hi, frac2_lo,
+		    FLOAT128_HIDDEN_BIT_MASK_HI, FLOAT128_HIDDEN_BIT_MASK_LO,
+		    &frac2_hi, &frac2_lo);
+	}
+
+	/* create some space for rounding */
+	lshift128(frac1_hi, frac1_lo, 6, &frac1_hi, &frac1_lo);
+	lshift128(frac2_hi, frac2_lo, 6, &frac2_hi, &frac2_lo);
+
+	if (expdiff < (FLOAT128_FRACTION_SIZE + 2)) {
+		rshift128(frac2_hi, frac2_lo, expdiff, &frac2_hi, &frac2_lo);
+		add128(frac1_hi, frac1_lo, frac2_hi, frac2_lo, &frac1_hi, &frac1_lo);
+	} else {
+		a.parts.exp = exp1;
+
+		rshift128(frac1_hi, frac1_lo, 6, &frac1_hi, &frac1_lo);
+		not128(FLOAT128_HIDDEN_BIT_MASK_HI, FLOAT128_HIDDEN_BIT_MASK_LO,
+		    &tmp_hi, &tmp_lo);
+		and128(frac1_hi, frac1_lo, tmp_hi, tmp_lo, &tmp_hi, &tmp_lo);
+
+		a.parts.frac_hi = tmp_hi;
+		a.parts.frac_lo = tmp_lo;
+		return a;
+	}
+
+	lshift128(FLOAT128_HIDDEN_BIT_MASK_HI, FLOAT128_HIDDEN_BIT_MASK_LO, 7,
+	    &tmp_hi, &tmp_lo);
+	and128(frac1_hi, frac1_lo, tmp_hi, tmp_lo, &tmp_hi, &tmp_lo);
+	if (lt128(0x0ll, 0x0ll, tmp_hi, tmp_lo)) {
+		++exp1;
+		rshift128(frac1_hi, frac1_lo, 1, &frac1_hi, &frac1_lo);
+	}
+
+	/* rounding - if first bit after fraction is set then round up */
+	add128(frac1_hi, frac1_lo, 0x0ll, 0x1ll << 5, &frac1_hi, &frac1_lo);
+
+	lshift128(FLOAT128_HIDDEN_BIT_MASK_HI, FLOAT128_HIDDEN_BIT_MASK_LO, 7,
+	    &tmp_hi, &tmp_lo);
+	and128(frac1_hi, frac1_lo, tmp_hi, tmp_lo, &tmp_hi, &tmp_lo);
+	if (lt128(0x0ll, 0x0ll, tmp_hi, tmp_lo)) {
+		/* rounding overflow */
+		++exp1;
+		rshift128(frac1_hi, frac1_lo, 1, &frac1_hi, &frac1_lo);
+	}
+
+	if ((exp1 == FLOAT128_MAX_EXPONENT) || (exp2 > exp1)) {
+		/* overflow - set infinity as result */
+		a.parts.exp = FLOAT128_MAX_EXPONENT;
+		a.parts.frac_hi = 0;
+		a.parts.frac_lo = 0;
+		return a;
+	}
+
+	a.parts.exp = exp1;
+	
+	/* Clear hidden bit and shift */
+	rshift128(frac1_hi, frac1_lo, 6, &frac1_hi, &frac1_lo);
+	not128(FLOAT128_HIDDEN_BIT_MASK_HI, FLOAT128_HIDDEN_BIT_MASK_LO,
+	    &tmp_hi, &tmp_lo);
+	and128(frac1_hi, frac1_lo, tmp_hi, tmp_lo, &tmp_hi, &tmp_lo);
+
+	a.parts.frac_hi = tmp_hi;
+	a.parts.frac_lo = tmp_lo;
+
+	return a;
+}
+
 /** @}
  */
Index: uspace/lib/softfloat/generic/common.c
===================================================================
--- uspace/lib/softfloat/generic/common.c	(revision 750636ac4a65360c44846b7681af6ae46854dd3a)
+++ uspace/lib/softfloat/generic/common.c	(revision bb285b42d51bf15d56c4d20ab034e93a463a8b7f)
@@ -1,4 +1,5 @@
 /*
  * Copyright (c) 2005 Josef Cejka
+ * Copyright (c) 2011 Petr Koupy
  * All rights reserved.
  *
@@ -30,5 +31,5 @@
  * @{
  */
-/** @file
+/** @file Common helper operations.
  */
 
@@ -36,5 +37,5 @@
 #include <common.h>
 
-/* Table for fast leading zeroes counting */
+/* Table for fast leading zeroes counting. */
 char zeroTable[256] = {
 	8, 7, 7, 6, 6, 6, 6, 4, 4, 4, 4, 4, 4, 4, 4, \
@@ -56,11 +57,12 @@
 };
 
-
-
-/** Take fraction shifted by 10 bits to left, round it, normalize it and detect exceptions
- * @param cexp exponent with bias
- * @param cfrac fraction shifted 10 places left with added hidden bit
- * @param sign
- * @return valied float64
+/** 
+ * Take fraction shifted by 10 bits to the left, round it, normalize it
+ * and detect exceptions
+ * 
+ * @param cexp Exponent with bias.
+ * @param cfrac Fraction shifted 10 bits to the left with added hidden bit.
+ * @param sign Resulting sign.
+ * @return Finished double-precision float.
  */
 float64 finishFloat64(int32_t cexp, uint64_t cfrac, char sign)
@@ -71,11 +73,13 @@
 
 	/* find first nonzero digit and shift result and detect possibly underflow */
-	while ((cexp > 0) && (cfrac) && (!(cfrac & (FLOAT64_HIDDEN_BIT_MASK << (64 - FLOAT64_FRACTION_SIZE - 1 ) )))) {
+	while ((cexp > 0) && (cfrac) &&
+	    (!(cfrac & (FLOAT64_HIDDEN_BIT_MASK << (64 - FLOAT64_FRACTION_SIZE - 1))))) {
 		cexp--; 
 		cfrac <<= 1;
-			/* TODO: fix underflow */
-	};
-	
-	if ((cexp < 0) || ( cexp == 0 && (!(cfrac & (FLOAT64_HIDDEN_BIT_MASK << (64 - FLOAT64_FRACTION_SIZE - 1)))))) {
+		/* TODO: fix underflow */
+	}
+	
+	if ((cexp < 0) || (cexp == 0 &&
+	    (!(cfrac & (FLOAT64_HIDDEN_BIT_MASK << (64 - FLOAT64_FRACTION_SIZE - 1)))))) {
 		/* FIXME: underflow */
 		result.parts.exp = 0;
@@ -93,6 +97,6 @@
 		
 		if (!(cfrac & (FLOAT64_HIDDEN_BIT_MASK << (64 - FLOAT64_FRACTION_SIZE - 1)))) {
-			
-			result.parts.fraction = ((cfrac >>(64 - FLOAT64_FRACTION_SIZE - 2) ) & (~FLOAT64_HIDDEN_BIT_MASK)); 
+			result.parts.fraction =
+			    ((cfrac >> (64 - FLOAT64_FRACTION_SIZE - 2)) & (~FLOAT64_HIDDEN_BIT_MASK));
 			return result;
 		}	
@@ -103,5 +107,5 @@
 	++cexp;
 
-	if (cfrac & (FLOAT64_HIDDEN_BIT_MASK << (64 - FLOAT64_FRACTION_SIZE - 1 ))) {
+	if (cfrac & (FLOAT64_HIDDEN_BIT_MASK << (64 - FLOAT64_FRACTION_SIZE - 1))) {
 		++cexp;
 		cfrac >>= 1;
@@ -109,5 +113,5 @@
 
 	/* check overflow */
-	if (cexp >= FLOAT64_MAX_EXPONENT ) {
+	if (cexp >= FLOAT64_MAX_EXPONENT) {
 		/* FIXME: overflow, return infinity */
 		result.parts.exp = FLOAT64_MAX_EXPONENT;
@@ -116,19 +120,156 @@
 	}
 
-	result.parts.exp = (uint32_t)cexp;
-	
-	result.parts.fraction = ((cfrac >>(64 - FLOAT64_FRACTION_SIZE - 2 ) ) & (~FLOAT64_HIDDEN_BIT_MASK)); 
+	result.parts.exp = (uint32_t) cexp;
+	
+	result.parts.fraction = 
+	    ((cfrac >> (64 - FLOAT64_FRACTION_SIZE - 2)) & (~FLOAT64_HIDDEN_BIT_MASK));
 	
 	return result;	
 }
 
-/** Counts leading zeroes in 64bit unsigned integer
- * @param i 
+/**
+ * Take fraction, round it, normalize it and detect exceptions
+ * 
+ * @param cexp Exponent with bias.
+ * @param cfrac_hi High part of the fraction shifted 14 bits to the left
+ *     with added hidden bit.
+ * @param cfrac_lo Low part of the fraction shifted 14 bits to the left
+ *     with added hidden bit.
+ * @param sign Resulting sign.
+ * @param shift_out Bits right-shifted out from fraction by the caller.
+ * @return Finished quadruple-precision float.
+ */
+float128 finishFloat128(int32_t cexp, uint64_t cfrac_hi, uint64_t cfrac_lo, 
+    char sign, uint64_t shift_out)
+{
+	float128 result;
+	uint64_t tmp_hi, tmp_lo;
+
+	result.parts.sign = sign;
+
+	/* find first nonzero digit and shift result and detect possibly underflow */
+	lshift128(FLOAT128_HIDDEN_BIT_MASK_HI, FLOAT128_HIDDEN_BIT_MASK_LO,
+	    1, &tmp_hi, &tmp_lo);
+	and128(cfrac_hi, cfrac_lo, tmp_hi, tmp_lo, &tmp_hi, &tmp_lo);
+	while ((cexp > 0) && (lt128(0x0ll, 0x0ll, cfrac_hi, cfrac_lo)) &&
+	    (!lt128(0x0ll, 0x0ll, tmp_hi, tmp_lo))) {
+		cexp--;
+		lshift128(cfrac_hi, cfrac_lo, 1, &cfrac_hi, &cfrac_lo);
+		/* TODO: fix underflow */
+
+		lshift128(FLOAT128_HIDDEN_BIT_MASK_HI, FLOAT128_HIDDEN_BIT_MASK_LO,
+		    1, &tmp_hi, &tmp_lo);
+		and128(cfrac_hi, cfrac_lo, tmp_hi, tmp_lo, &tmp_hi, &tmp_lo);
+	}
+
+	lshift128(FLOAT128_HIDDEN_BIT_MASK_HI, FLOAT128_HIDDEN_BIT_MASK_LO,
+	    1, &tmp_hi, &tmp_lo);
+	and128(cfrac_hi, cfrac_lo, tmp_hi, tmp_lo, &tmp_hi, &tmp_lo);
+	if ((cexp < 0) || (cexp == 0 &&
+	    (!lt128(0x0ll, 0x0ll, tmp_hi, tmp_lo)))) {
+		/* FIXME: underflow */
+		result.parts.exp = 0;
+		if ((cexp + FLOAT128_FRACTION_SIZE + 1) < 0) { /* +1 is place for rounding */
+			result.parts.frac_hi = 0x0ll;
+			result.parts.frac_lo = 0x0ll;
+			return result;
+		}
+
+		while (cexp < 0) {
+			cexp++;
+			rshift128(cfrac_hi, cfrac_lo, 1, &cfrac_hi, &cfrac_lo);
+		}
+
+		if (shift_out & (0x1ull << 63)) {
+			add128(cfrac_hi, cfrac_lo, 0x0ll, 0x1ll, &cfrac_hi, &cfrac_lo);
+		}
+
+		lshift128(FLOAT128_HIDDEN_BIT_MASK_HI, FLOAT128_HIDDEN_BIT_MASK_LO,
+		    1, &tmp_hi, &tmp_lo);
+		and128(cfrac_hi, cfrac_lo, tmp_hi, tmp_lo, &tmp_hi, &tmp_lo);
+		if (!lt128(0x0ll, 0x0ll, tmp_hi, tmp_lo)) {
+			not128(FLOAT128_HIDDEN_BIT_MASK_HI, FLOAT128_HIDDEN_BIT_MASK_LO,
+			    &tmp_hi, &tmp_lo);
+			and128(cfrac_hi, cfrac_lo, tmp_hi, tmp_lo, &tmp_hi, &tmp_lo);
+			result.parts.frac_hi = tmp_hi;
+			result.parts.frac_lo = tmp_lo;
+			return result;
+		}
+	} else {
+		if (shift_out & (0x1ull << 63)) {
+			add128(cfrac_hi, cfrac_lo, 0x0ll, 0x1ll, &cfrac_hi, &cfrac_lo);
+		}
+	}
+
+	++cexp;
+
+	lshift128(FLOAT128_HIDDEN_BIT_MASK_HI, FLOAT128_HIDDEN_BIT_MASK_LO,
+	    1, &tmp_hi, &tmp_lo);
+	and128(cfrac_hi, cfrac_lo, tmp_hi, tmp_lo, &tmp_hi, &tmp_lo);
+	if (lt128(0x0ll, 0x0ll, tmp_hi, tmp_lo)) {
+		++cexp;
+		rshift128(cfrac_hi, cfrac_lo, 1, &cfrac_hi, &cfrac_lo);
+	}
+
+	/* check overflow */
+	if (cexp >= FLOAT128_MAX_EXPONENT) {
+		/* FIXME: overflow, return infinity */
+		result.parts.exp = FLOAT128_MAX_EXPONENT;
+		result.parts.frac_hi = 0x0ll;
+		result.parts.frac_lo = 0x0ll;
+		return result;
+	}
+
+	result.parts.exp = (uint32_t) cexp;
+
+	not128(FLOAT128_HIDDEN_BIT_MASK_HI, FLOAT128_HIDDEN_BIT_MASK_LO,
+	    &tmp_hi, &tmp_lo);
+	and128(cfrac_hi, cfrac_lo, tmp_hi, tmp_lo, &tmp_hi, &tmp_lo);
+	result.parts.frac_hi = tmp_hi;
+	result.parts.frac_lo = tmp_lo;
+
+	return result;	
+}
+
+/**
+ * Counts leading zeroes in byte.
+ *
+ * @param i Byte for which to count leading zeroes.
+ * @return Number of detected leading zeroes.
+ */
+int countZeroes8(uint8_t i)
+{
+	return zeroTable[i];
+}
+
+/** 
+ * Counts leading zeroes in 32bit unsigned integer.
+ *
+ * @param i Integer for which to count leading zeroes.
+ * @return Number of detected leading zeroes.
+ */
+int countZeroes32(uint32_t i)
+{
+	int j;
+	for (j = 0; j < 32; j += 8) {
+		if (i & (0xFF << (24 - j))) {
+			return (j + countZeroes8(i >> (24 - j)));
+		}
+	}
+
+	return 32;
+}
+
+/**
+ * Counts leading zeroes in 64bit unsigned integer.
+ *
+ * @param i Integer for which to count leading zeroes.
+ * @return Number of detected leading zeroes.
  */
 int countZeroes64(uint64_t i)
 {
 	int j;
-	for (j =0; j < 64; j += 8) {
-		if ( i & (0xFFll << (56 - j))) {
+	for (j = 0; j < 64; j += 8) {
+		if (i & (0xFFll << (56 - j))) {
 			return (j + countZeroes8(i >> (56 - j)));
 		}
@@ -138,75 +279,410 @@
 }
 
-/** Counts leading zeroes in 32bit unsigned integer
- * @param i 
- */
-int countZeroes32(uint32_t i)
-{
-	int j;
-	for (j =0; j < 32; j += 8) {
-		if ( i & (0xFF << (24 - j))) {
-			return (j + countZeroes8(i >> (24 - j)));
-		}
-	}
-
-	return 32;
-}
-
-/** Counts leading zeroes in byte
- * @param i 
- */
-int countZeroes8(uint8_t i)
-{
-	return zeroTable[i];
-}
-
-/** Round and normalize number expressed by exponent and fraction with first bit (equal to hidden bit) at 30. bit
- * @param exp exponent 
- * @param fraction part with hidden bit shifted to 30. bit
+/**
+ * Round and normalize number expressed by exponent and fraction with
+ * first bit (equal to hidden bit) at 30th bit.
+ *
+ * @param exp Exponent part.
+ * @param fraction Fraction with hidden bit shifted to 30th bit.
  */
 void roundFloat32(int32_t *exp, uint32_t *fraction)
 {
 	/* rounding - if first bit after fraction is set then round up */
-	(*fraction) += (0x1 << 6);
-	
-	if ((*fraction) & (FLOAT32_HIDDEN_BIT_MASK << 8)) { 
+	(*fraction) += (0x1 << (32 - FLOAT32_FRACTION_SIZE - 3));
+	
+	if ((*fraction) & 
+	    (FLOAT32_HIDDEN_BIT_MASK << (32 - FLOAT32_FRACTION_SIZE - 1))) {
 		/* rounding overflow */
 		++(*exp);
 		(*fraction) >>= 1;
-	};
-	
-	if (((*exp) >= FLOAT32_MAX_EXPONENT ) || ((*exp) < 0)) {
+	}
+	
+	if (((*exp) >= FLOAT32_MAX_EXPONENT) || ((*exp) < 0)) {
 		/* overflow - set infinity as result */
 		(*exp) = FLOAT32_MAX_EXPONENT;
 		(*fraction) = 0;
-		return;
-	}
-
-	return;
-}
-
-/** Round and normalize number expressed by exponent and fraction with first bit (equal to hidden bit) at 62. bit
- * @param exp exponent 
- * @param fraction part with hidden bit shifted to 62. bit
+	}
+}
+
+/**
+ * Round and normalize number expressed by exponent and fraction with
+ * first bit (equal to hidden bit) at 62nd bit.
+ *
+ * @param exp Exponent part.
+ * @param fraction Fraction with hidden bit shifted to 62nd bit.
  */
 void roundFloat64(int32_t *exp, uint64_t *fraction)
 {
 	/* rounding - if first bit after fraction is set then round up */
-	(*fraction) += (0x1 << 9);
-	
-	if ((*fraction) & (FLOAT64_HIDDEN_BIT_MASK << 11)) { 
+	(*fraction) += (0x1 << (64 - FLOAT64_FRACTION_SIZE - 3));
+	
+	if ((*fraction) & 
+	    (FLOAT64_HIDDEN_BIT_MASK << (64 - FLOAT64_FRACTION_SIZE - 1))) {
 		/* rounding overflow */
 		++(*exp);
 		(*fraction) >>= 1;
-	};
-	
-	if (((*exp) >= FLOAT64_MAX_EXPONENT ) || ((*exp) < 0)) {
+	}
+	
+	if (((*exp) >= FLOAT64_MAX_EXPONENT) || ((*exp) < 0)) {
 		/* overflow - set infinity as result */
 		(*exp) = FLOAT64_MAX_EXPONENT;
 		(*fraction) = 0;
-		return;
-	}
-
-	return;
+	}
+}
+
+/**
+ * Round and normalize number expressed by exponent and fraction with
+ * first bit (equal to hidden bit) at 126th bit.
+ *
+ * @param exp Exponent part.
+ * @param frac_hi High part of fraction part with hidden bit shifted to 126th bit.
+ * @param frac_lo Low part of fraction part with hidden bit shifted to 126th bit.
+ */
+void roundFloat128(int32_t *exp, uint64_t *frac_hi, uint64_t *frac_lo)
+{
+	uint64_t tmp_hi, tmp_lo;
+
+	/* rounding - if first bit after fraction is set then round up */
+	lshift128(0x0ll, 0x1ll, (128 - FLOAT128_FRACTION_SIZE - 3), &tmp_hi, &tmp_lo);
+	add128(*frac_hi, *frac_lo, tmp_hi, tmp_lo, frac_hi, frac_lo);
+
+	lshift128(FLOAT128_HIDDEN_BIT_MASK_HI, FLOAT128_HIDDEN_BIT_MASK_LO,
+	    (128 - FLOAT128_FRACTION_SIZE - 1), &tmp_hi, &tmp_lo);
+	and128(*frac_hi, *frac_lo, tmp_hi, tmp_lo, &tmp_hi, &tmp_lo);
+	if (lt128(0x0ll, 0x0ll, tmp_hi, tmp_lo)) {
+		/* rounding overflow */
+		++(*exp);
+		rshift128(*frac_hi, *frac_lo, 1, frac_hi, frac_lo);
+	}
+
+	if (((*exp) >= FLOAT128_MAX_EXPONENT) || ((*exp) < 0)) {
+		/* overflow - set infinity as result */
+		(*exp) = FLOAT128_MAX_EXPONENT;
+		(*frac_hi) = 0;
+		(*frac_lo) = 0;
+	}
+}
+
+/**
+ * Logical shift left on the 128-bit operand.
+ *
+ * @param a_hi High part of the input operand.
+ * @param a_lo Low part of the input operand.
+ * @param shift Number of bits by which to shift.
+ * @param r_hi Address to store high part of the result.
+ * @param r_lo Address to store low part of the result.
+ */
+void lshift128(
+    uint64_t a_hi, uint64_t a_lo, int shift,
+    uint64_t *r_hi, uint64_t *r_lo)
+{
+	if (shift <= 0) {
+		/* do nothing */
+	} else if (shift >= 128) {
+		a_hi = 0;
+		a_lo = 0;
+	} else if (shift >= 64) {
+		a_hi = a_lo << (shift - 64);
+		a_lo = 0;
+	} else {
+		a_hi <<= shift;
+		a_hi |= a_lo >> (64 - shift);
+		a_lo <<= shift;
+	}
+
+	*r_hi = a_hi;
+	*r_lo = a_lo;
+}
+
+/**
+ * Logical shift right on the 128-bit operand.
+ *
+ * @param a_hi High part of the input operand.
+ * @param a_lo Low part of the input operand.
+ * @param shift Number of bits by which to shift.
+ * @param r_hi Address to store high part of the result.
+ * @param r_lo Address to store low part of the result.
+ */
+void rshift128(
+    uint64_t a_hi, uint64_t a_lo, int shift,
+    uint64_t *r_hi, uint64_t *r_lo)
+{
+	if (shift <= 0) {
+		/* do nothing */
+	} else 	if (shift >= 128) {
+		a_hi = 0;
+		a_lo = 0;
+	} else if (shift >= 64) {
+		a_lo = a_hi >> (shift - 64);
+		a_hi = 0;
+	} else {
+		a_lo >>= shift;
+		a_lo |= a_hi << (64 - shift);
+		a_hi >>= shift;
+	}
+
+	*r_hi = a_hi;
+	*r_lo = a_lo;
+}
+
+/**
+ * Bitwise AND on 128-bit operands.
+ *
+ * @param a_hi High part of the first input operand.
+ * @param a_lo Low part of the first input operand.
+ * @param b_hi High part of the second input operand.
+ * @param b_lo Low part of the second input operand.
+ * @param r_hi Address to store high part of the result.
+ * @param r_lo Address to store low part of the result.
+ */
+void and128(
+    uint64_t a_hi, uint64_t a_lo,
+    uint64_t b_hi, uint64_t b_lo,
+    uint64_t *r_hi, uint64_t *r_lo)
+{
+	*r_hi = a_hi & b_hi;
+	*r_lo = a_lo & b_lo;
+}
+
+/**
+ * Bitwise inclusive OR on 128-bit operands.
+ *
+ * @param a_hi High part of the first input operand.
+ * @param a_lo Low part of the first input operand.
+ * @param b_hi High part of the second input operand.
+ * @param b_lo Low part of the second input operand.
+ * @param r_hi Address to store high part of the result.
+ * @param r_lo Address to store low part of the result.
+ */
+void or128(
+    uint64_t a_hi, uint64_t a_lo,
+    uint64_t b_hi, uint64_t b_lo,
+    uint64_t *r_hi, uint64_t *r_lo)
+{
+	*r_hi = a_hi | b_hi;
+	*r_lo = a_lo | b_lo;
+}
+
+/**
+ * Bitwise exclusive OR on 128-bit operands.
+ *
+ * @param a_hi High part of the first input operand.
+ * @param a_lo Low part of the first input operand.
+ * @param b_hi High part of the second input operand.
+ * @param b_lo Low part of the second input operand.
+ * @param r_hi Address to store high part of the result.
+ * @param r_lo Address to store low part of the result.
+ */
+void xor128(
+    uint64_t a_hi, uint64_t a_lo,
+    uint64_t b_hi, uint64_t b_lo,
+    uint64_t *r_hi, uint64_t *r_lo)
+{
+	*r_hi = a_hi ^ b_hi;
+	*r_lo = a_lo ^ b_lo;
+}
+
+/**
+ * Bitwise NOT on the 128-bit operand.
+ *
+ * @param a_hi High part of the input operand.
+ * @param a_lo Low part of the input operand.
+ * @param r_hi Address to store high part of the result.
+ * @param r_lo Address to store low part of the result.
+ */
+void not128(
+    uint64_t a_hi, uint64_t a_lo,
+	uint64_t *r_hi, uint64_t *r_lo)
+{
+	*r_hi = ~a_hi;
+	*r_lo = ~a_lo;
+}
+
+/**
+ * Equality comparison of 128-bit operands.
+ *
+ * @param a_hi High part of the first input operand.
+ * @param a_lo Low part of the first input operand.
+ * @param b_hi High part of the second input operand.
+ * @param b_lo Low part of the second input operand.
+ * @return 1 if operands are equal, 0 otherwise.
+ */
+int eq128(uint64_t a_hi, uint64_t a_lo, uint64_t b_hi, uint64_t b_lo)
+{
+	return (a_hi == b_hi) && (a_lo == b_lo);
+}
+
+/**
+ * Lower-or-equal comparison of 128-bit operands.
+ *
+ * @param a_hi High part of the first input operand.
+ * @param a_lo Low part of the first input operand.
+ * @param b_hi High part of the second input operand.
+ * @param b_lo Low part of the second input operand.
+ * @return 1 if a is lower or equal to b, 0 otherwise.
+ */
+int le128(uint64_t a_hi, uint64_t a_lo, uint64_t b_hi, uint64_t b_lo)
+{
+	return (a_hi < b_hi) || ((a_hi == b_hi) && (a_lo <= b_lo));
+}
+
+/**
+ * Lower-than comparison of 128-bit operands.
+ *
+ * @param a_hi High part of the first input operand.
+ * @param a_lo Low part of the first input operand.
+ * @param b_hi High part of the second input operand.
+ * @param b_lo Low part of the second input operand.
+ * @return 1 if a is lower than b, 0 otherwise.
+ */
+int lt128(uint64_t a_hi, uint64_t a_lo, uint64_t b_hi, uint64_t b_lo)
+{
+	return (a_hi < b_hi) || ((a_hi == b_hi) && (a_lo < b_lo));
+}
+
+/**
+ * Addition of two 128-bit unsigned integers.
+ *
+ * @param a_hi High part of the first input operand.
+ * @param a_lo Low part of the first input operand.
+ * @param b_hi High part of the second input operand.
+ * @param b_lo Low part of the second input operand.
+ * @param r_hi Address to store high part of the result.
+ * @param r_lo Address to store low part of the result.
+ */
+void add128(uint64_t a_hi, uint64_t a_lo,
+    uint64_t b_hi, uint64_t b_lo,
+    uint64_t *r_hi, uint64_t *r_lo)
+{
+	uint64_t low = a_lo + b_lo;
+	*r_lo = low;
+	/* detect overflow to add a carry */
+	*r_hi = a_hi + b_hi + (low < a_lo);
+}
+
+/**
+ * Subtraction of two 128-bit unsigned integers.
+ *
+ * @param a_hi High part of the first input operand.
+ * @param a_lo Low part of the first input operand.
+ * @param b_hi High part of the second input operand.
+ * @param b_lo Low part of the second input operand.
+ * @param r_hi Address to store high part of the result.
+ * @param r_lo Address to store low part of the result.
+ */
+void sub128(uint64_t a_hi, uint64_t a_lo,
+    uint64_t b_hi, uint64_t b_lo,
+    uint64_t *r_hi, uint64_t *r_lo)
+{
+	*r_lo = a_lo - b_lo;
+	/* detect underflow to subtract a carry */
+	*r_hi = a_hi - b_hi - (a_lo < b_lo);
+}
+
+/**
+ * Multiplication of two 64-bit unsigned integers.
+ * 
+ * @param a First input operand.
+ * @param b Second input operand.
+ * @param r_hi Address to store high part of the result.
+ * @param r_lo Address to store low part of the result.
+ */
+void mul64(uint64_t a, uint64_t b, uint64_t *r_hi, uint64_t *r_lo)
+{
+	uint64_t low, high, middle1, middle2;
+	uint32_t alow, blow;
+
+	alow = a & 0xFFFFFFFF;
+	blow = b & 0xFFFFFFFF;
+
+	a >>= 32;
+	b >>= 32;
+
+	low = ((uint64_t) alow) * blow;
+	middle1 = a * blow;
+	middle2 = alow * b;
+	high = a * b;
+
+	middle1 += middle2;
+	high += (((uint64_t) (middle1 < middle2)) << 32) + (middle1 >> 32);
+	middle1 <<= 32;
+	low += middle1;
+	high += (low < middle1);
+	*r_lo = low;
+	*r_hi = high;
+}
+
+/**
+ * Multiplication of two 128-bit unsigned integers.
+ * 
+ * @param a_hi High part of the first input operand.
+ * @param a_lo Low part of the first input operand.
+ * @param b_hi High part of the second input operand.
+ * @param b_lo Low part of the second input operand.
+ * @param r_hihi Address to store first (highest) quarter of the result.
+ * @param r_hilo Address to store second quarter of the result.
+ * @param r_lohi Address to store third quarter of the result.
+ * @param r_lolo Address to store fourth (lowest) quarter of the result.
+ */
+void mul128(uint64_t a_hi, uint64_t a_lo, uint64_t b_hi, uint64_t b_lo,
+    uint64_t *r_hihi, uint64_t *r_hilo, uint64_t *r_lohi, uint64_t *r_lolo)
+{
+	uint64_t hihi, hilo, lohi, lolo;
+	uint64_t tmp1, tmp2;
+
+	mul64(a_lo, b_lo, &lohi, &lolo);
+	mul64(a_lo, b_hi, &hilo, &tmp2);
+	add128(hilo, tmp2, 0x0ll, lohi, &hilo, &lohi);
+	mul64(a_hi, b_hi, &hihi, &tmp1);
+	add128(hihi, tmp1, 0x0ll, hilo, &hihi, &hilo);
+	mul64(a_hi, b_lo, &tmp1, &tmp2);
+	add128(tmp1, tmp2, 0x0ll, lohi, &tmp1, &lohi);
+	add128(hihi, hilo, 0x0ll, tmp1, &hihi, &hilo);
+
+	*r_hihi = hihi;
+	*r_hilo = hilo;
+	*r_lohi = lohi;
+	*r_lolo = lolo;
+}
+
+/**
+ * Estimate the quotient of 128-bit unsigned dividend and 64-bit unsigned
+ * divisor.
+ * 
+ * @param a_hi High part of the dividend.
+ * @param a_lo Low part of the dividend.
+ * @param b Divisor.
+ * @return Quotient approximation.
+ */
+uint64_t div128est(uint64_t a_hi, uint64_t a_lo, uint64_t b)
+{
+	uint64_t b_hi, b_lo;
+	uint64_t rem_hi, rem_lo;
+	uint64_t tmp_hi, tmp_lo;
+	uint64_t result;
+
+	if (b <= a_hi) {
+		return 0xFFFFFFFFFFFFFFFFull;
+	}
+
+	b_hi = b >> 32;
+	result = ((b_hi << 32) <= a_hi) ? (0xFFFFFFFFull << 32) : (a_hi / b_hi) << 32;
+	mul64(b, result, &tmp_hi, &tmp_lo);
+	sub128(a_hi, a_lo, tmp_hi, tmp_lo, &rem_hi, &rem_lo);
+	
+	while ((int64_t) rem_hi < 0) {
+		result -= 0x1ll << 32;
+		b_lo = b << 32;
+		add128(rem_hi, rem_lo, b_hi, b_lo, &rem_hi, &rem_lo);
+	}
+
+	rem_hi = (rem_hi << 32) | (rem_lo >> 32);
+	if ((b_hi << 32) <= rem_hi) {
+		result |= 0xFFFFFFFF;
+	} else {
+		result |= rem_hi / b_hi;
+	}
+
+	return result;
 }
 
Index: uspace/lib/softfloat/generic/comparison.c
===================================================================
--- uspace/lib/softfloat/generic/comparison.c	(revision 750636ac4a65360c44846b7681af6ae46854dd3a)
+++ uspace/lib/softfloat/generic/comparison.c	(revision bb285b42d51bf15d56c4d20ab034e93a463a8b7f)
@@ -1,4 +1,5 @@
 /*
  * Copyright (c) 2005 Josef Cejka
+ * Copyright (c) 2011 Petr Koupy
  * All rights reserved.
  *
@@ -30,44 +31,138 @@
  * @{
  */
-/** @file
+/** @file Comparison functions.
  */
 
 #include <sftypes.h>
 #include <comparison.h>
-
-/* NaN : exp = 0xff and nonzero fraction */
+#include <common.h>
+
+/**
+ * Determines whether the given float represents NaN (either signalling NaN or
+ * quiet NaN).
+ *
+ * @param f Single-precision float.
+ * @return 1 if float is NaN, 0 otherwise.
+ */
 int isFloat32NaN(float32 f)
 {
+	/* NaN : exp = 0xff and nonzero fraction */
 	return ((f.parts.exp == 0xFF) && (f.parts.fraction));
 }
 
-/* NaN : exp = 0x7ff and nonzero fraction */
+/**
+ * Determines whether the given float represents NaN (either signalling NaN or
+ * quiet NaN).
+ *
+ * @param d Double-precision float.
+ * @return 1 if float is NaN, 0 otherwise.
+ */
 int isFloat64NaN(float64 d)
 {
+	/* NaN : exp = 0x7ff and nonzero fraction */
 	return ((d.parts.exp == 0x7FF) && (d.parts.fraction));
 }
 
-/* SigNaN : exp = 0xff fraction = 0xxxxx..x (binary), where at least one x is nonzero */
+/**
+ * Determines whether the given float represents NaN (either signalling NaN or
+ * quiet NaN).
+ *
+ * @param ld Quadruple-precision float.
+ * @return 1 if float is NaN, 0 otherwise.
+ */
+int isFloat128NaN(float128 ld)
+{
+	/* NaN : exp = 0x7fff and nonzero fraction */
+	return ((ld.parts.exp == 0x7FFF) &&
+	    !eq128(ld.parts.frac_hi, ld.parts.frac_lo, 0x0ll, 0x0ll));
+}
+
+/**
+ * Determines whether the given float represents signalling NaN.
+ *
+ * @param f Single-precision float.
+ * @return 1 if float is signalling NaN, 0 otherwise.
+ */
 int isFloat32SigNaN(float32 f)
 {
-	return ((f.parts.exp == 0xFF) && (f.parts.fraction < 0x400000) && (f.parts.fraction));
-}
-
-/* SigNaN : exp = 0x7ff fraction = 0xxxxx..x (binary), where at least one x is nonzero */
+	/* SigNaN : exp = 0xff and fraction = 0xxxxx..x (binary),
+	 * where at least one x is nonzero */
+	return ((f.parts.exp == 0xFF) &&
+	    (f.parts.fraction < 0x400000) && (f.parts.fraction));
+}
+
+/**
+ * Determines whether the given float represents signalling NaN.
+ *
+ * @param d Double-precision float.
+ * @return 1 if float is signalling NaN, 0 otherwise.
+ */
 int isFloat64SigNaN(float64 d)
 {
-	return ((d.parts.exp == 0x7FF) && (d.parts.fraction) && (d.parts.fraction < 0x8000000000000ll));
-}
-
+	/* SigNaN : exp = 0x7ff and fraction = 0xxxxx..x (binary),
+	 * where at least one x is nonzero */
+	return ((d.parts.exp == 0x7FF) &&
+	    (d.parts.fraction) && (d.parts.fraction < 0x8000000000000ll));
+}
+
+/**
+ * Determines whether the given float represents signalling NaN.
+ *
+ * @param ld Quadruple-precision float.
+ * @return 1 if float is signalling NaN, 0 otherwise.
+ */
+int isFloat128SigNaN(float128 ld)
+{
+	/* SigNaN : exp = 0x7fff and fraction = 0xxxxx..x (binary),
+	 * where at least one x is nonzero */
+	return ((ld.parts.exp == 0x7FFF) &&
+	    (ld.parts.frac_hi || ld.parts.frac_lo) &&
+	    lt128(ld.parts.frac_hi, ld.parts.frac_lo, 0x800000000000ll, 0x0ll));
+
+}
+
+/**
+ * Determines whether the given float represents positive or negative infinity.
+ *
+ * @param f Single-precision float.
+ * @return 1 if float is infinite, 0 otherwise.
+ */
 int isFloat32Infinity(float32 f)
 {
+	/* Inf : exp = 0xff and zero fraction */
 	return ((f.parts.exp == 0xFF) && (f.parts.fraction == 0x0));
 }
 
+/**
+ * Determines whether the given float represents positive or negative infinity.
+ *
+ * @param d Double-precision float.
+ * @return 1 if float is infinite, 0 otherwise.
+ */
 int isFloat64Infinity(float64 d) 
 {
+	/* Inf : exp = 0x7ff and zero fraction */
 	return ((d.parts.exp == 0x7FF) && (d.parts.fraction == 0x0));
 }
 
+/**
+ * Determines whether the given float represents positive or negative infinity.
+ *
+ * @param ld Quadruple-precision float.
+ * @return 1 if float is infinite, 0 otherwise.
+ */
+int isFloat128Infinity(float128 ld)
+{
+	/* Inf : exp = 0x7fff and zero fraction */
+	return ((ld.parts.exp == 0x7FFF) &&
+	    eq128(ld.parts.frac_hi, ld.parts.frac_lo, 0x0ll, 0x0ll));
+}
+
+/**
+ * Determines whether the given float represents positive or negative zero.
+ *
+ * @param f Single-precision float.
+ * @return 1 if float is zero, 0 otherwise.
+ */
 int isFloat32Zero(float32 f)
 {
@@ -75,4 +170,10 @@
 }
 
+/**
+ * Determines whether the given float represents positive or negative zero.
+ *
+ * @param d Double-precision float.
+ * @return 1 if float is zero, 0 otherwise.
+ */
 int isFloat64Zero(float64 d)
 {
@@ -81,25 +182,93 @@
 
 /**
- * @return 1 if both floats are equal - but NaNs are not recognized
+ * Determines whether the given float represents positive or negative zero.
+ *
+ * @param ld Quadruple-precision float.
+ * @return 1 if float is zero, 0 otherwise.
+ */
+int isFloat128Zero(float128 ld)
+{
+	uint64_t tmp_hi;
+	uint64_t tmp_lo;
+
+	and128(ld.binary.hi, ld.binary.lo,
+	    0x7FFFFFFFFFFFFFFFll, 0xFFFFFFFFFFFFFFFFll, &tmp_hi, &tmp_lo);
+
+	return eq128(tmp_hi, tmp_lo, 0x0ll, 0x0ll);
+}
+
+/**
+ * Determine whether two floats are equal. NaNs are not recognized.
+ *
+ * @param a First single-precision operand.
+ * @param b Second single-precision operand.
+ * @return 1 if both floats are equal, 0 otherwise.
  */
 int isFloat32eq(float32 a, float32 b)
 {
 	/* a equals to b or both are zeros (with any sign) */
-	return ((a.binary==b.binary) || (((a.binary | b.binary) & 0x7FFFFFFF) == 0));
-}
-
-/**
- * @return 1 if a < b - but NaNs are not recognized 
+	return ((a.binary == b.binary) ||
+	    (((a.binary | b.binary) & 0x7FFFFFFF) == 0));
+}
+
+/**
+ * Determine whether two floats are equal. NaNs are not recognized.
+ *
+ * @param a First double-precision operand.
+ * @param b Second double-precision operand.
+ * @return 1 if both floats are equal, 0 otherwise.
+ */
+int isFloat64eq(float64 a, float64 b)
+{
+	/* a equals to b or both are zeros (with any sign) */
+	return ((a.binary == b.binary) ||
+	    (((a.binary | b.binary) & 0x7FFFFFFFFFFFFFFFll) == 0));
+}
+
+/**
+ * Determine whether two floats are equal. NaNs are not recognized.
+ *
+ * @param a First quadruple-precision operand.
+ * @param b Second quadruple-precision operand.
+ * @return 1 if both floats are equal, 0 otherwise.
+ */
+int isFloat128eq(float128 a, float128 b)
+{
+	uint64_t tmp_hi;
+	uint64_t tmp_lo;
+
+	/* both are zeros (with any sign) */
+	or128(a.binary.hi, a.binary.lo,
+	    b.binary.hi, b.binary.lo, &tmp_hi, &tmp_lo);
+	and128(tmp_hi, tmp_lo,
+	    0x7FFFFFFFFFFFFFFFll, 0xFFFFFFFFFFFFFFFFll, &tmp_hi, &tmp_lo);
+	int both_zero = eq128(tmp_hi, tmp_lo, 0x0ll, 0x0ll);
+	
+	/* a equals to b */
+	int are_equal = eq128(a.binary.hi, a.binary.lo, b.binary.hi, b.binary.lo);
+
+	return are_equal || both_zero;
+}
+
+/**
+ * Lower-than comparison between two floats. NaNs are not recognized.
+ *
+ * @param a First single-precision operand.
+ * @param b Second single-precision operand.
+ * @return 1 if a is lower than b, 0 otherwise.
  */
 int isFloat32lt(float32 a, float32 b) 
 {
-	if (((a.binary | b.binary) & 0x7FFFFFFF) == 0)
+	if (((a.binary | b.binary) & 0x7FFFFFFF) == 0) {
 		return 0; /* +- zeroes */
+	}
 	
-	if ((a.parts.sign) && (b.parts.sign))
+	if ((a.parts.sign) && (b.parts.sign)) {
 		/* if both are negative, smaller is that with greater binary value */
 		return (a.binary > b.binary);
+	}
 	
-	/* lets negate signs - now will be positive numbers allways bigger than negative (first bit will be set for unsigned integer comparison) */
+	/* let's negate the signs - now positive numbers will always be bigger
+	 * than negative (first bit will be set for unsigned integer comparison) */
 	a.parts.sign = !a.parts.sign;
 	b.parts.sign = !b.parts.sign;
@@ -108,16 +277,80 @@
 
 /**
- * @return 1 if a > b - but NaNs are not recognized 
+ * Lower-than comparison between two floats. NaNs are not recognized.
+ *
+ * @param a First double-precision operand.
+ * @param b Second double-precision operand.
+ * @return 1 if a is lower than b, 0 otherwise.
+ */
+int isFloat64lt(float64 a, float64 b)
+{
+	if (((a.binary | b.binary) & 0x7FFFFFFFFFFFFFFFll) == 0) {
+		return 0; /* +- zeroes */
+	}
+
+	if ((a.parts.sign) && (b.parts.sign)) {
+		/* if both are negative, smaller is that with greater binary value */
+		return (a.binary > b.binary);
+	}
+
+	/* let's negate the signs - now positive numbers will always be bigger
+	 * than negative (first bit will be set for unsigned integer comparison) */
+	a.parts.sign = !a.parts.sign;
+	b.parts.sign = !b.parts.sign;
+	return (a.binary < b.binary);
+}
+
+/**
+ * Lower-than comparison between two floats. NaNs are not recognized.
+ *
+ * @param a First quadruple-precision operand.
+ * @param b Second quadruple-precision operand.
+ * @return 1 if a is lower than b, 0 otherwise.
+ */
+int isFloat128lt(float128 a, float128 b)
+{
+	uint64_t tmp_hi;
+	uint64_t tmp_lo;
+
+	or128(a.binary.hi, a.binary.lo,
+	    b.binary.hi, b.binary.lo, &tmp_hi, &tmp_lo);
+	and128(tmp_hi, tmp_lo,
+	    0x7FFFFFFFFFFFFFFFll, 0xFFFFFFFFFFFFFFFFll, &tmp_hi, &tmp_lo);
+	if (eq128(tmp_hi, tmp_lo, 0x0ll, 0x0ll)) {
+		return 0; /* +- zeroes */
+	}
+
+	if ((a.parts.sign) && (b.parts.sign)) {
+		/* if both are negative, smaller is that with greater binary value */
+		return lt128(b.binary.hi, b.binary.lo, a.binary.hi, a.binary.lo);
+	}
+
+	/* let's negate the signs - now positive numbers will always be bigger
+	 * than negative (first bit will be set for unsigned integer comparison) */
+	a.parts.sign = !a.parts.sign;
+	b.parts.sign = !b.parts.sign;
+	return lt128(a.binary.hi, a.binary.lo, b.binary.hi, b.binary.lo);
+}
+
+/**
+ * Greater-than comparison between two floats. NaNs are not recognized.
+ *
+ * @param a First single-precision operand.
+ * @param b Second single-precision operand.
+ * @return 1 if a is greater than b, 0 otherwise.
  */
 int isFloat32gt(float32 a, float32 b) 
 {
-	if (((a.binary | b.binary) & 0x7FFFFFFF) == 0)
+	if (((a.binary | b.binary) & 0x7FFFFFFF) == 0) {
 		return 0; /* zeroes are equal with any sign */
+	}
 	
-	if ((a.parts.sign) && (b.parts.sign))
+	if ((a.parts.sign) && (b.parts.sign)) {
 		/* if both are negative, greater is that with smaller binary value */
 		return (a.binary < b.binary);
+	}
 	
-	/* lets negate signs - now will be positive numbers allways bigger than negative (first bit will be set for unsigned integer comparison) */
+	/* let's negate the signs - now positive numbers will always be bigger
+	 * than negative (first bit will be set for unsigned integer comparison) */
 	a.parts.sign = !a.parts.sign;
 	b.parts.sign = !b.parts.sign;
@@ -125,4 +358,61 @@
 }
 
+/**
+ * Greater-than comparison between two floats. NaNs are not recognized.
+ *
+ * @param a First double-precision operand.
+ * @param b Second double-precision operand.
+ * @return 1 if a is greater than b, 0 otherwise.
+ */
+int isFloat64gt(float64 a, float64 b)
+{
+	if (((a.binary | b.binary) & 0x7FFFFFFFFFFFFFFFll) == 0) {
+		return 0; /* zeroes are equal with any sign */
+	}
+
+	if ((a.parts.sign) && (b.parts.sign)) {
+		/* if both are negative, greater is that with smaller binary value */
+		return (a.binary < b.binary);
+	}
+
+	/* let's negate the signs - now positive numbers will always be bigger
+	 * than negative (first bit will be set for unsigned integer comparison) */
+	a.parts.sign = !a.parts.sign;
+	b.parts.sign = !b.parts.sign;
+	return (a.binary > b.binary);
+}
+
+/**
+ * Greater-than comparison between two floats. NaNs are not recognized.
+ *
+ * @param a First quadruple-precision operand.
+ * @param b Second quadruple-precision operand.
+ * @return 1 if a is greater than b, 0 otherwise.
+ */
+int isFloat128gt(float128 a, float128 b)
+{
+	uint64_t tmp_hi;
+	uint64_t tmp_lo;
+
+	or128(a.binary.hi, a.binary.lo,
+	    b.binary.hi, b.binary.lo, &tmp_hi, &tmp_lo);
+	and128(tmp_hi, tmp_lo,
+	    0x7FFFFFFFFFFFFFFFll, 0xFFFFFFFFFFFFFFFFll, &tmp_hi, &tmp_lo);
+	if (eq128(tmp_hi, tmp_lo, 0x0ll, 0x0ll)) {
+		return 0; /* zeroes are equal with any sign */
+	}
+
+	if ((a.parts.sign) && (b.parts.sign)) {
+		/* if both are negative, greater is that with smaller binary value */
+		return lt128(a.binary.hi, a.binary.lo, b.binary.hi, b.binary.lo);
+	}
+
+	/* let's negate the signs - now positive numbers will always be bigger
+	 * than negative (first bit will be set for unsigned integer comparison) */
+	a.parts.sign = !a.parts.sign;
+	b.parts.sign = !b.parts.sign;
+	return lt128(b.binary.hi, b.binary.lo, a.binary.hi, a.binary.lo);
+}
+
 /** @}
  */
Index: uspace/lib/softfloat/generic/conversion.c
===================================================================
--- uspace/lib/softfloat/generic/conversion.c	(revision 750636ac4a65360c44846b7681af6ae46854dd3a)
+++ uspace/lib/softfloat/generic/conversion.c	(revision bb285b42d51bf15d56c4d20ab034e93a463a8b7f)
@@ -1,4 +1,5 @@
 /*
  * Copyright (c) 2005 Josef Cejka
+ * Copyright (c) 2011 Petr Koupy
  * All rights reserved.
  *
@@ -30,11 +31,11 @@
  * @{
  */
-/** @file
- */
-
-#include "sftypes.h"
-#include "conversion.h"
-#include "comparison.h"
-#include "common.h"
+/** @file Conversion of precision and conversion between integers and floats.
+ */
+
+#include <sftypes.h>
+#include <conversion.h>
+#include <comparison.h>
+#include <common.h>
 
 float64 convertFloat32ToFloat64(float32 a) 
@@ -48,8 +49,8 @@
 	
 	if ((isFloat32Infinity(a)) || (isFloat32NaN(a))) {
-		result.parts.exp = 0x7FF;
+		result.parts.exp = FLOAT64_MAX_EXPONENT;
 		/* TODO; check if its correct for SigNaNs*/
 		return result;
-	};
+	}
 	
 	result.parts.exp = a.parts.exp + ((int) FLOAT64_BIAS - FLOAT32_BIAS);
@@ -57,6 +58,6 @@
 		/* normalize denormalized numbers */
 
-		if (result.parts.fraction == 0ll) { /* fix zero */
-			result.parts.exp = 0ll;
+		if (result.parts.fraction == 0) { /* fix zero */
+			result.parts.exp = 0;
 			return result;
 		}
@@ -64,15 +65,114 @@
 		frac = result.parts.fraction;
 		
-		while (!(frac & (0x10000000000000ll))) {
+		while (!(frac & FLOAT64_HIDDEN_BIT_MASK)) {
 			frac <<= 1;
 			--result.parts.exp;
-		};
+		}
 		
 		++result.parts.exp;
 		result.parts.fraction = frac;
-	};
-	
-	return result;
-	
+	}
+	
+	return result;
+}
+
+float128 convertFloat32ToFloat128(float32 a)
+{
+	float128 result;
+	uint64_t frac_hi, frac_lo;
+	uint64_t tmp_hi, tmp_lo;
+
+	result.parts.sign = a.parts.sign;
+	result.parts.frac_hi = 0;
+	result.parts.frac_lo = a.parts.fraction;
+	lshift128(result.parts.frac_hi, result.parts.frac_lo,
+	    (FLOAT128_FRACTION_SIZE - FLOAT32_FRACTION_SIZE),
+	    &frac_hi, &frac_lo);
+	result.parts.frac_hi = frac_hi;
+	result.parts.frac_lo = frac_lo;
+
+	if ((isFloat32Infinity(a)) || (isFloat32NaN(a))) {
+		result.parts.exp = FLOAT128_MAX_EXPONENT;
+		/* TODO: check if this is correct for SigNaNs */
+		return result;
+	}
+
+	result.parts.exp = a.parts.exp + ((int) FLOAT128_BIAS - FLOAT32_BIAS);
+	if (a.parts.exp == 0) {
+		/* normalize denormalized numbers */
+
+		if (eq128(result.parts.frac_hi,
+		    result.parts.frac_lo, 0x0ll, 0x0ll)) { /* fix zero */
+			result.parts.exp = 0;
+			return result;
+		}
+
+		frac_hi = result.parts.frac_hi;
+		frac_lo = result.parts.frac_lo;
+
+		and128(frac_hi, frac_lo,
+		    FLOAT128_HIDDEN_BIT_MASK_HI, FLOAT128_HIDDEN_BIT_MASK_LO,
+		    &tmp_hi, &tmp_lo);
+		while (!lt128(0x0ll, 0x0ll, tmp_hi, tmp_lo)) {
+			lshift128(frac_hi, frac_lo, 1, &frac_hi, &frac_lo);
+			--result.parts.exp;
+		}
+
+		++result.parts.exp;
+		result.parts.frac_hi = frac_hi;
+		result.parts.frac_lo = frac_lo;
+	}
+
+	return result;
+}
+
+float128 convertFloat64ToFloat128(float64 a)
+{
+	float128 result;
+	uint64_t frac_hi, frac_lo;
+	uint64_t tmp_hi, tmp_lo;
+
+	result.parts.sign = a.parts.sign;
+	result.parts.frac_hi = 0;
+	result.parts.frac_lo = a.parts.fraction;
+	lshift128(result.parts.frac_hi, result.parts.frac_lo,
+	    (FLOAT128_FRACTION_SIZE - FLOAT64_FRACTION_SIZE),
+	    &frac_hi, &frac_lo);
+	result.parts.frac_hi = frac_hi;
+	result.parts.frac_lo = frac_lo;
+
+	if ((isFloat64Infinity(a)) || (isFloat64NaN(a))) {
+		result.parts.exp = FLOAT128_MAX_EXPONENT;
+		/* TODO: check if this is correct for SigNaNs */
+		return result;
+	}
+
+	result.parts.exp = a.parts.exp + ((int) FLOAT128_BIAS - FLOAT64_BIAS);
+	if (a.parts.exp == 0) {
+		/* normalize denormalized numbers */
+
+		if (eq128(result.parts.frac_hi,
+		    result.parts.frac_lo, 0x0ll, 0x0ll)) { /* fix zero */
+			result.parts.exp = 0;
+			return result;
+		}
+
+		frac_hi = result.parts.frac_hi;
+		frac_lo = result.parts.frac_lo;
+
+		and128(frac_hi, frac_lo,
+		    FLOAT128_HIDDEN_BIT_MASK_HI, FLOAT128_HIDDEN_BIT_MASK_LO,
+		    &tmp_hi, &tmp_lo);
+		while (!lt128(0x0ll, 0x0ll, tmp_hi, tmp_lo)) {
+			lshift128(frac_hi, frac_lo, 1, &frac_hi, &frac_lo);
+			--result.parts.exp;
+		}
+
+		++result.parts.exp;
+		result.parts.frac_hi = frac_hi;
+		result.parts.frac_lo = frac_lo;
+	}
+
+	return result;
 }
 
@@ -86,32 +186,31 @@
 	
 	if (isFloat64NaN(a)) {
-		
-		result.parts.exp = 0xFF;
+		result.parts.exp = FLOAT32_MAX_EXPONENT;
 		
 		if (isFloat64SigNaN(a)) {
-			result.parts.fraction = 0x400000; /* set first bit of fraction nonzero */
+			/* set first bit of fraction nonzero */
+			result.parts.fraction = FLOAT32_HIDDEN_BIT_MASK >> 1;
 			return result;
 		}
-	
-		result.parts.fraction = 0x1; /* fraction nonzero but its first bit is zero */
-		return result;
-	};
+
+		/* fraction nonzero but its first bit is zero */
+		result.parts.fraction = 0x1;
+		return result;
+	}
 
 	if (isFloat64Infinity(a)) {
 		result.parts.fraction = 0;
-		result.parts.exp = 0xFF;
-		return result;
-	};
-
-	exp = (int)a.parts.exp - FLOAT64_BIAS + FLOAT32_BIAS;
-	
-	if (exp >= 0xFF) {
-		/*FIXME: overflow*/
+		result.parts.exp = FLOAT32_MAX_EXPONENT;
+		return result;
+	}
+
+	exp = (int) a.parts.exp - FLOAT64_BIAS + FLOAT32_BIAS;
+	
+	if (exp >= FLOAT32_MAX_EXPONENT) {
+		/* FIXME: overflow */
 		result.parts.fraction = 0;
-		result.parts.exp = 0xFF;
-		return result;
-		
-	} else if (exp <= 0 ) {
-		
+		result.parts.exp = FLOAT32_MAX_EXPONENT;
+		return result;
+	} else if (exp <= 0) {
 		/* underflow or denormalized */
 		
@@ -119,14 +218,14 @@
 		
 		exp *= -1;	
-		if (exp > FLOAT32_FRACTION_SIZE ) {
+		if (exp > FLOAT32_FRACTION_SIZE) {
 			/* FIXME: underflow */
 			result.parts.fraction = 0;
 			return result;
-		};
+		}
 		
 		/* denormalized */
 		
 		frac = a.parts.fraction; 
-		frac |= 0x10000000000000ll; /* denormalize and set hidden bit */
+		frac |= FLOAT64_HIDDEN_BIT_MASK; /* denormalize and set hidden bit */
 		
 		frac >>= (FLOAT64_FRACTION_SIZE - FLOAT32_FRACTION_SIZE + 1);
@@ -135,19 +234,179 @@
 			--exp;
 			frac >>= 1;
-		};
+		}
 		result.parts.fraction = frac;
 		
 		return result;
-	};
+	}
 
 	result.parts.exp = exp;
-	result.parts.fraction = a.parts.fraction >> (FLOAT64_FRACTION_SIZE - FLOAT32_FRACTION_SIZE);
-	return result;
-}
-
-
-/** Helping procedure for converting float32 to uint32
- * @param a floating point number in normalized form (no NaNs or Inf are checked )
- * @return unsigned integer
+	result.parts.fraction =
+	    a.parts.fraction >> (FLOAT64_FRACTION_SIZE - FLOAT32_FRACTION_SIZE);
+	return result;
+}
+
+float32 convertFloat128ToFloat32(float128 a)
+{
+	float32 result;
+	int32_t exp;
+	uint64_t frac_hi, frac_lo;
+
+	result.parts.sign = a.parts.sign;
+
+	if (isFloat128NaN(a)) {
+		result.parts.exp = FLOAT32_MAX_EXPONENT;
+
+		if (isFloat128SigNaN(a)) {
+			/* set first bit of fraction nonzero */
+			result.parts.fraction = FLOAT32_HIDDEN_BIT_MASK >> 1;
+			return result;
+		}
+
+		/* fraction nonzero but its first bit is zero */
+		result.parts.fraction = 0x1;
+		return result;
+	}
+
+	if (isFloat128Infinity(a)) {
+		result.parts.fraction = 0;
+		result.parts.exp = FLOAT32_MAX_EXPONENT;
+		return result;
+	}
+
+	exp = (int) a.parts.exp - FLOAT128_BIAS + FLOAT32_BIAS;
+
+	if (exp >= FLOAT32_MAX_EXPONENT) {
+		/* FIXME: overflow */
+		result.parts.fraction = 0;
+		result.parts.exp = FLOAT32_MAX_EXPONENT;
+		return result;
+	} else if (exp <= 0) {
+		/* underflow or denormalized */
+
+		result.parts.exp = 0;
+
+		exp *= -1;
+		if (exp > FLOAT32_FRACTION_SIZE) {
+			/* FIXME: underflow */
+			result.parts.fraction = 0;
+			return result;
+		}
+
+		/* denormalized */
+
+		frac_hi = a.parts.frac_hi;
+		frac_lo = a.parts.frac_lo;
+
+		/* denormalize and set hidden bit */
+		frac_hi |= FLOAT128_HIDDEN_BIT_MASK_HI;
+
+		rshift128(frac_hi, frac_lo,
+		    (FLOAT128_FRACTION_SIZE - FLOAT32_FRACTION_SIZE + 1),
+		    &frac_hi, &frac_lo);
+
+		while (exp > 0) {
+			--exp;
+			rshift128(frac_hi, frac_lo, 1, &frac_hi, &frac_lo);
+		}
+		result.parts.fraction = frac_lo;
+
+		return result;
+	}
+
+	result.parts.exp = exp;
+	frac_hi = a.parts.frac_hi;
+	frac_lo = a.parts.frac_lo;
+	/* normal number: hidden bit is implicit, shift by size difference only */
+	rshift128(frac_hi, frac_lo,
+	    (FLOAT128_FRACTION_SIZE - FLOAT32_FRACTION_SIZE), &frac_hi, &frac_lo);
+	result.parts.fraction = frac_lo;
+	return result;
+}
+
+float64 convertFloat128ToFloat64(float128 a)
+{
+	float64 result;
+	int32_t exp;
+	uint64_t frac_hi, frac_lo;
+
+	result.parts.sign = a.parts.sign;
+
+	if (isFloat128NaN(a)) {
+		result.parts.exp = FLOAT64_MAX_EXPONENT;
+
+		if (isFloat128SigNaN(a)) {
+			/* set first bit of fraction nonzero */
+			result.parts.fraction = FLOAT64_HIDDEN_BIT_MASK >> 1;
+			return result;
+		}
+
+		/* fraction nonzero but its first bit is zero */
+		result.parts.fraction = 0x1;
+		return result;
+	}
+
+	if (isFloat128Infinity(a)) {
+		result.parts.fraction = 0;
+		result.parts.exp = FLOAT64_MAX_EXPONENT;
+		return result;
+	}
+
+	exp = (int) a.parts.exp - FLOAT128_BIAS + FLOAT64_BIAS;
+
+	if (exp >= FLOAT64_MAX_EXPONENT) {
+		/* FIXME: overflow */
+		result.parts.fraction = 0;
+		result.parts.exp = FLOAT64_MAX_EXPONENT;
+		return result;
+	} else if (exp <= 0) {
+		/* underflow or denormalized */
+
+		result.parts.exp = 0;
+
+		exp *= -1;
+		if (exp > FLOAT64_FRACTION_SIZE) {
+			/* FIXME: underflow */
+			result.parts.fraction = 0;
+			return result;
+		}
+
+		/* denormalized */
+
+		frac_hi = a.parts.frac_hi;
+		frac_lo = a.parts.frac_lo;
+
+		/* denormalize and set hidden bit */
+		frac_hi |= FLOAT128_HIDDEN_BIT_MASK_HI;
+
+		rshift128(frac_hi, frac_lo,
+		    (FLOAT128_FRACTION_SIZE - FLOAT64_FRACTION_SIZE + 1),
+		    &frac_hi, &frac_lo);
+
+		while (exp > 0) {
+			--exp;
+			rshift128(frac_hi, frac_lo, 1, &frac_hi, &frac_lo);
+		}
+		result.parts.fraction = frac_lo;
+
+		return result;
+	}
+
+	result.parts.exp = exp;
+	frac_hi = a.parts.frac_hi;
+	frac_lo = a.parts.frac_lo;
+	/* normal number: hidden bit is implicit, shift by size difference only */
+	rshift128(frac_hi, frac_lo,
+	    (FLOAT128_FRACTION_SIZE - FLOAT64_FRACTION_SIZE), &frac_hi, &frac_lo);
+	result.parts.fraction = frac_lo;
+	return result;
+}
+
+
+/** 
+ * Helping procedure for converting float32 to uint32.
+ *
+ * @param a Floating point number in normalized form
+ *     (NaNs or Inf are not checked).
+ * @return Converted unsigned integer.
  */
 static uint32_t _float32_to_uint32_helper(float32 a)
@@ -156,5 +415,5 @@
 	
 	if (a.parts.exp < FLOAT32_BIAS) {
-		/*TODO: rounding*/
+		/* TODO: rounding */
 		return 0;
 	}
@@ -175,5 +434,5 @@
 }
 
-/* Convert float to unsigned int32
+/* 
  * FIXME: Im not sure what to return if overflow/underflow happens 
  * 	- now its the biggest or the smallest int
@@ -194,5 +453,5 @@
 }
 
-/* Convert float to signed int32
+/* 
  * FIXME: Im not sure what to return if overflow/underflow happens 
  * 	- now its the biggest or the smallest int
@@ -214,22 +473,96 @@
 
 
-/** Helping procedure for converting float64 to uint64
- * @param a floating point number in normalized form (no NaNs or Inf are checked )
- * @return unsigned integer
+/**
+ * Helping procedure for converting float32 to uint64.
+ *
+ * @param a Floating point number in normalized form
+ *     (NaNs or Inf are not checked).
+ * @return Converted unsigned integer.
+ */
+static uint64_t _float32_to_uint64_helper(float32 a)
+{
+	uint64_t frac;
+
+	if (a.parts.exp < FLOAT32_BIAS) {
+		/*TODO: rounding*/
+		return 0;
+	}
+
+	frac = a.parts.fraction;
+
+	frac |= FLOAT32_HIDDEN_BIT_MASK;
+	/* shift fraction to left so hidden bit will be the most significant bit */
+	frac <<= 64 - FLOAT32_FRACTION_SIZE - 1;
+
+	frac >>= 64 - (a.parts.exp - FLOAT32_BIAS) - 1;
+	if ((a.parts.sign == 1) && (frac != 0)) {
+		frac = ~frac;
+		++frac;
+	}
+
+	return frac;
+}
+
+/* 
+ * FIXME: Im not sure what to return if overflow/underflow happens
+ * 	- now its the biggest or the smallest int
+ */
+uint64_t float32_to_uint64(float32 a)
+{
+	if (isFloat32NaN(a))
+		return UINT64_MAX;
+
+
+	if (isFloat32Infinity(a) || (a.parts.exp >= (64 + FLOAT32_BIAS))) {
+		if (a.parts.sign)
+			return UINT64_MIN;
+
+		return UINT64_MAX;
+	}
+
+	return _float32_to_uint64_helper(a);
+}
+
+/* 
+ * FIXME: Im not sure what to return if overflow/underflow happens
+ * 	- now its the biggest or the smallest int
+ */
+int64_t float32_to_int64(float32 a)
+{
+	if (isFloat32NaN(a))
+		return INT64_MAX;
+
+	if (isFloat32Infinity(a) || (a.parts.exp >= (64 + FLOAT32_BIAS))) {
+		if (a.parts.sign)
+			return INT64_MIN;
+
+		return INT64_MAX;
+	}
+
+	return _float32_to_uint64_helper(a);
+}
+
+
+/**
+ * Helping procedure for converting float64 to uint64.
+ *
+ * @param a Floating point number in normalized form
+ *     (NaNs or Inf are not checked).
+ * @return Converted unsigned integer.
  */
 static uint64_t _float64_to_uint64_helper(float64 a)
 {
 	uint64_t frac;
-	
+
 	if (a.parts.exp < FLOAT64_BIAS) {
 		/*TODO: rounding*/
 		return 0;
 	}
-	
+
 	frac = a.parts.fraction;
-	
+
 	frac |= FLOAT64_HIDDEN_BIT_MASK;
 	/* shift fraction to left so hidden bit will be the most significant bit */
-	frac <<= 64 - FLOAT64_FRACTION_SIZE - 1; 
+	frac <<= 64 - FLOAT64_FRACTION_SIZE - 1;
 
 	frac >>= 64 - (a.parts.exp - FLOAT64_BIAS) - 1;
@@ -238,9 +571,48 @@
 		++frac;
 	}
-	
+
 	return frac;
 }
 
-/* Convert float to unsigned int64
+/*
+ * FIXME: Im not sure what to return if overflow/underflow happens
+ * 	- now its the biggest or the smallest int
+ */
+uint32_t float64_to_uint32(float64 a)
+{
+	if (isFloat64NaN(a))
+		return UINT32_MAX;
+
+	if (isFloat64Infinity(a) || (a.parts.exp >= (32 + FLOAT64_BIAS))) {
+		if (a.parts.sign)
+			return UINT32_MIN;
+
+		return UINT32_MAX;
+	}
+
+	return (uint32_t) _float64_to_uint64_helper(a);
+}
+
+/*
+ * FIXME: Im not sure what to return if overflow/underflow happens
+ * 	- now its the biggest or the smallest int
+ */
+int32_t float64_to_int32(float64 a)
+{
+	if (isFloat64NaN(a))
+		return INT32_MAX;
+
+	if (isFloat64Infinity(a) || (a.parts.exp >= (32 + FLOAT64_BIAS))) {
+		if (a.parts.sign)
+			return INT32_MIN;
+
+		return INT32_MAX;
+	}
+
+	return (int32_t) _float64_to_uint64_helper(a);
+}
+
+
+/* 
  * FIXME: Im not sure what to return if overflow/underflow happens 
  * 	- now its the biggest or the smallest int
@@ -251,5 +623,4 @@
 		return UINT64_MAX;
 	
-	
 	if (isFloat64Infinity(a) || (a.parts.exp >= (64 + FLOAT64_BIAS))) {
 		if (a.parts.sign)
@@ -262,5 +633,5 @@
 }
 
-/* Convert float to signed int64
+/* 
  * FIXME: Im not sure what to return if overflow/underflow happens 
  * 	- now its the biggest or the smallest int
@@ -271,5 +642,4 @@
 		return INT64_MAX;
 	
-	
 	if (isFloat64Infinity(a) || (a.parts.exp >= (64 + FLOAT64_BIAS))) {
 		if (a.parts.sign)
@@ -283,119 +653,116 @@
 
 
-
-
-
-/** Helping procedure for converting float32 to uint64
- * @param a floating point number in normalized form (no NaNs or Inf are checked )
- * @return unsigned integer
- */
-static uint64_t _float32_to_uint64_helper(float32 a)
-{
-	uint64_t frac;
-	
-	if (a.parts.exp < FLOAT32_BIAS) {
+/**
+ * Helping procedure for converting float128 to uint64.
+ *
+ * @param a Floating point number in normalized form
+ *     (NaNs or Inf are not checked).
+ * @return Converted unsigned integer.
+ */
+static uint64_t _float128_to_uint64_helper(float128 a)
+{
+	uint64_t frac_hi, frac_lo;
+
+	if (a.parts.exp < FLOAT128_BIAS) {
 		/*TODO: rounding*/
 		return 0;
 	}
-	
-	frac = a.parts.fraction;
-	
-	frac |= FLOAT32_HIDDEN_BIT_MASK;
+
+	frac_hi = a.parts.frac_hi;
+	frac_lo = a.parts.frac_lo;
+
+	frac_hi |= FLOAT128_HIDDEN_BIT_MASK_HI;
 	/* shift fraction to left so hidden bit will be the most significant bit */
-	frac <<= 64 - FLOAT32_FRACTION_SIZE - 1; 
-
-	frac >>= 64 - (a.parts.exp - FLOAT32_BIAS) - 1;
-	if ((a.parts.sign == 1) && (frac != 0)) {
-		frac = ~frac;
-		++frac;
-	}
-	
-	return frac;
-}
-
-/* Convert float to unsigned int64
- * FIXME: Im not sure what to return if overflow/underflow happens 
- * 	- now its the biggest or the smallest int
- */ 
-uint64_t float32_to_uint64(float32 a)
-{
-	if (isFloat32NaN(a))
+	lshift128(frac_hi, frac_lo,
+	    (128 - FLOAT128_FRACTION_SIZE - 1), &frac_hi, &frac_lo);
+
+	rshift128(frac_hi, frac_lo,
+	    (128 - (a.parts.exp - FLOAT128_BIAS) - 1), &frac_hi, &frac_lo);
+	if ((a.parts.sign == 1) && !eq128(frac_hi, frac_lo, 0x0ll, 0x0ll)) {
+		not128(frac_hi, frac_lo, &frac_hi, &frac_lo);
+		add128(frac_hi, frac_lo, 0x0ll, 0x1ll, &frac_hi, &frac_lo);
+	}
+
+	return frac_lo;
+}
+
+/*
+ * FIXME: Im not sure what to return if overflow/underflow happens
+ * 	- now its the biggest or the smallest int
+ */
+uint32_t float128_to_uint32(float128 a)
+{
+	if (isFloat128NaN(a))
+		return UINT32_MAX;
+
+	if (isFloat128Infinity(a) || (a.parts.exp >= (32 + FLOAT128_BIAS))) {
+		if (a.parts.sign)
+			return UINT32_MIN;
+
+		return UINT32_MAX;
+	}
+
+	return (uint32_t) _float128_to_uint64_helper(a);
+}
+
+/*
+ * FIXME: Im not sure what to return if overflow/underflow happens
+ * 	- now its the biggest or the smallest int
+ */
+int32_t float128_to_int32(float128 a)
+{
+	if (isFloat128NaN(a))
+		return INT32_MAX;
+
+	if (isFloat128Infinity(a) || (a.parts.exp >= (32 + FLOAT128_BIAS))) {
+		if (a.parts.sign)
+			return INT32_MIN;
+
+		return INT32_MAX;
+	}
+
+	return (int32_t) _float128_to_uint64_helper(a);
+}
+
+
+/*
+ * FIXME: Im not sure what to return if overflow/underflow happens
+ * 	- now its the biggest or the smallest int
+ */
+uint64_t float128_to_uint64(float128 a)
+{
+	if (isFloat128NaN(a))
 		return UINT64_MAX;
-	
-	
-	if (isFloat32Infinity(a) || (a.parts.exp >= (64 + FLOAT32_BIAS))) {
+
+	if (isFloat128Infinity(a) || (a.parts.exp >= (64 + FLOAT128_BIAS))) {
 		if (a.parts.sign)
 			return UINT64_MIN;
-		
+
 		return UINT64_MAX;
 	}
-	
-	return _float32_to_uint64_helper(a);
-}
-
-/* Convert float to signed int64
- * FIXME: Im not sure what to return if overflow/underflow happens 
- * 	- now its the biggest or the smallest int
- */ 
-int64_t float32_to_int64(float32 a)
-{
-	if (isFloat32NaN(a))
+
+	return _float128_to_uint64_helper(a);
+}
+
+/*
+ * FIXME: Im not sure what to return if overflow/underflow happens
+ * 	- now its the biggest or the smallest int
+ */
+int64_t float128_to_int64(float128 a)
+{
+	if (isFloat128NaN(a))
 		return INT64_MAX;
-	
-	if (isFloat32Infinity(a) || (a.parts.exp >= (64 + FLOAT32_BIAS))) {
+
+	if (isFloat128Infinity(a) || (a.parts.exp >= (64 + FLOAT128_BIAS))) {
 		if (a.parts.sign)
 			return INT64_MIN;
-		
+
 		return INT64_MAX;
 	}
-	
-	return _float32_to_uint64_helper(a);
-}
-
-
-/* Convert float64 to unsigned int32
- * FIXME: Im not sure what to return if overflow/underflow happens 
- * 	- now its the biggest or the smallest int
- */ 
-uint32_t float64_to_uint32(float64 a)
-{
-	if (isFloat64NaN(a))
-		return UINT32_MAX;
-	
-	
-	if (isFloat64Infinity(a) || (a.parts.exp >= (32 + FLOAT64_BIAS))) {
-		if (a.parts.sign)
-			return UINT32_MIN;
-		
-		return UINT32_MAX;
-	}
-	
-	return (uint32_t) _float64_to_uint64_helper(a);
-}
-
-/* Convert float64 to signed int32
- * FIXME: Im not sure what to return if overflow/underflow happens 
- * 	- now its the biggest or the smallest int
- */ 
-int32_t float64_to_int32(float64 a)
-{
-	if (isFloat64NaN(a))
-		return INT32_MAX;
-	
-	
-	if (isFloat64Infinity(a) || (a.parts.exp >= (32 + FLOAT64_BIAS))) {
-		if (a.parts.sign)
-			return INT32_MIN;
-		
-		return INT32_MAX;
-	}
-	
-	return (int32_t) _float64_to_uint64_helper(a);
-}
-
-/** Convert unsigned integer to float32
- *
- *
- */
+
+	return _float128_to_uint64_helper(a);
+}
+
+
 float32 uint32_to_float32(uint32_t i)
 {
@@ -424,5 +791,5 @@
 	roundFloat32(&exp, &i);
 
-	result.parts.fraction = i >> 7;
+	result.parts.fraction = i >> (32 - FLOAT32_FRACTION_SIZE - 2);
 	result.parts.exp = exp;
 
@@ -435,7 +802,7 @@
 
 	if (i < 0) {
-		result = uint32_to_float32((uint32_t)(-i));
+		result = uint32_to_float32((uint32_t) (-i));
 	} else {
-		result = uint32_to_float32((uint32_t)i);
+		result = uint32_to_float32((uint32_t) i);
 	}
 	
@@ -465,5 +832,5 @@
 	}
 	
-	/* Shift all to the first 31 bits (31. will be hidden 1)*/
+	/* Shift all to the first 31 bits (31st will be hidden 1) */
 	if (counter > 33) {
 		i <<= counter - 1 - 32;
@@ -472,8 +839,8 @@
 	}
 	
-	j = (uint32_t)i;
+	j = (uint32_t) i;
 	roundFloat32(&exp, &j);
 
-	result.parts.fraction = j >> 7;
+	result.parts.fraction = j >> (32 - FLOAT32_FRACTION_SIZE - 2);
 	result.parts.exp = exp;
 	return result;
@@ -485,7 +852,7 @@
 
 	if (i < 0) {
-		result = uint64_to_float32((uint64_t)(-i));
+		result = uint64_to_float32((uint64_t) (-i));
 	} else {
-		result = uint64_to_float32((uint64_t)i);
+		result = uint64_to_float32((uint64_t) i);
 	}
 	
@@ -495,8 +862,4 @@
 }
 
-/** Convert unsigned integer to float64
- *
- *
- */
 float64 uint32_to_float64(uint32_t i)
 {
@@ -523,5 +886,5 @@
 	roundFloat64(&exp, &frac);
 
-	result.parts.fraction = frac >> 10;
+	result.parts.fraction = frac >> (64 - FLOAT64_FRACTION_SIZE - 2);
 	result.parts.exp = exp;
 
@@ -534,7 +897,7 @@
 
 	if (i < 0) {
-		result = uint32_to_float64((uint32_t)(-i));
+		result = uint32_to_float64((uint32_t) (-i));
 	} else {
-		result = uint32_to_float64((uint32_t)i);
+		result = uint32_to_float64((uint32_t) i);
 	}
 	
@@ -571,5 +934,5 @@
 	roundFloat64(&exp, &i);
 
-	result.parts.fraction = i >> 10;
+	result.parts.fraction = i >> (64 - FLOAT64_FRACTION_SIZE - 2);
 	result.parts.exp = exp;
 	return result;
@@ -581,7 +944,7 @@
 
 	if (i < 0) {
-		result = uint64_to_float64((uint64_t)(-i));
+		result = uint64_to_float64((uint64_t) (-i));
 	} else {
-		result = uint64_to_float64((uint64_t)i);
+		result = uint64_to_float64((uint64_t) i);
 	}
 	
@@ -591,4 +954,108 @@
 }
 
+
+float128 uint32_to_float128(uint32_t i)
+{
+	int counter;
+	int32_t exp;
+	float128 result;
+	uint64_t frac_hi, frac_lo;
+
+	result.parts.sign = 0;
+	result.parts.frac_hi = 0;
+	result.parts.frac_lo = 0;
+
+	counter = countZeroes32(i);
+
+	exp = FLOAT128_BIAS + 32 - counter - 1;
+
+	if (counter == 32) {
+		result.binary.hi = 0;
+		result.binary.lo = 0;
+		return result;
+	}
+
+	frac_hi = 0;
+	frac_lo = i;
+	lshift128(frac_hi, frac_lo, (counter + 96 - 1), &frac_hi, &frac_lo);
+
+	roundFloat128(&exp, &frac_hi, &frac_lo);
+
+	rshift128(frac_hi, frac_lo,
+	    (128 - FLOAT128_FRACTION_SIZE - 2), &frac_hi, &frac_lo);
+	result.parts.frac_hi = frac_hi;
+	result.parts.frac_lo = frac_lo;
+	result.parts.exp = exp;
+
+	return result;
+}
+
+float128 int32_to_float128(int32_t i)
+{
+	float128 result;
+
+	if (i < 0) {
+		result = uint32_to_float128((uint32_t) (-i));
+	} else {
+		result = uint32_to_float128((uint32_t) i);
+	}
+
+	result.parts.sign = i < 0;
+
+	return result;
+}
+
+
+float128 uint64_to_float128(uint64_t i)
+{
+	int counter;
+	int32_t exp;
+	float128 result;
+	uint64_t frac_hi, frac_lo;
+
+	result.parts.sign = 0;
+	result.parts.frac_hi = 0;
+	result.parts.frac_lo = 0;
+
+	counter = countZeroes64(i);
+
+	exp = FLOAT128_BIAS + 64 - counter - 1;
+
+	if (counter == 64) {
+		result.binary.hi = 0;
+		result.binary.lo = 0;
+		return result;
+	}
+
+	frac_hi = 0;
+	frac_lo = i;
+	lshift128(frac_hi, frac_lo, (counter + 64 - 1), &frac_hi, &frac_lo);
+
+	roundFloat128(&exp, &frac_hi, &frac_lo);
+
+	rshift128(frac_hi, frac_lo,
+	    (128 - FLOAT128_FRACTION_SIZE - 2), &frac_hi, &frac_lo);
+	result.parts.frac_hi = frac_hi;
+	result.parts.frac_lo = frac_lo;
+	result.parts.exp = exp;
+
+	return result;
+}
+
+float128 int64_to_float128(int64_t i)
+{
+	float128 result;
+
+	if (i < 0) {
+		result = uint64_to_float128((uint64_t) (-i));
+	} else {
+		result = uint64_to_float128((uint64_t) i);
+	}
+
+	result.parts.sign = i < 0;
+
+	return result;
+}
+
 /** @}
  */
Index: uspace/lib/softfloat/generic/div.c
===================================================================
--- uspace/lib/softfloat/generic/div.c	(revision 750636ac4a65360c44846b7681af6ae46854dd3a)
+++ uspace/lib/softfloat/generic/div.c	(revision bb285b42d51bf15d56c4d20ab034e93a463a8b7f)
@@ -1,4 +1,5 @@
 /*
  * Copyright (c) 2005 Josef Cejka
+ * Copyright (c) 2011 Petr Koupy
  * All rights reserved.
  *
@@ -30,5 +31,5 @@
  * @{
  */
-/** @file
+/** @file Division functions.
  */
 
@@ -40,4 +41,11 @@
 #include <common.h>
 
+/**
+ * Divide two single-precision floats.
+ * 
+ * @param a Nominator.
+ * @param b Denominator.
+ * @return Result of division.
+ */
 float32 divFloat32(float32 a, float32 b) 
 {
@@ -100,5 +108,4 @@
 		return result;
 	}
-
 	
 	afrac = a.parts.fraction;
@@ -110,13 +117,13 @@
 	if (aexp == 0) {
 		if (afrac == 0) {
-		result.parts.exp = 0;
-		result.parts.fraction = 0;
-		return result;
-		}
+			result.parts.exp = 0;
+			result.parts.fraction = 0;
+			return result;
+		}
+
 		/* normalize it*/
-		
 		afrac <<= 1;
-			/* afrac is nonzero => it must stop */	
-		while (! (afrac & FLOAT32_HIDDEN_BIT_MASK) ) {
+		/* afrac is nonzero => it must stop */	
+		while (!(afrac & FLOAT32_HIDDEN_BIT_MASK)) {
 			afrac <<= 1;
 			aexp--;
@@ -126,6 +133,6 @@
 	if (bexp == 0) {
 		bfrac <<= 1;
-			/* bfrac is nonzero => it must stop */	
-		while (! (bfrac & FLOAT32_HIDDEN_BIT_MASK) ) {
+		/* bfrac is nonzero => it must stop */	
+		while (!(bfrac & FLOAT32_HIDDEN_BIT_MASK)) {
 			bfrac <<= 1;
 			bexp--;
@@ -133,8 +140,8 @@
 	}
 
-	afrac =	(afrac | FLOAT32_HIDDEN_BIT_MASK ) << (32 - FLOAT32_FRACTION_SIZE - 1 );
-	bfrac =	(bfrac | FLOAT32_HIDDEN_BIT_MASK ) << (32 - FLOAT32_FRACTION_SIZE );
-
-	if ( bfrac <= (afrac << 1) ) {
+	afrac =	(afrac | FLOAT32_HIDDEN_BIT_MASK) << (32 - FLOAT32_FRACTION_SIZE - 1);
+	bfrac =	(bfrac | FLOAT32_HIDDEN_BIT_MASK) << (32 - FLOAT32_FRACTION_SIZE);
+
+	if (bfrac <= (afrac << 1)) {
 		afrac >>= 1;
 		aexp++;
@@ -144,6 +151,6 @@
 	
 	cfrac = (afrac << 32) / bfrac;
-	if ((  cfrac & 0x3F ) == 0) { 
-		cfrac |= ( bfrac * cfrac != afrac << 32 );
+	if ((cfrac & 0x3F) == 0) { 
+		cfrac |= (bfrac * cfrac != afrac << 32);
 	}
 	
@@ -151,9 +158,9 @@
 	
 	/* find first nonzero digit and shift result and detect possibly underflow */
-	while ((cexp > 0) && (cfrac) && (!(cfrac & (FLOAT32_HIDDEN_BIT_MASK << 7 )))) {
+	while ((cexp > 0) && (cfrac) && (!(cfrac & (FLOAT32_HIDDEN_BIT_MASK << 7)))) {
 		cexp--;
 		cfrac <<= 1;
-			/* TODO: fix underflow */
-	};
+		/* TODO: fix underflow */
+	}
 	
 	cfrac += (0x1 << 6); /* FIXME: 7 is not sure*/
@@ -162,8 +169,8 @@
 		++cexp;
 		cfrac >>= 1;
-		}	
+	}	
 
 	/* check overflow */
-	if (cexp >= FLOAT32_MAX_EXPONENT ) {
+	if (cexp >= FLOAT32_MAX_EXPONENT) {
 		/* FIXME: overflow, return infinity */
 		result.parts.exp = FLOAT32_MAX_EXPONENT;
@@ -181,10 +188,9 @@
 		cfrac >>= 1;
 		while (cexp < 0) {
-			cexp ++;
+			cexp++;
 			cfrac >>= 1;
-		}
-		
+		}	
 	} else {
-		result.parts.exp = (uint32_t)cexp;
+		result.parts.exp = (uint32_t) cexp;
 	}
 	
@@ -194,4 +200,11 @@
 }
 
+/**
+ * Divide two double-precision floats.
+ *
+ * @param a Nominator.
+ * @param b Denominator.
+ * @return Result of division.
+ */
 float64 divFloat64(float64 a, float64 b) 
 {
@@ -200,9 +213,9 @@
 	uint64_t afrac, bfrac, cfrac; 
 	uint64_t remlo, remhi;
+	uint64_t tmplo, tmphi;
 	
 	result.parts.sign = a.parts.sign ^ b.parts.sign;
 	
 	if (isFloat64NaN(a)) {
-		
 		if (isFloat64SigNaN(b)) {
 			/*FIXME: SigNaN*/
@@ -262,5 +275,4 @@
 	}
 
-	
 	afrac = a.parts.fraction;
 	aexp = a.parts.exp;
@@ -275,9 +287,9 @@
 			return result;
 		}
+
 		/* normalize it*/
-		
 		aexp++;
-			/* afrac is nonzero => it must stop */	
-		while (! (afrac & FLOAT64_HIDDEN_BIT_MASK) ) {
+		/* afrac is nonzero => it must stop */	
+		while (!(afrac & FLOAT64_HIDDEN_BIT_MASK)) {
 			afrac <<= 1;
 			aexp--;
@@ -287,6 +299,6 @@
 	if (bexp == 0) {
 		bexp++;
-			/* bfrac is nonzero => it must stop */	
-		while (! (bfrac & FLOAT64_HIDDEN_BIT_MASK) ) {
+		/* bfrac is nonzero => it must stop */	
+		while (!(bfrac & FLOAT64_HIDDEN_BIT_MASK)) {
 			bfrac <<= 1;
 			bexp--;
@@ -294,8 +306,8 @@
 	}
 
-	afrac =	(afrac | FLOAT64_HIDDEN_BIT_MASK ) << (64 - FLOAT64_FRACTION_SIZE - 2 );
-	bfrac =	(bfrac | FLOAT64_HIDDEN_BIT_MASK ) << (64 - FLOAT64_FRACTION_SIZE - 1);
-
-	if ( bfrac <= (afrac << 1) ) {
+	afrac =	(afrac | FLOAT64_HIDDEN_BIT_MASK) << (64 - FLOAT64_FRACTION_SIZE - 2);
+	bfrac =	(bfrac | FLOAT64_HIDDEN_BIT_MASK) << (64 - FLOAT64_FRACTION_SIZE - 1);
+
+	if (bfrac <= (afrac << 1)) {
 		afrac >>= 1;
 		aexp++;
@@ -304,18 +316,15 @@
 	cexp = aexp - bexp + FLOAT64_BIAS - 2; 
 	
-	cfrac = divFloat64estim(afrac, bfrac);
-	
-	if ((  cfrac & 0x1FF ) <= 2) { /*FIXME:?? */
-		mul64integers( bfrac, cfrac, &remlo, &remhi);
-		/* (__u128)afrac << 64 - ( ((__u128)remhi<<64) + (__u128)remlo )*/	
-		remhi = afrac - remhi - ( remlo > 0);
-		remlo = - remlo;
+	cfrac = div128est(afrac, 0x0ll, bfrac);
+	
+	if ((cfrac & 0x1FF) <= 2) {
+		mul64(bfrac, cfrac, &tmphi, &tmplo);
+		sub128(afrac, 0x0ll, tmphi, tmplo, &remhi, &remlo);
 		
 		while ((int64_t) remhi < 0) {
 			cfrac--;
-			remlo += bfrac;
-			remhi += ( remlo < bfrac );
-		}
-		cfrac |= ( remlo != 0 );
+			add128(remhi, remlo, 0x0ll, bfrac, &remhi, &remlo);
+		}
+		cfrac |= (remlo != 0);
 	}
 	
@@ -323,38 +332,205 @@
 	result = finishFloat64(cexp, cfrac, result.parts.sign);
 	return result;
-
 }
 
-uint64_t divFloat64estim(uint64_t a, uint64_t b)
+/**
+ * Divide two quadruple-precision floats.
+ *
+ * @param a Nominator.
+ * @param b Denominator.
+ * @return Result of division.
+ */
+float128 divFloat128(float128 a, float128 b)
 {
-	uint64_t bhi;
-	uint64_t remhi, remlo;
-	uint64_t result;
-	
-	if ( b <= a ) {
-		return 0xFFFFFFFFFFFFFFFFull;
-	}
-	
-	bhi = b >> 32;
-	result = ((bhi << 32) <= a) ?( 0xFFFFFFFFull << 32) : ( a / bhi) << 32;
-	mul64integers(b, result, &remlo, &remhi);
-	
-	remhi = a - remhi - (remlo > 0);
-	remlo = - remlo;
-
-	b <<= 32;
-	while ( (int64_t) remhi < 0 ) {
-			result -= 0x1ll << 32;	
-			remlo += b;
-			remhi += bhi + ( remlo < b );
-		}
-	remhi = (remhi << 32) | (remlo >> 32);
-	if (( bhi << 32) <= remhi) {
-		result |= 0xFFFFFFFF;
-	} else {
-		result |= remhi / bhi;
-	}
-	
-	
+	float128 result;
+	int64_t aexp, bexp, cexp;
+	uint64_t afrac_hi, afrac_lo, bfrac_hi, bfrac_lo, cfrac_hi, cfrac_lo;
+	uint64_t shift_out;
+	uint64_t rem_hihi, rem_hilo, rem_lohi, rem_lolo;
+	uint64_t tmp_hihi, tmp_hilo, tmp_lohi, tmp_lolo;
+
+	result.parts.sign = a.parts.sign ^ b.parts.sign;
+
+	if (isFloat128NaN(a)) {
+		if (isFloat128SigNaN(b)) {
+			/*FIXME: SigNaN*/
+			return b;
+		}
+
+		if (isFloat128SigNaN(a)) {
+			/*FIXME: SigNaN*/
+		}
+		/*NaN*/
+		return a;
+	}
+
+	if (isFloat128NaN(b)) {
+		if (isFloat128SigNaN(b)) {
+			/*FIXME: SigNaN*/
+		}
+		/*NaN*/
+		return b;
+	}
+
+	if (isFloat128Infinity(a)) {
+		if (isFloat128Infinity(b) || isFloat128Zero(b)) {
+			/*FIXME: inf / inf */
+			result.binary.hi = FLOAT128_NAN_HI;
+			result.binary.lo = FLOAT128_NAN_LO;
+			return result;
+		}
+		/* inf / num */
+		result.parts.exp = a.parts.exp;
+		result.parts.frac_hi = a.parts.frac_hi;
+		result.parts.frac_lo = a.parts.frac_lo;
+		return result;
+	}
+
+	if (isFloat128Infinity(b)) {
+		if (isFloat128Zero(a)) {
+			/* FIXME 0 / inf */
+			result.parts.exp = 0;
+			result.parts.frac_hi = 0;
+			result.parts.frac_lo = 0;
+			return result;
+		}
+		/* FIXME: num / inf*/
+		result.parts.exp = 0;
+		result.parts.frac_hi = 0;
+		result.parts.frac_lo = 0;
+		return result;
+	}
+
+	if (isFloat128Zero(b)) {
+		if (isFloat128Zero(a)) {
+			/*FIXME: 0 / 0*/
+			result.binary.hi = FLOAT128_NAN_HI;
+			result.binary.lo = FLOAT128_NAN_LO;
+			return result;
+		}
+		/* FIXME: division by zero */
+		result.parts.exp = 0;
+		result.parts.frac_hi = 0;
+		result.parts.frac_lo = 0;
+		return result;
+	}
+
+	afrac_hi = a.parts.frac_hi;
+	afrac_lo = a.parts.frac_lo;
+	aexp = a.parts.exp;
+	bfrac_hi = b.parts.frac_hi;
+	bfrac_lo = b.parts.frac_lo;
+	bexp = b.parts.exp;
+
+	/* denormalized numbers */
+	if (aexp == 0) {
+		if (eq128(afrac_hi, afrac_lo, 0x0ll, 0x0ll)) {
+			result.parts.exp = 0;
+			result.parts.frac_hi = 0;
+			result.parts.frac_lo = 0;
+			return result;
+		}
+
+		/* normalize it*/
+		aexp++;
+		/* afrac is nonzero => it must stop */
+		and128(afrac_hi, afrac_lo,
+		    FLOAT128_HIDDEN_BIT_MASK_HI, FLOAT128_HIDDEN_BIT_MASK_LO,
+		    &tmp_hihi, &tmp_lolo);
+		while (!lt128(0x0ll, 0x0ll, tmp_hihi, tmp_lolo)) {
+			lshift128(afrac_hi, afrac_lo, 1, &afrac_hi, &afrac_lo);
+			aexp--;
+		}
+	}
+
+	if (bexp == 0) {
+		bexp++;
+		/* bfrac is nonzero => it must stop */
+		and128(bfrac_hi, bfrac_lo,
+		    FLOAT128_HIDDEN_BIT_MASK_HI, FLOAT128_HIDDEN_BIT_MASK_LO,
+		    &tmp_hihi, &tmp_lolo);
+		while (!lt128(0x0ll, 0x0ll, tmp_hihi, tmp_lolo)) {
+			lshift128(bfrac_hi, bfrac_lo, 1, &bfrac_hi, &bfrac_lo);
+			bexp--;
+		}
+	}
+
+	or128(afrac_hi, afrac_lo,
+	    FLOAT128_HIDDEN_BIT_MASK_HI, FLOAT128_HIDDEN_BIT_MASK_LO,
+	    &afrac_hi, &afrac_lo);
+	lshift128(afrac_hi, afrac_lo,
+	    (128 - FLOAT128_FRACTION_SIZE - 1), &afrac_hi, &afrac_lo);
+	or128(bfrac_hi, bfrac_lo,
+	    FLOAT128_HIDDEN_BIT_MASK_HI, FLOAT128_HIDDEN_BIT_MASK_LO,
+	    &bfrac_hi, &bfrac_lo);
+	lshift128(bfrac_hi, bfrac_lo,
+	    (128 - FLOAT128_FRACTION_SIZE - 1), &bfrac_hi, &bfrac_lo);
+
+	if (le128(bfrac_hi, bfrac_lo, afrac_hi, afrac_lo)) {
+		rshift128(afrac_hi, afrac_lo, 1, &afrac_hi, &afrac_lo);
+		aexp++;
+	}
+
+	cexp = aexp - bexp + FLOAT128_BIAS - 2;
+
+	cfrac_hi = div128est(afrac_hi, afrac_lo, bfrac_hi);
+
+	mul128(bfrac_hi, bfrac_lo, 0x0ll, cfrac_hi,
+	    &tmp_lolo /* dummy */, &tmp_hihi, &tmp_hilo, &tmp_lohi);
+
+	/* sub192(afrac_hi, afrac_lo, 0, 
+	 *     tmp_hihi, tmp_hilo, tmp_lohi
+	 *     &rem_hihi, &rem_hilo, &rem_lohi); */
+	sub128(afrac_hi, afrac_lo, tmp_hihi, tmp_hilo, &rem_hihi, &rem_hilo);
+	if (tmp_lohi > 0) {
+		sub128(rem_hihi, rem_hilo, 0x0ll, 0x1ll, &rem_hihi, &rem_hilo);
+	}
+	rem_lohi = -tmp_lohi;
+
+	while ((int64_t) rem_hihi < 0) {
+		--cfrac_hi;
+		/* add192(rem_hihi, rem_hilo, rem_lohi, 
+		 *     0, bfrac_hi, bfrac_lo,
+		 *     &rem_hihi, &rem_hilo, &rem_lohi); */
+		add128(rem_hilo, rem_lohi, bfrac_hi, bfrac_lo, &rem_hilo, &rem_lohi);
+		if (lt128(rem_hilo, rem_lohi, bfrac_hi, bfrac_lo)) {
+			++rem_hihi;
+		}
+	}
+
+	cfrac_lo = div128est(rem_hilo, rem_lohi, bfrac_lo);
+
+	if ((cfrac_lo & 0x3FFF) <= 4) {
+		mul128(bfrac_hi, bfrac_lo, 0x0ll, cfrac_lo,
+	        &tmp_hihi /* dummy */, &tmp_hilo, &tmp_lohi, &tmp_lolo);
+
+		/* sub192(rem_hilo, rem_lohi, 0,
+		 *     tmp_hilo, tmp_lohi, tmp_lolo,
+		 *     &rem_hilo, &rem_lohi, &rem_lolo); */
+		sub128(rem_hilo, rem_lohi, tmp_hilo, tmp_lohi, &rem_hilo, &rem_lohi);
+		if (tmp_lolo > 0) {
+			sub128(rem_hilo, rem_lohi, 0x0ll, 0x1ll, &rem_hilo, &rem_lohi);
+		}
+		rem_lolo = -tmp_lolo;
+
+		while ((int64_t) rem_hilo < 0) {
+			--cfrac_lo;
+			/* add192(rem_hilo, rem_lohi, rem_lolo,
+			 *     0, bfrac_hi, bfrac_lo,
+			 *     &rem_hilo, &rem_lohi, &rem_lolo); */
+			add128(rem_lohi, rem_lolo, bfrac_hi, bfrac_lo, &rem_lohi, &rem_lolo);
+			if (lt128(rem_lohi, rem_lolo, bfrac_hi, bfrac_lo)) {
+				++rem_hilo;
+			}
+		}
+
+		cfrac_lo |= ((rem_hilo | rem_lohi | rem_lolo) != 0);
+	}
+
+	shift_out = cfrac_lo << (64 - (128 - FLOAT128_FRACTION_SIZE - 1));
+	rshift128(cfrac_hi, cfrac_lo, (128 - FLOAT128_FRACTION_SIZE - 1),
+	    &cfrac_hi, &cfrac_lo);
+
+	result = finishFloat128(cexp, cfrac_hi, cfrac_lo, result.parts.sign, shift_out);
 	return result;
 }
Index: uspace/lib/softfloat/generic/mul.c
===================================================================
--- uspace/lib/softfloat/generic/mul.c	(revision 750636ac4a65360c44846b7681af6ae46854dd3a)
+++ uspace/lib/softfloat/generic/mul.c	(revision bb285b42d51bf15d56c4d20ab034e93a463a8b7f)
@@ -1,4 +1,5 @@
 /*
  * Copyright (c) 2005 Josef Cejka
+ * Copyright (c) 2011 Petr Koupy
  * All rights reserved.
  *
@@ -30,5 +31,5 @@
  * @{
  */
-/** @file
+/** @file Multiplication functions.
  */
 
@@ -38,6 +39,10 @@
 #include <common.h>
 
-/** Multiply two 32 bit float numbers
- *
+/**
+ * Multiply two single-precision floats.
+ *
+ * @param a First input operand.
+ * @param b Second input operand.
+ * @return Result of multiplication.
  */
 float32 mulFloat32(float32 a, float32 b)
@@ -49,5 +54,5 @@
 	result.parts.sign = a.parts.sign ^ b.parts.sign;
 	
-	if (isFloat32NaN(a) || isFloat32NaN(b) ) {
+	if (isFloat32NaN(a) || isFloat32NaN(b)) {
 		/* TODO: fix SigNaNs */
 		if (isFloat32SigNaN(a)) {
@@ -55,14 +60,14 @@
 			result.parts.exp = a.parts.exp;
 			return result;
-		};
+		}
 		if (isFloat32SigNaN(b)) { /* TODO: fix SigNaN */
 			result.parts.fraction = b.parts.fraction;
 			result.parts.exp = b.parts.exp;
 			return result;
-		};
+		}
 		/* set NaN as result */
 		result.binary = FLOAT32_NAN;
 		return result;
-	};
+	}
 		
 	if (isFloat32Infinity(a)) { 
@@ -98,5 +103,5 @@
 		result.parts.sign = a.parts.sign ^ b.parts.sign;
 		return result;
-	};
+	}
 	
 	if (exp < 0) { 
@@ -106,5 +111,5 @@
 		result.parts.exp = 0x0;
 		return result;
-	};
+	}
 	
 	frac1 = a.parts.fraction;
@@ -113,5 +118,5 @@
 	} else {
 		++exp;
-	};
+	}
 	
 	frac2 = b.parts.fraction;
@@ -121,16 +126,16 @@
 	} else {
 		++exp;
-	};
+	}
 
 	frac1 <<= 1; /* one bit space for rounding */
 
 	frac1 = frac1 * frac2;
-/* round and return */
-	
-	while ((exp < FLOAT32_MAX_EXPONENT) && (frac1 >= ( 1 << (FLOAT32_FRACTION_SIZE + 2)))) { 
-		/* 23 bits of fraction + one more for hidden bit (all shifted 1 bit left)*/
+
+	/* round and return */
+	while ((exp < FLOAT32_MAX_EXPONENT) && (frac1 >= (1 << (FLOAT32_FRACTION_SIZE + 2)))) { 
+		/* 23 bits of fraction + one more for hidden bit (all shifted 1 bit left) */
 		++exp;
 		frac1 >>= 1;
-	};
+	}
 
 	/* rounding */
@@ -141,7 +146,7 @@
 		++exp;
 		frac1 >>= 1;
-	};
-
-	if (exp >= FLOAT32_MAX_EXPONENT ) {	
+	}
+
+	if (exp >= FLOAT32_MAX_EXPONENT) {	
 		/* TODO: fix overflow */
 		/* return infinity*/
@@ -159,21 +164,24 @@
 			frac1 >>= 1;
 			++exp;
-		};
+		}
 		if (frac1 == 0) {
 			/* FIXME : underflow */
-		result.parts.exp = 0;
-		result.parts.fraction = 0;
-		return result;
-		};
-	};
+			result.parts.exp = 0;
+			result.parts.fraction = 0;
+			return result;
+		}
+	}
 	result.parts.exp = exp; 
-	result.parts.fraction = frac1 & ( (1 << FLOAT32_FRACTION_SIZE) - 1);
+	result.parts.fraction = frac1 & ((1 << FLOAT32_FRACTION_SIZE) - 1);
 	
 	return result;	
-	
 }
 
-/** Multiply two 64 bit float numbers
- *
+/**
+ * Multiply two double-precision floats.
+ *
+ * @param a First input operand.
+ * @param b Second input operand.
+ * @return Result of multiplication.
  */
 float64 mulFloat64(float64 a, float64 b)
@@ -185,5 +193,5 @@
 	result.parts.sign = a.parts.sign ^ b.parts.sign;
 	
-	if (isFloat64NaN(a) || isFloat64NaN(b) ) {
+	if (isFloat64NaN(a) || isFloat64NaN(b)) {
 		/* TODO: fix SigNaNs */
 		if (isFloat64SigNaN(a)) {
@@ -191,14 +199,14 @@
 			result.parts.exp = a.parts.exp;
 			return result;
-		};
+		}
 		if (isFloat64SigNaN(b)) { /* TODO: fix SigNaN */
 			result.parts.fraction = b.parts.fraction;
 			result.parts.exp = b.parts.exp;
 			return result;
-		};
+		}
 		/* set NaN as result */
 		result.binary = FLOAT64_NAN;
 		return result;
-	};
+	}
 		
 	if (isFloat64Infinity(a)) { 
@@ -233,5 +241,5 @@
 	} else {
 		++exp;
-	};
+	}
 	
 	frac2 = b.parts.fraction;
@@ -241,52 +249,128 @@
 	} else {
 		++exp;
-	};
+	}
 
 	frac1 <<= (64 - FLOAT64_FRACTION_SIZE - 1);
 	frac2 <<= (64 - FLOAT64_FRACTION_SIZE - 2);
 
-	mul64integers(frac1, frac2, &frac1, &frac2);
-
-	frac2 |= (frac1 != 0);
-	if (frac2 & (0x1ll << 62)) {
-		frac2 <<= 1;
+	mul64(frac1, frac2, &frac1, &frac2);
+
+	frac1 |= (frac2 != 0);
+	if (frac1 & (0x1ll << 62)) {
+		frac1 <<= 1;
 		exp--;
 	}
 
-	result = finishFloat64(exp, frac2, result.parts.sign);
+	result = finishFloat64(exp, frac1, result.parts.sign);
 	return result;
 }
 
-/** Multiply two 64 bit numbers and return result in two parts
- * @param a first operand
- * @param b second operand
- * @param lo lower part from result
- * @param hi higher part of result
- */
-void mul64integers(uint64_t a,uint64_t b, uint64_t *lo, uint64_t *hi)
+/**
+ * Multiply two quadruple-precision floats.
+ *
+ * @param a First input operand.
+ * @param b Second input operand.
+ * @return Result of multiplication.
+ */
+float128 mulFloat128(float128 a, float128 b)
 {
-	uint64_t low, high, middle1, middle2;
-	uint32_t alow, blow;
-
-	alow = a & 0xFFFFFFFF;
-	blow = b & 0xFFFFFFFF;
-	
-	a >>= 32;
-	b >>= 32;
-	
-	low = ((uint64_t)alow) * blow;
-	middle1 = a * blow;
-	middle2 = alow * b;
-	high = a * b;
-
-	middle1 += middle2;
-	high += (((uint64_t)(middle1 < middle2)) << 32) + (middle1 >> 32);
-	middle1 <<= 32;
-	low += middle1;
-	high += (low < middle1);
-	*lo = low;
-	*hi = high;
-	
-	return;
+	float128 result;
+	uint64_t frac1_hi, frac1_lo, frac2_hi, frac2_lo, tmp_hi, tmp_lo;
+	int32_t exp;
+
+	result.parts.sign = a.parts.sign ^ b.parts.sign;
+
+	if (isFloat128NaN(a) || isFloat128NaN(b)) {
+		/* TODO: fix SigNaNs */
+		if (isFloat128SigNaN(a)) {
+			result.parts.frac_hi = a.parts.frac_hi;
+			result.parts.frac_lo = a.parts.frac_lo;
+			result.parts.exp = a.parts.exp;
+			return result;
+		}
+		if (isFloat128SigNaN(b)) { /* TODO: fix SigNaN */
+			result.parts.frac_hi = b.parts.frac_hi;
+			result.parts.frac_lo = b.parts.frac_lo;
+			result.parts.exp = b.parts.exp;
+			return result;
+		}
+		/* set NaN as result */
+		result.binary.hi = FLOAT128_NAN_HI;
+		result.binary.lo = FLOAT128_NAN_LO;
+		return result;
+	}
+
+	if (isFloat128Infinity(a)) {
+		if (isFloat128Zero(b)) {
+			/* FIXME: zero * infinity */
+			result.binary.hi = FLOAT128_NAN_HI;
+			result.binary.lo = FLOAT128_NAN_LO;
+			return result;
+		}
+		result.parts.frac_hi = a.parts.frac_hi;
+		result.parts.frac_lo = a.parts.frac_lo;
+		result.parts.exp = a.parts.exp;
+		return result;
+	}
+
+	if (isFloat128Infinity(b)) {
+		if (isFloat128Zero(a)) {
+			/* FIXME: zero * infinity */
+			result.binary.hi = FLOAT128_NAN_HI;
+			result.binary.lo = FLOAT128_NAN_LO;
+			return result;
+		}
+		result.parts.frac_hi = b.parts.frac_hi;
+		result.parts.frac_lo = b.parts.frac_lo;
+		result.parts.exp = b.parts.exp;
+		return result;
+	}
+
+	/* exp is signed so we can easy detect underflow */
+	exp = a.parts.exp + b.parts.exp - FLOAT128_BIAS - 1;
+
+	frac1_hi = a.parts.frac_hi;
+	frac1_lo = a.parts.frac_lo;
+
+	if (a.parts.exp > 0) {
+		or128(frac1_hi, frac1_lo,
+	        FLOAT128_HIDDEN_BIT_MASK_HI, FLOAT128_HIDDEN_BIT_MASK_LO,
+	        &frac1_hi, &frac1_lo);
+	} else {
+		++exp;
+	}
+
+	frac2_hi = b.parts.frac_hi;
+	frac2_lo = b.parts.frac_lo;
+
+	if (b.parts.exp > 0) {
+		or128(frac2_hi, frac2_lo,
+		    FLOAT128_HIDDEN_BIT_MASK_HI, FLOAT128_HIDDEN_BIT_MASK_LO,
+		    &frac2_hi, &frac2_lo);
+	} else {
+		++exp;
+	}
+
+	lshift128(frac2_hi, frac2_lo,
+	    128 - FLOAT128_FRACTION_SIZE, &frac2_hi, &frac2_lo);
+
+	tmp_hi = frac1_hi;
+	tmp_lo = frac1_lo;
+	mul128(frac1_hi, frac1_lo, frac2_hi, frac2_lo,
+	    &frac1_hi, &frac1_lo, &frac2_hi, &frac2_lo);
+	add128(frac1_hi, frac1_lo, tmp_hi, tmp_lo, &frac1_hi, &frac1_lo);
+	frac2_hi |= (frac2_lo != 0x0ll);
+
+	if ((FLOAT128_HIDDEN_BIT_MASK_HI << 1) <= frac1_hi) {
+		frac2_hi >>= 1;
+		if (frac1_lo & 0x1ll) {
+			frac2_hi |= (0x1ull << 63);
+		}
+		rshift128(frac1_hi, frac1_lo, 1, &frac1_hi, &frac1_lo);
+		++exp;
+	}
+
+	result = finishFloat128(exp, frac1_hi, frac1_lo, result.parts.sign, frac2_hi);
+	return result;
 }
 
Index: uspace/lib/softfloat/generic/other.c
===================================================================
--- uspace/lib/softfloat/generic/other.c	(revision 750636ac4a65360c44846b7681af6ae46854dd3a)
+++ uspace/lib/softfloat/generic/other.c	(revision bb285b42d51bf15d56c4d20ab034e93a463a8b7f)
@@ -30,5 +30,5 @@
  * @{
  */
-/** @file
+/** @file Other functions (power, complex).
  */
 
Index: uspace/lib/softfloat/generic/softfloat.c
===================================================================
--- uspace/lib/softfloat/generic/softfloat.c	(revision 750636ac4a65360c44846b7681af6ae46854dd3a)
+++ uspace/lib/softfloat/generic/softfloat.c	(revision bb285b42d51bf15d56c4d20ab034e93a463a8b7f)
@@ -1,4 +1,5 @@
 /*
  * Copyright (c) 2005 Josef Cejka
+ * Copyright (c) 2011 Petr Koupy
  * All rights reserved.
  *
@@ -32,5 +33,5 @@
  * @{
  */
-/** @file
+/** @file Softfloat API.
  */
 
@@ -83,4 +84,20 @@
 }
 
+long double __addtf3(long double a, long double b)
+{
+	float128 ta, tb;
+	ta.ld = a;
+	tb.ld = b;
+	if (ta.parts.sign != tb.parts.sign) {
+		if (ta.parts.sign) {
+			ta.parts.sign = 0;
+			return subFloat128(tb, ta).ld;
+		}
+		tb.parts.sign = 0;
+		return subFloat128(ta, tb).ld;
+	}
+	return addFloat128(ta, tb).ld;
+}
+
 float __subsf3(float a, float b)
 {
@@ -107,4 +124,16 @@
 }
 
+long double __subtf3(long double a, long double b)
+{
+	float128 ta, tb;
+	ta.ld = a;
+	tb.ld = b;
+	if (ta.parts.sign != tb.parts.sign) {
+		tb.parts.sign = !tb.parts.sign;
+		return addFloat128(ta, tb).ld;
+	}
+	return subFloat128(ta, tb).ld;
+}
+
 float __mulsf3(float a, float b) 
 {
@@ -123,4 +152,12 @@
 }
 
+long double __multf3(long double a, long double b)
+{
+	float128 ta, tb;
+	ta.ld = a;
+	tb.ld = b;
+	return mulFloat128(ta, tb).ld;
+}
+
 float __divsf3(float a, float b) 
 {
@@ -139,4 +176,12 @@
 }
 
+long double __divtf3(long double a, long double b)
+{
+	float128 ta, tb;
+	ta.ld = a;
+	tb.ld = b;
+	return divFloat128(ta, tb).ld;
+}
+
 float __negsf2(float a)
 {
@@ -149,8 +194,16 @@
 double __negdf2(double a)
 {
-	float64 fa;
-	fa.d = a;
-	fa.parts.sign = !fa.parts.sign;
-	return fa.d;
+	float64 da;
+	da.d = a;
+	da.parts.sign = !da.parts.sign;
+	return da.d;
+}
+
+long double __negtf2(long double a)
+{
+	float128 ta;
+	ta.ld = a;
+	ta.parts.sign = !ta.parts.sign;
+	return ta.ld;
 }
 
@@ -164,4 +217,18 @@
 }
 
+long double __extendsftf2(float a)
+{
+	float32 fa;
+	fa.f = a;
+	return convertFloat32ToFloat128(fa).ld;
+}
+
+long double __extenddftf2(double a)
+{
+	float64 da;
+	da.d = a;
+	return convertFloat64ToFloat128(da).ld;
+}
+
 float __truncdfsf2(double a) 
 {
@@ -171,4 +238,18 @@
 }
 
+float __trunctfsf2(long double a)
+{
+	float128 ta;
+	ta.ld = a;
+	return convertFloat128ToFloat32(ta).f;
+}
+
+double __trunctfdf2(long double a)
+{
+	float128 ta;
+	ta.ld = a;
+	return convertFloat128ToFloat64(ta).d;
+}
+
 int __fixsfsi(float a)
 {
@@ -178,4 +259,5 @@
 	return float32_to_int(fa);
 }
+
 int __fixdfsi(double a)
 {
@@ -184,4 +266,12 @@
 	
 	return float64_to_int(da);
+}
+
+int __fixtfsi(long double a)
+{
+	float128 ta;
+	ta.ld = a;
+
+	return float128_to_int(ta);
 }
  
@@ -193,4 +283,5 @@
 	return float32_to_long(fa);
 }
+
 long __fixdfdi(double a)
 {
@@ -199,4 +290,12 @@
 	
 	return float64_to_long(da);
+}
+
+long __fixtfdi(long double a)
+{
+	float128 ta;
+	ta.ld = a;
+
+	return float128_to_long(ta);
 }
  
@@ -208,4 +307,5 @@
 	return float32_to_longlong(fa);
 }
+
 long long __fixdfti(double a)
 {
@@ -216,4 +316,12 @@
 }
 
+long long __fixtfti(long double a)
+{
+	float128 ta;
+	ta.ld = a;
+
+	return float128_to_longlong(ta);
+}
+
 unsigned int __fixunssfsi(float a)
 {
@@ -223,4 +331,5 @@
 	return float32_to_uint(fa);
 }
+
 unsigned int __fixunsdfsi(double a)
 {
@@ -229,4 +338,12 @@
 	
 	return float64_to_uint(da);
+}
+
+unsigned int __fixunstfsi(long double a)
+{
+	float128 ta;
+	ta.ld = a;
+
+	return float128_to_uint(ta);
 }
  
@@ -238,4 +355,5 @@
 	return float32_to_ulong(fa);
 }
+
 unsigned long __fixunsdfdi(double a)
 {
@@ -244,4 +362,12 @@
 	
 	return float64_to_ulong(da);
+}
+
+unsigned long __fixunstfdi(long double a)
+{
+	float128 ta;
+	ta.ld = a;
+
+	return float128_to_ulong(ta);
 }
  
@@ -253,4 +379,5 @@
 	return float32_to_ulonglong(fa);
 }
+
 unsigned long long __fixunsdfti(double a)
 {
@@ -259,4 +386,12 @@
 	
 	return float64_to_ulonglong(da);
+}
+
+unsigned long long __fixunstfti(long double a)
+{
+	float128 ta;
+	ta.ld = a;
+
+	return float128_to_ulonglong(ta);
 }
  
@@ -268,4 +403,5 @@
 	return fa.f;
 }
+
 double __floatsidf(int i)
 {
@@ -275,4 +411,12 @@
 	return da.d;
 }
+
+long double __floatsitf(int i)
+{
+	float128 ta;
+
+	ta = int_to_float128(i);
+	return ta.ld;
+}
  
 float __floatdisf(long i)
@@ -283,4 +427,5 @@
 	return fa.f;
 }
+
 double __floatdidf(long i)
 {
@@ -290,4 +435,12 @@
 	return da.d;
 }
+
+long double __floatditf(long i)
+{
+	float128 ta;
+
+	ta = long_to_float128(i);
+	return ta.ld;
+}
  
 float __floattisf(long long i)
@@ -298,4 +451,5 @@
 	return fa.f;
 }
+
 double __floattidf(long long i)
 {
@@ -306,4 +460,12 @@
 }
 
+long double __floattitf(long long i)
+{
+	float128 ta;
+
+	ta = longlong_to_float128(i);
+	return ta.ld;
+}
+
 float __floatunsisf(unsigned int i)
 {
@@ -313,4 +475,5 @@
 	return fa.f;
 }
+
 double __floatunsidf(unsigned int i)
 {
@@ -320,4 +483,12 @@
 	return da.d;
 }
+
+long double __floatunsitf(unsigned int i)
+{
+	float128 ta;
+
+	ta = uint_to_float128(i);
+	return ta.ld;
+}
  
 float __floatundisf(unsigned long i)
@@ -328,4 +499,5 @@
 	return fa.f;
 }
+
 double __floatundidf(unsigned long i)
 {
@@ -335,4 +507,12 @@
 	return da.d;
 }
+
+long double __floatunditf(unsigned long i)
+{
+	float128 ta;
+
+	ta = ulong_to_float128(i);
+	return ta.ld;
+}
  
 float __floatuntisf(unsigned long long i)
@@ -343,4 +523,5 @@
 	return fa.f;
 }
+
 double __floatuntidf(unsigned long long i)
 {
@@ -351,11 +532,13 @@
 }
 
+long double __floatuntitf(unsigned long long i)
+{
+	float128 ta;
+
+	ta = ulonglong_to_float128(i);
+	return ta.ld;
+}
+
 /* Comparison functions */
-/* Comparison functions */
-
-/* a<b .. -1
- * a=b ..  0
- * a>b ..  1
- * */
 
 int __cmpsf2(float a, float b) 
@@ -364,19 +547,62 @@
 	fa.f = a;
 	fb.f = b;
-	if ( (isFloat32NaN(fa)) || (isFloat32NaN(fb)) ) {
+
+	if ((isFloat32NaN(fa)) || (isFloat32NaN(fb))) {
 		return 1; /* no special constant for unordered - maybe signaled? */
-	};
-
+	}
 	
 	if (isFloat32eq(fa, fb)) {
 		return 0;
-	};
+	}
 	
 	if (isFloat32lt(fa, fb)) {
 		return -1;
-		};
+	}
+
 	return 1;
 }
 
+int __cmpdf2(double a, double b)
+{
+	float64 da, db;
+	da.d = a;
+	db.d = b;
+
+	if ((isFloat64NaN(da)) || (isFloat64NaN(db))) {
+		return 1; /* no special constant for unordered - maybe signaled? */
+	}
+
+	if (isFloat64eq(da, db)) {
+		return 0;
+	}
+
+	if (isFloat64lt(da, db)) {
+		return -1;
+	}
+
+	return 1;
+}
+
+int __cmptf2(long double a, long double b)
+{
+	float128 ta, tb;
+	ta.ld = a;
+	tb.ld = b;
+
+	if ((isFloat128NaN(ta)) || (isFloat128NaN(tb))) {
+		return 1; /* no special constant for unordered - maybe signaled? */
+	}
+
+	if (isFloat128eq(ta, tb)) {
+		return 0;
+	}
+
+	if (isFloat128lt(ta, tb)) {
+		return -1;
+	}
+
+	return 1;
+}
+
 int __unordsf2(float a, float b) 
 {
@@ -384,10 +610,23 @@
 	fa.f = a;
 	fb.f = b;
-	return ( (isFloat32NaN(fa)) || (isFloat32NaN(fb)) );
-}
-
-/** 
- * @return zero, if neither argument is a NaN and are equal
- * */
+	return ((isFloat32NaN(fa)) || (isFloat32NaN(fb)));
+}
+
+int __unorddf2(double a, double b)
+{
+	float64 da, db;
+	da.d = a;
+	db.d = b;
+	return ((isFloat64NaN(da)) || (isFloat64NaN(db)));
+}
+
+int __unordtf2(long double a, long double b)
+{
+	float128 ta, tb;
+	ta.ld = a;
+	tb.ld = b;
+	return ((isFloat128NaN(ta)) || (isFloat128NaN(tb)));
+}
+
 int __eqsf2(float a, float b) 
 {
@@ -395,18 +634,53 @@
 	fa.f = a;
 	fb.f = b;
-	if ( (isFloat32NaN(fa)) || (isFloat32NaN(fb)) ) {
-		/* TODO: sigNaNs*/
-		return 1;
-		};
+	if ((isFloat32NaN(fa)) || (isFloat32NaN(fb))) {
+		/* TODO: sigNaNs */
+		return 1;
+	}
 	return isFloat32eq(fa, fb) - 1;
 }
 
-/* strange behavior, but it was in gcc documentation */
+int __eqdf2(double a, double b)
+{
+	float64 da, db;
+	da.d = a;
+	db.d = b;
+	if ((isFloat64NaN(da)) || (isFloat64NaN(db))) {
+		/* TODO: sigNaNs */
+		return 1;
+	}
+	return isFloat64eq(da, db) - 1;
+}
+
+int __eqtf2(long double a, long double b)
+{
+	float128 ta, tb;
+	ta.ld = a;
+	tb.ld = b;
+	if ((isFloat128NaN(ta)) || (isFloat128NaN(tb))) {
+		/* TODO: sigNaNs */
+		return 1;
+	}
+	return isFloat128eq(ta, tb) - 1;
+}
+
 int __nesf2(float a, float b) 
 {
+	/* strange behavior, but it was in gcc documentation */
 	return __eqsf2(a, b);
 }
 
-/* return value >= 0 if a>=b and neither is NaN */
+int __nedf2(double a, double b)
+{
+	/* strange behavior, but it was in gcc documentation */
+	return __eqdf2(a, b);
+}
+
+int __netf2(long double a, long double b)
+{
+	/* strange behavior, but it was in gcc documentation */
+	return __eqtf2(a, b);
+}
+
 int __gesf2(float a, float b)
 {
@@ -414,21 +688,65 @@
 	fa.f = a;
 	fb.f = b;
-	if ( (isFloat32NaN(fa)) || (isFloat32NaN(fb)) ) {
-		/* TODO: sigNaNs*/
-		return -1;
-		};
+
+	if ((isFloat32NaN(fa)) || (isFloat32NaN(fb))) {
+		/* TODO: sigNaNs */
+		return -1;
+	}
 	
 	if (isFloat32eq(fa, fb)) {
 		return 0;
-	};
+	}
 	
 	if (isFloat32gt(fa, fb)) {
 		return 1;
-		};
+	}
 	
 	return -1;
 }
 
-/** Return negative value, if a<b and neither is NaN*/
+int __gedf2(double a, double b)
+{
+	float64 da, db;
+	da.d = a;
+	db.d = b;
+
+	if ((isFloat64NaN(da)) || (isFloat64NaN(db))) {
+		/* TODO: sigNaNs */
+		return -1;
+	}
+
+	if (isFloat64eq(da, db)) {
+		return 0;
+	}
+
+	if (isFloat64gt(da, db)) {
+		return 1;
+	}
+
+	return -1;
+}
+
+int __getf2(long double a, long double b)
+{
+	float128 ta, tb;
+	ta.ld = a;
+	tb.ld = b;
+
+	if ((isFloat128NaN(ta)) || (isFloat128NaN(tb))) {
+		/* TODO: sigNaNs */
+		return -1;
+	}
+
+	if (isFloat128eq(ta, tb)) {
+		return 0;
+	}
+
+	if (isFloat128gt(ta, tb)) {
+		return 1;
+	}
+
+	return -1;
+}
+
 int __ltsf2(float a, float b)
 {
@@ -436,15 +754,53 @@
 	fa.f = a;
 	fb.f = b;
-	if ( (isFloat32NaN(fa)) || (isFloat32NaN(fb)) ) {
-		/* TODO: sigNaNs*/
-		return 1;
-		};
+
+	if ((isFloat32NaN(fa)) || (isFloat32NaN(fb))) {
+		/* TODO: sigNaNs */
+		return 1;
+	}
+
 	if (isFloat32lt(fa, fb)) {
 		return -1;
-		};
+	}
+
 	return 0;
 }
 
-/* return value <= 0 if a<=b and neither is NaN */
+int __ltdf2(double a, double b)
+{
+	float64 da, db;
+	da.d = a;
+	db.d = b;
+
+	if ((isFloat64NaN(da)) || (isFloat64NaN(db))) {
+		/* TODO: sigNaNs */
+		return 1;
+	}
+
+	if (isFloat64lt(da, db)) {
+		return -1;
+	}
+
+	return 0;
+}
+
+int __lttf2(long double a, long double b)
+{
+	float128 ta, tb;
+	ta.ld = a;
+	tb.ld = b;
+
+	if ((isFloat128NaN(ta)) || (isFloat128NaN(tb))) {
+		/* TODO: sigNaNs */
+		return 1;
+	}
+
+	if (isFloat128lt(ta, tb)) {
+		return -1;
+	}
+
+	return 0;
+}
+
 int __lesf2(float a, float b)
 {
@@ -452,21 +808,65 @@
 	fa.f = a;
 	fb.f = b;
-	if ( (isFloat32NaN(fa)) || (isFloat32NaN(fb)) ) {
-		/* TODO: sigNaNs*/
-		return 1;
-		};
+
+	if ((isFloat32NaN(fa)) || (isFloat32NaN(fb))) {
+		/* TODO: sigNaNs */
+		return 1;
+	}
 	
 	if (isFloat32eq(fa, fb)) {
 		return 0;
-	};
+	}
 	
 	if (isFloat32lt(fa, fb)) {
 		return -1;
-		};
+	}
 	
 	return 1;
 }
 
-/** Return positive value, if a>b and neither is NaN*/
+int __ledf2(double a, double b)
+{
+	float64 da, db;
+	da.d = a;
+	db.d = b;
+
+	if ((isFloat64NaN(da)) || (isFloat64NaN(db))) {
+		/* TODO: sigNaNs */
+		return 1;
+	}
+
+	if (isFloat64eq(da, db)) {
+		return 0;
+	}
+
+	if (isFloat64lt(da, db)) {
+		return -1;
+	}
+
+	return 1;
+}
+
+int __letf2(long double a, long double b)
+{
+	float128 ta, tb;
+	ta.ld = a;
+	tb.ld = b;
+
+	if ((isFloat128NaN(ta)) || (isFloat128NaN(tb))) {
+		/* TODO: sigNaNs */
+		return 1;
+	}
+
+	if (isFloat128eq(ta, tb)) {
+		return 0;
+	}
+
+	if (isFloat128lt(ta, tb)) {
+		return -1;
+	}
+
+	return 1;
+}
+
 int __gtsf2(float a, float b)
 {
@@ -474,23 +874,250 @@
 	fa.f = a;
 	fb.f = b;
-	if ( (isFloat32NaN(fa)) || (isFloat32NaN(fb)) ) {
-		/* TODO: sigNaNs*/
-		return -1;
-		};
+
+	if ((isFloat32NaN(fa)) || (isFloat32NaN(fb))) {
+		/* TODO: sigNaNs */
+		return -1;
+	}
+
 	if (isFloat32gt(fa, fb)) {
 		return 1;
-		};
+	}
+
 	return 0;
 }
 
-/* Other functions */
-
-float __powisf2(float a, int b)
-{
-/* TODO: */
-	float32 fa;
-	fa.binary = FLOAT32_NAN;
-	return fa.f;
-}
+int __gtdf2(double a, double b)
+{
+	float64 da, db;
+	da.d = a;
+	db.d = b;
+
+	if ((isFloat64NaN(da)) || (isFloat64NaN(db))) {
+		/* TODO: sigNaNs */
+		return -1;
+	}
+
+	if (isFloat64gt(da, db)) {
+		return 1;
+	}
+
+	return 0;
+}
+
+int __gttf2(long double a, long double b)
+{
+	float128 ta, tb;
+	ta.ld = a;
+	tb.ld = b;
+
+	if ((isFloat128NaN(ta)) || (isFloat128NaN(tb))) {
+		/* TODO: sigNaNs */
+		return -1;
+	}
+
+	if (isFloat128gt(ta, tb)) {
+		return 1;
+	}
+
+	return 0;
+}
+
+
+
+#ifdef SPARC_SOFTFLOAT
+
+/* SPARC quadruple-precision wrappers */
+
+void _Qp_add(long double *c, long double *a, long double *b)
+{
+	*c = __addtf3(*a, *b);
+}
+
+void _Qp_sub(long double *c, long double *a, long double *b)
+{
+	*c = __subtf3(*a, *b);
+}
+
+void _Qp_mul(long double *c, long double *a, long double *b)
+{
+	*c = __multf3(*a, *b);
+}
+
+void _Qp_div(long double *c, long double *a, long double *b)
+{
+	*c = __divtf3(*a, *b);
+}
+
+void _Qp_neg(long double *c, long double *a)
+{
+	*c = __negtf2(*a);
+}
+
+void _Qp_stoq(long double *c, float a)
+{
+	*c = __extendsftf2(a);
+}
+
+void _Qp_dtoq(long double *c, double a)
+{
+	*c = __extenddftf2(a);
+}
+
+float _Qp_qtos(long double *a)
+{
+	return __trunctfsf2(*a);
+}
+
+double _Qp_qtod(long double *a)
+{
+	return __trunctfdf2(*a);
+}
+
+int _Qp_qtoi(long double *a)
+{
+	return __fixtfsi(*a);
+}
+
+unsigned int _Qp_qtoui(long double *a)
+{
+	return __fixunstfsi(*a);
+}
+
+long _Qp_qtox(long double *a)
+{
+	return __fixtfdi(*a);
+}
+
+unsigned long _Qp_qtoux(long double *a)
+{
+	return __fixunstfdi(*a);
+}
+
+void _Qp_itoq(long double *c, int a)
+{
+	*c = __floatsitf(a);
+}
+
+void _Qp_uitoq(long double *c, unsigned int a)
+{
+	*c = __floatunsitf(a);
+}
+
+void _Qp_xtoq(long double *c, long a)
+{
+	*c = __floatditf(a);
+}
+
+void _Qp_uxtoq(long double *c, unsigned long a)
+{
+	*c = __floatunditf(a);
+}
+
+int _Qp_cmp(long double *a, long double *b)
+{
+	float128 ta, tb;
+	ta.ld = *a;
+	tb.ld = *b;
+
+	if ((isFloat128NaN(ta)) || (isFloat128NaN(tb))) {
+		return 3;
+	}
+
+	if (isFloat128eq(ta, tb)) {
+		return 0;
+	}
+
+	if (isFloat128lt(ta, tb)) {
+		return 1;
+	}
+
+	return 2;
+}
+
+int _Qp_cmpe(long double *a, long double *b)
+{
+	/* strange, but it is defined this way in the SPARC Compliance Definition */
+	return _Qp_cmp(a, b);
+}
+
+int _Qp_feq(long double *a, long double *b)
+{
+	float128 ta, tb;
+	ta.ld = *a;
+	tb.ld = *b;
+
+	if ((isFloat128NaN(ta)) || (isFloat128NaN(tb))) {
+		return 0;
+	}
+
+	return isFloat128eq(ta, tb);
+}
+
+int _Qp_fge(long double *a, long double *b)
+{
+	float128 ta, tb;
+	ta.ld = *a;
+	tb.ld = *b;
+
+	if ((isFloat128NaN(ta)) || (isFloat128NaN(tb))) {
+		return 0;
+	}
+
+	return isFloat128eq(ta, tb) || isFloat128gt(ta, tb);
+}
+
+int _Qp_fgt(long double *a, long double *b)
+{
+	float128 ta, tb;
+	ta.ld = *a;
+	tb.ld = *b;
+
+	if ((isFloat128NaN(ta)) || (isFloat128NaN(tb))) {
+		return 0;
+	}
+
+	return isFloat128gt(ta, tb);
+}
+
+int _Qp_fle(long double*a, long double *b)
+{
+	float128 ta, tb;
+	ta.ld = *a;
+	tb.ld = *b;
+
+	if ((isFloat128NaN(ta)) || (isFloat128NaN(tb))) {
+		return 0;
+	}
+
+	return isFloat128eq(ta, tb) || isFloat128lt(ta, tb);
+}
+
+int _Qp_flt(long double *a, long double *b)
+{
+	float128 ta, tb;
+	ta.ld = *a;
+	tb.ld = *b;
+
+	if ((isFloat128NaN(ta)) || (isFloat128NaN(tb))) {
+		return 0;
+	}
+
+	return isFloat128lt(ta, tb);
+}
+
+int _Qp_fne(long double *a, long double *b)
+{
+	float128 ta, tb;
+	ta.ld = *a;
+	tb.ld = *b;
+
+	if ((isFloat128NaN(ta)) || (isFloat128NaN(tb))) {
+		return 1;
+	}
+
+	return !isFloat128eq(ta, tb);
+}
+
+#endif
 
 /** @}
Index: uspace/lib/softfloat/generic/sub.c
===================================================================
--- uspace/lib/softfloat/generic/sub.c	(revision 750636ac4a65360c44846b7681af6ae46854dd3a)
+++ uspace/lib/softfloat/generic/sub.c	(revision bb285b42d51bf15d56c4d20ab034e93a463a8b7f)
@@ -1,4 +1,5 @@
 /*
  * Copyright (c) 2005 Josef Cejka
+ * Copyright (c) 2011 Petr Koupy
  * All rights reserved.
  *
@@ -30,5 +31,5 @@
  * @{
  */
-/** @file
+/** @file Subtraction functions.
  */
 
@@ -36,6 +37,12 @@
 #include <sub.h>
 #include <comparison.h>
-
-/** Subtract two float32 numbers with same signs
+#include <common.h>
+
+/**
+ * Subtract two single-precision floats with the same signs.
+ *
+ * @param a First input operand.
+ * @param b Second input operand.
+ * @return Result of subtraction.
  */
 float32 subFloat32(float32 a, float32 b)
@@ -52,7 +59,7 @@
 			/* TODO: fix SigNaN */
 			if (isFloat32SigNaN(b)) {
-			};
-			return b;
-		};
+			}
+			return b;
+		}
 		
 		if (b.parts.exp == FLOAT32_MAX_EXPONENT) { 
@@ -72,7 +79,7 @@
 			/* TODO: fix SigNaN */
 			if (isFloat32SigNaN(a) || isFloat32SigNaN(b)) {
-			};
-			return a;
-		};
+			}
+			return a;
+		}
 		
 		if (a.parts.exp == FLOAT32_MAX_EXPONENT) { 
@@ -82,5 +89,5 @@
 				result.binary = FLOAT32_NAN;
 				return result;
-			};
+			}
 			return a;
 		}
@@ -92,16 +99,16 @@
 		frac2 = b.parts.fraction;
 		exp2 = b.parts.exp;	
-	};
+	}
 	
 	if (exp1 == 0) {
 		/* both are denormalized */
-		result.parts.fraction = frac1-frac2;
+		result.parts.fraction = frac1 - frac2;
 		if (result.parts.fraction > frac1) {
 			/* TODO: underflow exception */
 			return result;
-		};
+		}
 		result.parts.exp = 0;
 		return result;
-	};
+	}
 
 	/* add hidden bit */
@@ -114,5 +121,5 @@
 		/* normalized */
 		frac2 |= FLOAT32_HIDDEN_BIT_MASK;
-	};
+	}
 	
 	/* create some space for rounding */
@@ -121,8 +128,9 @@
 	
 	if (expdiff > FLOAT32_FRACTION_SIZE + 1) {
-	     goto done;	
-	     };
+		goto done;
+	}
 	
 	frac1 = frac1 - (frac2 >> expdiff);
+
 done:
 	/* TODO: find first nonzero digit and shift result and detect possibly underflow */
@@ -130,6 +138,6 @@
 		--exp1;
 		frac1 <<= 1;
-			/* TODO: fix underflow - frac1 == 0 does not necessary means underflow... */
-	};
+		/* TODO: fix underflow - frac1 == 0 does not necessarily mean underflow... */
+	}
 	
 	/* rounding - if first bit after fraction is set then round up */
@@ -139,7 +147,7 @@
 		++exp1;
 		frac1 >>= 1;
-	};
-	
-	/*Clear hidden bit and shift */
+	}
+	
+	/* Clear hidden bit and shift */
 	result.parts.fraction = ((frac1 >> 6) & (~FLOAT32_HIDDEN_BIT_MASK)); 
 	result.parts.exp = exp1;
@@ -148,5 +156,10 @@
 }
 
-/** Subtract two float64 numbers with same signs
+/**
+ * Subtract two double-precision floats with the same signs.
+ *
+ * @param a First input operand.
+ * @param b Second input operand.
+ * @return Result of subtraction.
  */
 float64 subFloat64(float64 a, float64 b)
@@ -164,7 +177,7 @@
 			/* TODO: fix SigNaN */
 			if (isFloat64SigNaN(b)) {
-			};
-			return b;
-		};
+			}
+			return b;
+		}
 		
 		if (b.parts.exp == FLOAT64_MAX_EXPONENT) { 
@@ -184,7 +197,7 @@
 			/* TODO: fix SigNaN */
 			if (isFloat64SigNaN(a) || isFloat64SigNaN(b)) {
-			};
-			return a;
-		};
+			}
+			return a;
+		}
 		
 		if (a.parts.exp == FLOAT64_MAX_EXPONENT) { 
@@ -194,5 +207,5 @@
 				result.binary = FLOAT64_NAN;
 				return result;
-			};
+			}
 			return a;
 		}
@@ -204,5 +217,5 @@
 		frac2 = b.parts.fraction;
 		exp2 = b.parts.exp;	
-	};
+	}
 	
 	if (exp1 == 0) {
@@ -212,8 +225,8 @@
 			/* TODO: underflow exception */
 			return result;
-		};
+		}
 		result.parts.exp = 0;
 		return result;
-	};
+	}
 
 	/* add hidden bit */
@@ -226,5 +239,5 @@
 		/* normalized */
 		frac2 |= FLOAT64_HIDDEN_BIT_MASK;
-	};
+	}
 	
 	/* create some space for rounding */
@@ -233,8 +246,9 @@
 	
 	if (expdiff > FLOAT64_FRACTION_SIZE + 1) {
-	     goto done;	
-	     };
+		goto done;
+	}
 	
 	frac1 = frac1 - (frac2 >> expdiff);
+
 done:
 	/* TODO: find first nonzero digit and shift result and detect possibly underflow */
@@ -242,6 +256,6 @@
 		--exp1;
 		frac1 <<= 1;
-			/* TODO: fix underflow - frac1 == 0 does not necessary means underflow... */
-	};
+		/* TODO: fix underflow - frac1 == 0 does not necessarily mean underflow... */
+	}
 	
 	/* rounding - if first bit after fraction is set then round up */
@@ -251,7 +265,7 @@
 		++exp1;
 		frac1 >>= 1;
-	};
-	
-	/*Clear hidden bit and shift */
+	}
+	
+	/* Clear hidden bit and shift */
 	result.parts.fraction = ((frac1 >> 6) & (~FLOAT64_HIDDEN_BIT_MASK)); 
 	result.parts.exp = exp1;
@@ -260,4 +274,152 @@
 }
 
+/**
+ * Subtract two quadruple-precision floats with the same signs.
+ *
+ * @param a First input operand.
+ * @param b Second input operand.
+ * @return Result of subtraction.
+ */
+float128 subFloat128(float128 a, float128 b)
+{
+	int expdiff;
+	uint32_t exp1, exp2;
+	uint64_t frac1_hi, frac1_lo, frac2_hi, frac2_lo, tmp_hi, tmp_lo;
+	float128 result;
+
+	result.binary.hi = 0;
+	result.binary.lo = 0;
+
+	expdiff = a.parts.exp - b.parts.exp;
+	if ((expdiff < 0 ) || ((expdiff == 0) &&
+	    lt128(a.parts.frac_hi, a.parts.frac_lo, b.parts.frac_hi, b.parts.frac_lo))) {
+		if (isFloat128NaN(b)) {
+			/* TODO: fix SigNaN */
+			if (isFloat128SigNaN(b)) {
+			}
+			return b;
+		}
+
+		if (b.parts.exp == FLOAT128_MAX_EXPONENT) {
+			b.parts.sign = !b.parts.sign; /* num -(+-inf) = -+inf */
+			return b;
+		}
+
+		result.parts.sign = !a.parts.sign;
+
+		frac1_hi = b.parts.frac_hi;
+		frac1_lo = b.parts.frac_lo;
+		exp1 = b.parts.exp;
+		frac2_hi = a.parts.frac_hi;
+		frac2_lo = a.parts.frac_lo;
+		exp2 = a.parts.exp;
+		expdiff *= -1;
+	} else {
+		if (isFloat128NaN(a)) {
+			/* TODO: fix SigNaN */
+			if (isFloat128SigNaN(a) || isFloat128SigNaN(b)) {
+			}
+			return a;
+		}
+
+		if (a.parts.exp == FLOAT128_MAX_EXPONENT) {
+			if (b.parts.exp == FLOAT128_MAX_EXPONENT) {
+				/* inf - inf => nan */
+				/* TODO: fix exception */
+				result.binary.hi = FLOAT128_NAN_HI;
+				result.binary.lo = FLOAT128_NAN_LO;
+				return result;
+			}
+			return a;
+		}
+
+		result.parts.sign = a.parts.sign;
+
+		frac1_hi = a.parts.frac_hi;
+		frac1_lo = a.parts.frac_lo;
+		exp1 = a.parts.exp;
+		frac2_hi = b.parts.frac_hi;
+		frac2_lo = b.parts.frac_lo;
+		exp2 = b.parts.exp;
+	}
+
+	if (exp1 == 0) {
+		/* both are denormalized */
+		sub128(frac1_hi, frac1_lo, frac2_hi, frac2_lo, &tmp_hi, &tmp_lo);
+		result.parts.frac_hi = tmp_hi;
+		result.parts.frac_lo = tmp_lo;
+		if (lt128(frac1_hi, frac1_lo, result.parts.frac_hi, result.parts.frac_lo)) {
+			/* TODO: underflow exception */
+			return result;
+		}
+		result.parts.exp = 0;
+		return result;
+	}
+
+	/* add hidden bit */
+	or128(frac1_hi, frac1_lo,
+	    FLOAT128_HIDDEN_BIT_MASK_HI, FLOAT128_HIDDEN_BIT_MASK_LO,
+	    &frac1_hi, &frac1_lo);
+
+	if (exp2 == 0) {
+		/* denormalized */
+		--expdiff;
+	} else {
+		/* normalized */
+		or128(frac2_hi, frac2_lo,
+		    FLOAT128_HIDDEN_BIT_MASK_HI, FLOAT128_HIDDEN_BIT_MASK_LO,
+		    &frac2_hi, &frac2_lo);
+	}
+
+	/* create some space for rounding */
+	lshift128(frac1_hi, frac1_lo, 6, &frac1_hi, &frac1_lo);
+	lshift128(frac2_hi, frac2_lo, 6, &frac2_hi, &frac2_lo);
+
+	if (expdiff > FLOAT128_FRACTION_SIZE + 1) {
+		goto done;
+	}
+
+	rshift128(frac2_hi, frac2_lo, expdiff, &tmp_hi, &tmp_lo);
+	sub128(frac1_hi, frac1_lo, tmp_hi, tmp_lo, &frac1_hi, &frac1_lo);
+
+done:
+	/* TODO: find first nonzero digit and shift result and detect possible underflow */
+	lshift128(FLOAT128_HIDDEN_BIT_MASK_HI, FLOAT128_HIDDEN_BIT_MASK_LO, 6,
+	    &tmp_hi, &tmp_lo);
+	and128(frac1_hi, frac1_lo, tmp_hi, tmp_lo, &tmp_hi, &tmp_lo);
+	while ((exp1 > 0) && (!lt128(0x0ll, 0x0ll, tmp_hi, tmp_lo))) {
+		--exp1;
+		lshift128(frac1_hi, frac1_lo, 1, &frac1_hi, &frac1_lo);
+	/* TODO: fix underflow - frac1 == 0 does not necessarily mean underflow... */
+
+		lshift128(FLOAT128_HIDDEN_BIT_MASK_HI, FLOAT128_HIDDEN_BIT_MASK_LO, 6,
+		    &tmp_hi, &tmp_lo);
+		and128(frac1_hi, frac1_lo, tmp_hi, tmp_lo, &tmp_hi, &tmp_lo);
+	}
+
+	/* rounding - if first bit after fraction is set then round up */
+	add128(frac1_hi, frac1_lo, 0x0ll, 0x20ll, &frac1_hi, &frac1_lo);
+
+	lshift128(FLOAT128_HIDDEN_BIT_MASK_HI, FLOAT128_HIDDEN_BIT_MASK_LO, 7,
+	   &tmp_hi, &tmp_lo);
+	and128(frac1_hi, frac1_lo, tmp_hi, tmp_lo, &tmp_hi, &tmp_lo);
+	if (lt128(0x0ll, 0x0ll, tmp_hi, tmp_lo)) {
+		++exp1;
+		rshift128(frac1_hi, frac1_lo, 1, &frac1_hi, &frac1_lo);
+	}
+
+	/* Clear hidden bit and shift */
+	rshift128(frac1_hi, frac1_lo, 6, &frac1_hi, &frac1_lo);
+	not128(FLOAT128_HIDDEN_BIT_MASK_HI, FLOAT128_HIDDEN_BIT_MASK_LO,
+	    &tmp_hi, &tmp_lo);
+	and128(frac1_hi, frac1_lo, tmp_hi, tmp_lo, &tmp_hi, &tmp_lo);
+	result.parts.frac_hi = tmp_hi;
+	result.parts.frac_lo = tmp_lo;
+
+	result.parts.exp = exp1;
+
+	return result;
+}
+
 /** @}
  */
