Index: kernel/generic/src/cpu/cpu.c
===================================================================
--- kernel/generic/src/cpu/cpu.c	(revision 1cac8753dba1dcd1e470087cba35d1d4f93975a2)
+++ kernel/generic/src/cpu/cpu.c	(revision 8a8771cf9d051fa7bdc587055c5e7041b8292e25)
@@ -1,3 +1,3 @@
- /*
+/*
  * Copyright (c) 2001-2004 Jakub Jermar
  * All rights reserved.
Index: kernel/generic/src/log/log.c
===================================================================
--- kernel/generic/src/log/log.c	(revision 1cac8753dba1dcd1e470087cba35d1d4f93975a2)
+++ kernel/generic/src/log/log.c	(revision 8a8771cf9d051fa7bdc587055c5e7041b8292e25)
@@ -63,5 +63,5 @@
 
 /** Kernel log initialized */
-static atomic_t log_inited = {false};
+static atomic_t log_inited = { false };
 
 /** Position in the cyclic buffer where the first log entry starts */
@@ -97,5 +97,6 @@
 }
 
-static size_t log_copy_from(uint8_t *data, size_t pos, size_t len) {
+static size_t log_copy_from(uint8_t *data, size_t pos, size_t len)
+{
 	for (size_t i = 0; i < len; i++, pos = (pos + 1) % LOG_LENGTH) {
 		data[i] = log_buffer[pos];
@@ -104,5 +105,6 @@
 }
 
-static size_t log_copy_to(const uint8_t *data, size_t pos, size_t len) {
+static size_t log_copy_to(const uint8_t *data, size_t pos, size_t len)
+{
 	for (size_t i = 0; i < len; i++, pos = (pos + 1) % LOG_LENGTH) {
 		log_buffer[pos] = data[i];
@@ -170,5 +172,6 @@
  * This releases the log and output buffer locks.
  */
-void log_end(void) {
+void log_end(void)
+{
 	/* Set the length in the header to correct value */
 	log_copy_to((uint8_t *) &log_current_len, log_current_start, sizeof(size_t));
@@ -303,81 +306,81 @@
 
 	switch (operation) {
-		case KLOG_WRITE:
-			data = (char *) malloc(size + 1, 0);
-			if (!data)
-				return (sys_errno_t) ENOMEM;
-
-			rc = copy_from_uspace(data, buf, size);
-			if (rc) {
-				free(data);
-				return (sys_errno_t) rc;
+	case KLOG_WRITE:
+		data = (char *) malloc(size + 1, 0);
+		if (!data)
+			return (sys_errno_t) ENOMEM;
+
+		rc = copy_from_uspace(data, buf, size);
+		if (rc) {
+			free(data);
+			return (sys_errno_t) rc;
+		}
+		data[size] = 0;
+
+		if (level >= LVL_LIMIT)
+			level = LVL_NOTE;
+
+		log(LF_USPACE, level, "%s", data);
+
+		free(data);
+		return EOK;
+	case KLOG_READ:
+		data = (char *) malloc(size, 0);
+		if (!data)
+			return (sys_errno_t) ENOMEM;
+
+		size_t entry_len = 0;
+		size_t copied = 0;
+
+		rc = EOK;
+
+		spinlock_lock(&log_lock);
+
+		while (next_for_uspace < log_used) {
+			size_t pos = (log_start + next_for_uspace) % LOG_LENGTH;
+			log_copy_from((uint8_t *) &entry_len, pos, sizeof(size_t));
+
+			if (entry_len > PAGE_SIZE) {
+				/*
+				 * Since we limit data transfer
+				 * to uspace to a maximum of PAGE_SIZE
+				 * bytes, skip any entries larger
+				 * than this limit to prevent
+				 * userspace being stuck trying to
+				 * read them.
+				 */
+				next_for_uspace += entry_len;
+				continue;
 			}
-			data[size] = 0;
-
-			if (level >= LVL_LIMIT)
-				level = LVL_NOTE;
-
-			log(LF_USPACE, level, "%s", data);
-
+
+			if (size < copied + entry_len) {
+				if (copied == 0)
+					rc = EOVERFLOW;
+				break;
+			}
+
+			log_copy_from((uint8_t *) (data + copied), pos, entry_len);
+			copied += entry_len;
+			next_for_uspace += entry_len;
+		}
+
+		spinlock_unlock(&log_lock);
+
+		if (rc != EOK) {
 			free(data);
-			return EOK;
-		case KLOG_READ:
-			data = (char *) malloc(size, 0);
-			if (!data)
-				return (sys_errno_t) ENOMEM;
-
-			size_t entry_len = 0;
-			size_t copied = 0;
-
-			rc = EOK;
-
-			spinlock_lock(&log_lock);
-
-			while (next_for_uspace < log_used) {
-				size_t pos = (log_start + next_for_uspace) % LOG_LENGTH;
-				log_copy_from((uint8_t *) &entry_len, pos, sizeof(size_t));
-
-				if (entry_len > PAGE_SIZE) {
-					/*
-					 * Since we limit data transfer
-					 * to uspace to a maximum of PAGE_SIZE
-					 * bytes, skip any entries larger
-					 * than this limit to prevent
-					 * userspace being stuck trying to
-					 * read them.
-					 */
-					next_for_uspace += entry_len;
-					continue;
-				}
-
-				if (size < copied + entry_len) {
-					if (copied == 0)
-						rc = EOVERFLOW;
-					break;
-				}
-
-				log_copy_from((uint8_t *) (data + copied), pos, entry_len);
-				copied += entry_len;
-				next_for_uspace += entry_len;
-			}
-
-			spinlock_unlock(&log_lock);
-
-			if (rc != EOK) {
-				free(data);
-				return (sys_errno_t) rc;
-			}
-
-			rc = copy_to_uspace(buf, data, size);
-
-			free(data);
-
-			if (rc != EOK)
-				return (sys_errno_t) rc;
-
-			return copy_to_uspace(uspace_nread, &copied, sizeof(copied));
-			return EOK;
-		default:
-			return (sys_errno_t) ENOTSUP;
+			return (sys_errno_t) rc;
+		}
+
+		rc = copy_to_uspace(buf, data, size);
+
+		free(data);
+
+		if (rc != EOK)
+			return (sys_errno_t) rc;
+
+		rc = copy_to_uspace(uspace_nread, &copied, sizeof(copied));
+		return (sys_errno_t) rc;
+	default:
+		return (sys_errno_t) ENOTSUP;
 	}
 }
Index: kernel/generic/src/mm/as.c
===================================================================
--- kernel/generic/src/mm/as.c	(revision 1cac8753dba1dcd1e470087cba35d1d4f93975a2)
+++ kernel/generic/src/mm/as.c	(revision 8a8771cf9d051fa7bdc587055c5e7041b8292e25)
@@ -641,8 +641,8 @@
 
 	/*
- 	 * Create the sharing info structure.
- 	 * We do this in advance for every new area, even if it is not going
- 	 * to be shared.
- 	 */
+	 * Create the sharing info structure.
+	 * We do this in advance for every new area, even if it is not going
+	 * to be shared.
+	 */
 	if (!(attrs & AS_AREA_ATTR_PARTIAL)) {
 		si = (share_info_t *) malloc(sizeof(share_info_t), 0);
Index: kernel/generic/src/proc/task.c
===================================================================
--- kernel/generic/src/proc/task.c	(revision 1cac8753dba1dcd1e470087cba35d1d4f93975a2)
+++ kernel/generic/src/proc/task.c	(revision 8a8771cf9d051fa7bdc587055c5e7041b8292e25)
@@ -572,5 +572,5 @@
 	 * The notification is always available, but unless udebug is enabled,
 	 * that's all you get.
-	*/
+	 */
 	if (notify) {
 		/* Notify the subscriber that a fault occurred. */
