Index: uspace/lib/usb/src/dma_buffer.c
===================================================================
--- uspace/lib/usb/src/dma_buffer.c	(revision 3e6ff9a5d49f640854fbd4c5b5c21e928a71bc1d)
+++ uspace/lib/usb/src/dma_buffer.c	(revision d345ce2f2396010ea2fd5ad90e85202da83d5197)
@@ -39,21 +39,21 @@
 #include "usb/dma_buffer.h"
 
-const dma_policy_t dma_policy_default = {
-	.flags = DMA_POLICY_F_4GiB | DMA_POLICY_F_CONTIGUOUS,
-};
-
-/**
- * The routine of allocating a DMA buffer. Inlined to force optimization for the
- * default policy.
- *
- * FIXME: We ignore the non-presence of contiguous flag, for now.
- */
-static inline int dma_buffer_alloc_internal(dma_buffer_t *db,
-    size_t size, const dma_policy_t *policy)
+/**
+ * Allocate a DMA buffer.
+ *
+ * XXX: Currently cannot take advantage of relaxed constraints, as it always
+ * allocates a page-aligned contiguous buffer. dma_buffer_phys relies on this.
+ *
+ * @param[in] db dma_buffer_t structure to fill
+ * @param[in] size Size of the required memory space
+ * @param[in] policy dma_policy_t flags to guide the allocation
+ * @return Error code.
+ */
+errno_t dma_buffer_alloc_policy(dma_buffer_t *db, size_t size, dma_policy_t policy)
 {
 	assert(db);
 
 	const size_t real_size = ALIGN_UP(size, PAGE_SIZE);
-	const bool need_4gib = !!(policy->flags & DMA_POLICY_F_4GiB);
+	const bool need_4gib = !!(policy & DMA_POLICY_4GiB);
 
 	const uintptr_t flags = need_4gib ? DMAMEM_4GiB : 0;
@@ -67,4 +67,9 @@
 
 	if (ret == EOK) {
+		/* Access the pages to force mapping */
+		volatile char *buf = address;
+		for (size_t i = 0; i < size; i += PAGE_SIZE)
+			buf[i] = 0xff;
+
 		db->virt = address;
 		db->phys = phys;
@@ -73,17 +78,4 @@
 }
 
-/**
- * Allocate a DMA buffer.
- *
- * @param[in] db dma_buffer_t structure to fill
- * @param[in] size Size of the required memory space
- * @param[in] policy dma_policy_t structure to guide
- * @return Error code.
- */
-int dma_buffer_alloc_policy(dma_buffer_t *db, size_t size,
-    const dma_policy_t *policy)
-{
-	return dma_buffer_alloc_internal(db, size, policy);
-}
 
 /**
@@ -94,7 +86,7 @@
  * @return Error code.
  */
-int dma_buffer_alloc(dma_buffer_t *db, size_t size)
-{
-	return dma_buffer_alloc_internal(db, size, &dma_policy_default);
+errno_t dma_buffer_alloc(dma_buffer_t *db, size_t size)
+{
+	return dma_buffer_alloc_policy(db, size, DMA_POLICY_DEFAULT);
 }
 
@@ -128,15 +120,25 @@
  * Check whether a memory area is compatible with a policy.
  *
- * Useful to skip copying, if the buffer is already ready to be given to
- * hardware.
- */
-bool dma_buffer_check_policy(const void *buffer, size_t size, dma_policy_t *policy)
-{
-	/* Buffer must be always page aligned */
-	if (((uintptr_t) buffer) % PAGE_SIZE)
+ * Useful to skip copying when the buffer is already ready to be given to
+ * hardware as is.
+ *
+ * Note that as_get_physical_mapping() fails when the page is not yet
+ * mapped; the caller is responsible for forcing the mapping beforehand.
+ */
+bool dma_buffer_check_policy(const void *buffer, size_t size, const dma_policy_t policy)
+{
+	uintptr_t addr = (uintptr_t) buffer;
+
+	const bool check_4gib       = !!(policy & DMA_POLICY_4GiB);
+	const bool check_crossing   = !!(policy & DMA_POLICY_NOT_CROSSING);
+	const bool check_alignment  = !!(policy & DMA_POLICY_PAGE_ALIGNED);
+	const bool check_contiguous = !!(policy & DMA_POLICY_CONTIGUOUS);
+
+	/* Check the two conditions that are easy */
+	if (check_crossing && (addr + size - 1) / PAGE_SIZE != addr / PAGE_SIZE)
 		goto violated;
 
-	const bool check_4gib = !!(policy->flags & DMA_POLICY_F_4GiB);
-	const bool check_contiguous = !!(policy->flags & DMA_POLICY_F_CONTIGUOUS);
+	if (check_alignment && ((uintptr_t) buffer) % PAGE_SIZE)
+		goto violated;
 
 	/*
@@ -145,5 +147,5 @@
 	 */
 	if (check_contiguous || check_4gib) {
-		const void * virt = buffer;
+		const void *virt = buffer;
 		uintptr_t phys;
 
@@ -156,5 +158,5 @@
 			goto violated;
 
-		while (size <= PAGE_SIZE) {
+		while (size >= PAGE_SIZE) {
 			/* Move to the next page */
 			virt += PAGE_SIZE;
@@ -182,4 +184,29 @@
 
 /**
+ * Lock an arbitrary buffer for DMA operations, creating a DMA buffer.
+ *
+ * FIXME: To handle page-unaligned buffers, we need to calculate the base
+ *        address and lock the whole first page. But as the operation is not yet
+ *        implemented in the kernel, it doesn't matter.
+ */
+errno_t dma_buffer_lock(dma_buffer_t *db, void *virt, size_t size)
+{
+	db->virt = virt;
+	return dmamem_map(db->virt, size, 0, 0, &db->phys);
+}
+
+/**
+ * Unlock a buffer for DMA operations.
+ */
+void dma_buffer_unlock(dma_buffer_t *db, size_t size)
+{
+	if (db->virt) {
+		dmamem_unmap(db->virt, size);
+		db->virt = NULL;
+		db->phys = 0;
+	}
+}
+
+/**
  * @}
  */
