source: mainline/uspace/lib/block/libblock.c@ 764d71e

/*
 * Copyright (c) 2008 Jakub Jermar
 * Copyright (c) 2008 Martin Decky
 * Copyright (c) 2011 Martin Sucha
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup libblock
 * @{
 */
/**
 * @file
 * @brief Client library for accessing block devices, with block caching.
 */

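/*
 * Typical usage, as a minimal sketch (error handling mostly omitted; the
 * handle is assumed to have been resolved via devmap by the caller, buf is
 * a sufficiently large caller buffer, and the sizes and counts are just
 * examples):
 *
 *	block_init(handle, 2048);
 *	block_cache_init(handle, 1024, 100, CACHE_MODE_WB);
 *
 *	block_t *b;
 *	if (block_get(&b, handle, 0, BLOCK_FLAGS_NONE) == EOK) {
 *		memcpy(buf, b->data, b->size);
 *		block_put(b);
 *	}
 *
 *	block_cache_fini(handle);
 *	block_fini(handle);
 */
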
#include "libblock.h"
#include "../../srv/vfs/vfs.h"
#include <ipc/devmap.h>
#include <ipc/bd.h>
#include <ipc/services.h>
#include <errno.h>
#include <sys/mman.h>
#include <async.h>
#include <as.h>
#include <assert.h>
#include <fibril_synch.h>
#include <adt/list.h>
#include <adt/hash_table.h>
#include <macros.h>
#include <mem.h>
#include <malloc.h>
#include <stdio.h>
#include <sys/typefmt.h>
#include <stacktrace.h>

/** Lock protecting the device connection list. */
static FIBRIL_MUTEX_INITIALIZE(dcl_lock);
/** Device connection list head. */
static LIST_INITIALIZE(dcl_head);

#define CACHE_BUCKETS_LOG2	10
#define CACHE_BUCKETS		(1 << CACHE_BUCKETS_LOG2)

typedef struct {
	fibril_mutex_t lock;
	size_t lblock_size;		/**< Logical block size. */
	unsigned blocks_cluster;	/**< Physical blocks per block_t. */
	unsigned block_count;		/**< Total number of blocks. */
	unsigned blocks_cached;		/**< Number of cached blocks. */
	hash_table_t block_hash;
	link_t free_head;
	enum cache_mode mode;
} cache_t;

typedef struct {
	link_t link;
	devmap_handle_t devmap_handle;
	int dev_phone;
	fibril_mutex_t comm_area_lock;
	void *comm_area;
	size_t comm_size;
	void *bb_buf;
	aoff64_t bb_addr;
	size_t pblock_size;		/**< Physical block size. */
	cache_t *cache;
} devcon_t;

static int read_blocks(devcon_t *devcon, aoff64_t ba, size_t cnt);
static int write_blocks(devcon_t *devcon, aoff64_t ba, size_t cnt);
static int get_block_size(int dev_phone, size_t *bsize);
static int get_num_blocks(int dev_phone, aoff64_t *nblocks);
static aoff64_t ba_ltop(devcon_t *devcon, aoff64_t lba);

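/** Find the device connection for a given device handle.
 *
 * @param devmap_handle	Device handle of the block device.
 *
 * @return Pointer to the device connection or NULL if not found.
 */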
static devcon_t *devcon_search(devmap_handle_t devmap_handle)
{
	link_t *cur;

	fibril_mutex_lock(&dcl_lock);
	for (cur = dcl_head.next; cur != &dcl_head; cur = cur->next) {
		devcon_t *devcon = list_get_instance(cur, devcon_t, link);
		if (devcon->devmap_handle == devmap_handle) {
			fibril_mutex_unlock(&dcl_lock);
			return devcon;
		}
	}
	fibril_mutex_unlock(&dcl_lock);
	return NULL;
}

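/** Create a new device connection and link it into the connection list.
 *
 * @param devmap_handle	Device handle of the block device.
 * @param dev_phone	Phone connected to the device driver.
 * @param bsize		Physical block size of the device.
 * @param comm_area	Memory area shared with the device driver.
 * @param comm_size	Size of the shared memory area.
 *
 * @return EOK on success, EINVAL if the shared area is smaller than one
 *	block, ENOMEM on allocation failure or EEXIST if a connection for
 *	the handle already exists.
 */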
static int devcon_add(devmap_handle_t devmap_handle, int dev_phone, size_t bsize,
    void *comm_area, size_t comm_size)
{
	link_t *cur;
	devcon_t *devcon;

	if (comm_size < bsize)
		return EINVAL;

	devcon = malloc(sizeof(devcon_t));
	if (!devcon)
		return ENOMEM;

	link_initialize(&devcon->link);
	devcon->devmap_handle = devmap_handle;
	devcon->dev_phone = dev_phone;
	fibril_mutex_initialize(&devcon->comm_area_lock);
	devcon->comm_area = comm_area;
	devcon->comm_size = comm_size;
	devcon->bb_buf = NULL;
	devcon->bb_addr = 0;
	devcon->pblock_size = bsize;
	devcon->cache = NULL;

	fibril_mutex_lock(&dcl_lock);
	for (cur = dcl_head.next; cur != &dcl_head; cur = cur->next) {
		devcon_t *d = list_get_instance(cur, devcon_t, link);
		if (d->devmap_handle == devmap_handle) {
			fibril_mutex_unlock(&dcl_lock);
			free(devcon);
			return EEXIST;
		}
	}
	list_append(&devcon->link, &dcl_head);
	fibril_mutex_unlock(&dcl_lock);
	return EOK;
}

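/** Unlink a device connection from the connection list. */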
static void devcon_remove(devcon_t *devcon)
{
	fibril_mutex_lock(&dcl_lock);
	list_remove(&devcon->link);
	fibril_mutex_unlock(&dcl_lock);
}

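/** Initialize the block device service for a device.
 *
 * Maps a communication area, connects to the device driver and shares the
 * area with it.
 *
 * @param devmap_handle	Device handle of the block device.
 * @param comm_size	Size of the communication area to be allocated.
 *
 * @return EOK on success or a negative error code.
 */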
int block_init(devmap_handle_t devmap_handle, size_t comm_size)
{
	int rc;
	int dev_phone;
	void *comm_area;
	size_t bsize;

	comm_area = mmap(NULL, comm_size, PROTO_READ | PROTO_WRITE,
	    MAP_ANONYMOUS | MAP_PRIVATE, 0, 0);
	if (!comm_area) {
		return ENOMEM;
	}

	dev_phone = devmap_device_connect(devmap_handle, IPC_FLAG_BLOCKING);
	if (dev_phone < 0) {
		munmap(comm_area, comm_size);
		return dev_phone;
	}

	rc = async_share_out_start(dev_phone, comm_area,
	    AS_AREA_READ | AS_AREA_WRITE);
	if (rc != EOK) {
		munmap(comm_area, comm_size);
		async_hangup(dev_phone);
		return rc;
	}

	rc = get_block_size(dev_phone, &bsize);
	if (rc != EOK) {
		munmap(comm_area, comm_size);
		async_hangup(dev_phone);
		return rc;
	}

	rc = devcon_add(devmap_handle, dev_phone, bsize, comm_area, comm_size);
	if (rc != EOK) {
		munmap(comm_area, comm_size);
		async_hangup(dev_phone);
		return rc;
	}

	return EOK;
}

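/** Finalize the block device service for a device.
 *
 * Destroys the cache (if any), releases the boot block buffer and the
 * communication area and hangs up the device phone.
 *
 * @param devmap_handle	Device handle of the block device.
 */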
void block_fini(devmap_handle_t devmap_handle)
{
	devcon_t *devcon = devcon_search(devmap_handle);
	assert(devcon);

	if (devcon->cache)
		(void) block_cache_fini(devmap_handle);

	devcon_remove(devcon);

	if (devcon->bb_buf)
		free(devcon->bb_buf);

	munmap(devcon->comm_area, devcon->comm_size);
	async_hangup(devcon->dev_phone);

	free(devcon);
}

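/** Read the boot block of a device into a separate buffer.
 *
 * @param devmap_handle	Device handle of the block device.
 * @param ba		Address of the boot block (physical).
 *
 * @return EOK on success or a negative error code.
 */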
int block_bb_read(devmap_handle_t devmap_handle, aoff64_t ba)
{
	void *bb_buf;
	int rc;

	devcon_t *devcon = devcon_search(devmap_handle);
	if (!devcon)
		return ENOENT;
	if (devcon->bb_buf)
		return EEXIST;
	bb_buf = malloc(devcon->pblock_size);
	if (!bb_buf)
		return ENOMEM;

	fibril_mutex_lock(&devcon->comm_area_lock);
	rc = read_blocks(devcon, ba, 1);
	if (rc != EOK) {
		fibril_mutex_unlock(&devcon->comm_area_lock);
		free(bb_buf);
		return rc;
	}
	memcpy(bb_buf, devcon->comm_area, devcon->pblock_size);
	fibril_mutex_unlock(&devcon->comm_area_lock);

	devcon->bb_buf = bb_buf;
	devcon->bb_addr = ba;

	return EOK;
}

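/** Return the boot block buffer previously read by block_bb_read(). */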
void *block_bb_get(devmap_handle_t devmap_handle)
{
	devcon_t *devcon = devcon_search(devmap_handle);
	assert(devcon);
	return devcon->bb_buf;
}

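/** Hash function for the block cache hash table. */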
static hash_index_t cache_hash(unsigned long *key)
{
	return *key & (CACHE_BUCKETS - 1);
}

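/** Compare a hash table item with a key (the logical block address). */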
static int cache_compare(unsigned long *key, hash_count_t keys, link_t *item)
{
	block_t *b = hash_table_get_instance(item, block_t, hash_link);
	return b->lba == *key;
}

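/** Hash table removal callback; nothing to do here. */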
static void cache_remove_callback(link_t *item)
{
}

static hash_table_operations_t cache_ops = {
	.hash = cache_hash,
	.compare = cache_compare,
	.remove_callback = cache_remove_callback
};

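/** Create a block cache for a device.
 *
 * @param devmap_handle	Device handle of the block device.
 * @param size		Logical block size of the cache.
 * @param blocks	Total number of logical blocks on the device.
 * @param mode		Cache mode (write-through or write-back).
 *
 * @return EOK on success, ENOENT if the device is not known, EEXIST if a
 *	cache already exists, ENOTSUP if the logical block size is not a
 *	multiple of the physical block size or ENOMEM on allocation failure.
 */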
int block_cache_init(devmap_handle_t devmap_handle, size_t size, unsigned blocks,
    enum cache_mode mode)
{
	devcon_t *devcon = devcon_search(devmap_handle);
	cache_t *cache;
	if (!devcon)
		return ENOENT;
	if (devcon->cache)
		return EEXIST;
	cache = malloc(sizeof(cache_t));
	if (!cache)
		return ENOMEM;

	fibril_mutex_initialize(&cache->lock);
	list_initialize(&cache->free_head);
	cache->lblock_size = size;
	cache->block_count = blocks;
	cache->blocks_cached = 0;
	cache->mode = mode;

	/* Allow 1:1 or small-to-large block size translation. */
	if (cache->lblock_size % devcon->pblock_size != 0) {
		free(cache);
		return ENOTSUP;
	}

	cache->blocks_cluster = cache->lblock_size / devcon->pblock_size;

	if (!hash_table_create(&cache->block_hash, CACHE_BUCKETS, 1,
	    &cache_ops)) {
		free(cache);
		return ENOMEM;
	}

	devcon->cache = cache;
	return EOK;
}

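/** Destroy the block cache of a device, writing back dirty blocks.
 *
 * @param devmap_handle	Device handle of the block device.
 *
 * @return EOK on success or a negative error code.
 */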
int block_cache_fini(devmap_handle_t devmap_handle)
{
	devcon_t *devcon = devcon_search(devmap_handle);
	cache_t *cache;
	int rc;

	if (!devcon)
		return ENOENT;
	if (!devcon->cache)
		return EOK;
	cache = devcon->cache;

	/*
	 * We are expecting to find all blocks for this device handle on the
	 * free list, i.e. the block reference count should be zero. Do not
	 * bother with the cache and block locks because we are single-threaded.
	 */
	while (!list_empty(&cache->free_head)) {
		block_t *b = list_get_instance(cache->free_head.next,
		    block_t, free_link);

		list_remove(&b->free_link);
		if (b->dirty) {
			memcpy(devcon->comm_area, b->data, b->size);
			rc = write_blocks(devcon, b->pba, cache->blocks_cluster);
			if (rc != EOK)
				return rc;
		}

		unsigned long key = b->lba;
		hash_table_remove(&cache->block_hash, &key, 1);

		free(b->data);
		free(b);
	}

	hash_table_destroy(&cache->block_hash);
	devcon->cache = NULL;
	free(cache);

	return EOK;
}

#define CACHE_LO_WATERMARK	10
#define CACHE_HI_WATERMARK	20
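/** Decide whether the cache can accommodate a newly allocated block.
 *
 * The cache is allowed to grow while it holds fewer than CACHE_LO_WATERMARK
 * blocks and also whenever the free list is empty, i.e. when all cached
 * blocks are referenced.
 */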
static bool cache_can_grow(cache_t *cache)
{
	if (cache->blocks_cached < CACHE_LO_WATERMARK)
		return true;
	if (!list_empty(&cache->free_head))
		return false;
	return true;
}

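/** Initialize the fields of a newly allocated or recycled block. */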
static void block_initialize(block_t *b)
{
	fibril_mutex_initialize(&b->lock);
	b->refcnt = 1;
	b->dirty = false;
	b->toxic = false;
	fibril_rwlock_initialize(&b->contents_lock);
	link_initialize(&b->free_link);
	link_initialize(&b->hash_link);
}

/** Instantiate a block in memory and get a reference to it.
 *
 * @param block		Pointer to where the function will store the
 *			block pointer on success.
 * @param devmap_handle	Device handle of the block device.
 * @param ba		Block address (logical).
 * @param flags		If BLOCK_FLAGS_NOREAD is specified, block_get()
 *			will not read the contents of the block from the
 *			device.
 *
 * @return EOK on success or a negative error code.
 */
int block_get(block_t **block, devmap_handle_t devmap_handle, aoff64_t ba, int flags)
{
	devcon_t *devcon;
	cache_t *cache;
	block_t *b;
	link_t *l;
	unsigned long key = ba;
	int rc;

	devcon = devcon_search(devmap_handle);

	assert(devcon);
	assert(devcon->cache);

	cache = devcon->cache;

retry:
	rc = EOK;
	b = NULL;

	fibril_mutex_lock(&cache->lock);
	l = hash_table_find(&cache->block_hash, &key);
	if (l) {
found:
		/*
		 * We found the block in the cache.
		 */
		b = hash_table_get_instance(l, block_t, hash_link);
		fibril_mutex_lock(&b->lock);
		if (b->refcnt++ == 0)
			list_remove(&b->free_link);
		if (b->toxic)
			rc = EIO;
		fibril_mutex_unlock(&b->lock);
		fibril_mutex_unlock(&cache->lock);
	} else {
		/*
		 * The block was not found in the cache.
		 */
		if (cache_can_grow(cache)) {
			/*
			 * We can grow the cache by allocating new blocks.
			 * Should the allocation fail, we fail over and try to
			 * recycle a block from the cache.
			 */
			b = malloc(sizeof(block_t));
			if (!b)
				goto recycle;
			b->data = malloc(cache->lblock_size);
			if (!b->data) {
				free(b);
				b = NULL;
				goto recycle;
			}
			cache->blocks_cached++;
		} else {
			/*
			 * Try to recycle a block from the free list.
			 */
			unsigned long temp_key;
recycle:
			if (list_empty(&cache->free_head)) {
				fibril_mutex_unlock(&cache->lock);
				rc = ENOMEM;
				goto out;
			}
			l = cache->free_head.next;
			b = list_get_instance(l, block_t, free_link);

			fibril_mutex_lock(&b->lock);
			if (b->dirty) {
				/*
				 * The block needs to be written back to the
				 * device before it changes identity. Do this
				 * while not holding the cache lock so that
				 * concurrency is not impeded. Also move the
				 * block to the end of the free list so that we
				 * do not slow down other instances of
				 * block_get() draining the free list.
				 */
				list_remove(&b->free_link);
				list_append(&b->free_link, &cache->free_head);
				fibril_mutex_unlock(&cache->lock);
				fibril_mutex_lock(&devcon->comm_area_lock);
				memcpy(devcon->comm_area, b->data, b->size);
				rc = write_blocks(devcon, b->pba,
				    cache->blocks_cluster);
				fibril_mutex_unlock(&devcon->comm_area_lock);
				if (rc != EOK) {
					/*
					 * We did not manage to write the block
					 * to the device. Keep it around for
					 * another try. Hopefully, we will grab
					 * another block next time.
					 */
					fibril_mutex_unlock(&b->lock);
					goto retry;
				}
				b->dirty = false;
				if (!fibril_mutex_trylock(&cache->lock)) {
					/*
					 * Somebody is probably racing with us.
					 * Unlock the block and retry.
					 */
					fibril_mutex_unlock(&b->lock);
					goto retry;
				}
				l = hash_table_find(&cache->block_hash, &key);
				if (l) {
					/*
					 * Someone else must have already
					 * instantiated the block while we were
					 * not holding the cache lock.
					 * Leave the recycled block on the
					 * freelist and continue as if we
					 * found the block of interest during
					 * the first try.
					 */
					fibril_mutex_unlock(&b->lock);
					goto found;
				}
			}
			fibril_mutex_unlock(&b->lock);

			/*
			 * Unlink the block from the free list and the hash
			 * table.
			 */
			list_remove(&b->free_link);
			temp_key = b->lba;
			hash_table_remove(&cache->block_hash, &temp_key, 1);
		}

		block_initialize(b);
		b->devmap_handle = devmap_handle;
		b->size = cache->lblock_size;
		b->lba = ba;
		b->pba = ba_ltop(devcon, b->lba);
		hash_table_insert(&cache->block_hash, &key, &b->hash_link);

		/*
		 * Lock the block before releasing the cache lock. Thus we don't
		 * kill concurrent operations on the cache while doing I/O on
		 * the block.
		 */
		fibril_mutex_lock(&b->lock);
		fibril_mutex_unlock(&cache->lock);

		if (!(flags & BLOCK_FLAGS_NOREAD)) {
			/*
			 * The block contains old or no data. We need to read
			 * the new contents from the device.
			 */
			fibril_mutex_lock(&devcon->comm_area_lock);
			rc = read_blocks(devcon, b->pba, cache->blocks_cluster);
			memcpy(b->data, devcon->comm_area, cache->lblock_size);
			fibril_mutex_unlock(&devcon->comm_area_lock);
			if (rc != EOK)
				b->toxic = true;
		} else
			rc = EOK;

		fibril_mutex_unlock(&b->lock);
	}
out:
	if ((rc != EOK) && b) {
		assert(b->toxic);
		(void) block_put(b);
		b = NULL;
	}
	*block = b;
	return rc;
}

567
568/** Release a reference to a block.
569 *
570 * If the last reference is dropped, the block is put on the free list.
571 *
572 * @param block Block of which a reference is to be released.
573 *
574 * @return EOK on success or a negative error code.
575 */
int block_put(block_t *block)
{
	devcon_t *devcon = devcon_search(block->devmap_handle);
	cache_t *cache;
	unsigned blocks_cached;
	enum cache_mode mode;
	int rc = EOK;

	assert(devcon);
	assert(devcon->cache);
	assert(block->refcnt >= 1);

	cache = devcon->cache;

retry:
	fibril_mutex_lock(&cache->lock);
	blocks_cached = cache->blocks_cached;
	mode = cache->mode;
	fibril_mutex_unlock(&cache->lock);

	/*
	 * Determine whether to sync the block. Syncing the block is best done
	 * when not holding the cache lock as it does not impede concurrency.
	 * Since the situation may have changed when we unlocked the cache, the
	 * blocks_cached and mode variables are mere hints. We will recheck the
	 * conditions later when the cache lock is held again.
	 */
	fibril_mutex_lock(&block->lock);
	if (block->toxic)
		block->dirty = false;	/* will not write back toxic block */
	if (block->dirty && (block->refcnt == 1) &&
	    (blocks_cached > CACHE_HI_WATERMARK || mode != CACHE_MODE_WB)) {
		fibril_mutex_lock(&devcon->comm_area_lock);
		memcpy(devcon->comm_area, block->data, block->size);
		rc = write_blocks(devcon, block->pba, cache->blocks_cluster);
		fibril_mutex_unlock(&devcon->comm_area_lock);
		block->dirty = false;
	}
	fibril_mutex_unlock(&block->lock);

	fibril_mutex_lock(&cache->lock);
	fibril_mutex_lock(&block->lock);
	if (!--block->refcnt) {
		/*
		 * Last reference to the block was dropped. Either free the
		 * block or put it on the free list. In case of an I/O error,
		 * free the block.
		 */
		if ((cache->blocks_cached > CACHE_HI_WATERMARK) ||
		    (rc != EOK)) {
			/*
			 * Currently there are too many cached blocks or there
			 * was an I/O error when writing the block back to the
			 * device.
			 */
			if (block->dirty) {
				/*
				 * We cannot sync the block while holding the
				 * cache lock. Release everything and retry.
				 */
				block->refcnt++;
				fibril_mutex_unlock(&block->lock);
				fibril_mutex_unlock(&cache->lock);
				goto retry;
			}
			/*
			 * Take the block out of the cache and free it.
			 */
			unsigned long key = block->lba;
			hash_table_remove(&cache->block_hash, &key, 1);
			fibril_mutex_unlock(&block->lock);
			free(block->data);
			free(block);
			cache->blocks_cached--;
			fibril_mutex_unlock(&cache->lock);
			return rc;
		}
		/*
		 * Put the block on the free list.
		 */
		if (cache->mode != CACHE_MODE_WB && block->dirty) {
			/*
			 * We cannot sync the block while holding the cache
			 * lock. Release everything and retry.
			 */
			block->refcnt++;
			fibril_mutex_unlock(&block->lock);
			fibril_mutex_unlock(&cache->lock);
			goto retry;
		}
		list_append(&block->free_link, &cache->free_head);
	}
	fibril_mutex_unlock(&block->lock);
	fibril_mutex_unlock(&cache->lock);

	return rc;
}

/** Read sequential data from a block device.
 *
 * @param devmap_handle	Device handle of the block device.
 * @param bufpos	Pointer to the first unread valid offset within the
 *			communication buffer.
 * @param buflen	Pointer to the number of unread bytes that are ready in
 *			the communication buffer.
 * @param pos		Device position to be read.
 * @param dst		Destination buffer.
 * @param size		Size of the destination buffer.
 *
 * @return EOK on success or a negative error code on failure.
 */
int block_seqread(devmap_handle_t devmap_handle, size_t *bufpos, size_t *buflen,
    aoff64_t *pos, void *dst, size_t size)
{
	size_t offset = 0;
	size_t left = size;
	size_t block_size;
	devcon_t *devcon;

	devcon = devcon_search(devmap_handle);
	assert(devcon);
	block_size = devcon->pblock_size;

	fibril_mutex_lock(&devcon->comm_area_lock);
	while (left > 0) {
		size_t rd;

		if (*bufpos + left < *buflen)
			rd = left;
		else
			rd = *buflen - *bufpos;

		if (rd > 0) {
			/*
			 * Copy the contents of the communication buffer to the
			 * destination buffer.
			 */
			memcpy(dst + offset, devcon->comm_area + *bufpos, rd);
			offset += rd;
			*bufpos += rd;
			*pos += rd;
			left -= rd;
		}

		if (*bufpos == *buflen) {
			/* Refill the communication buffer with a new block. */
			int rc;

			rc = read_blocks(devcon, *pos / block_size, 1);
			if (rc != EOK) {
				fibril_mutex_unlock(&devcon->comm_area_lock);
				return rc;
			}

			*bufpos = 0;
			*buflen = block_size;
		}
	}
	fibril_mutex_unlock(&devcon->comm_area_lock);

	return EOK;
}

/** Read blocks directly from device (bypass cache).
 *
 * @param devmap_handle	Device handle of the block device.
 * @param ba		Address of first block (physical).
 * @param cnt		Number of blocks.
 * @param buf		Buffer for storing the data.
 *
 * @return EOK on success or negative error code on failure.
 */
int block_read_direct(devmap_handle_t devmap_handle, aoff64_t ba, size_t cnt, void *buf)
{
	devcon_t *devcon;
	int rc;

	devcon = devcon_search(devmap_handle);
	assert(devcon);

	fibril_mutex_lock(&devcon->comm_area_lock);

	rc = read_blocks(devcon, ba, cnt);
	if (rc == EOK)
		memcpy(buf, devcon->comm_area, devcon->pblock_size * cnt);

	fibril_mutex_unlock(&devcon->comm_area_lock);

	return rc;
}

/** Write blocks directly to device (bypass cache).
 *
 * @param devmap_handle	Device handle of the block device.
 * @param ba		Address of first block (physical).
 * @param cnt		Number of blocks.
 * @param data		The data to be written.
 *
 * @return EOK on success or negative error code on failure.
 */
int block_write_direct(devmap_handle_t devmap_handle, aoff64_t ba, size_t cnt,
    const void *data)
{
	devcon_t *devcon;
	int rc;

	devcon = devcon_search(devmap_handle);
	assert(devcon);

	fibril_mutex_lock(&devcon->comm_area_lock);

	memcpy(devcon->comm_area, data, devcon->pblock_size * cnt);
	rc = write_blocks(devcon, ba, cnt);

	fibril_mutex_unlock(&devcon->comm_area_lock);

	return rc;
}

/** Get device block size.
 *
 * @param devmap_handle	Device handle of the block device.
 * @param bsize		Output block size.
 *
 * @return EOK on success or negative error code on failure.
 */
int block_get_bsize(devmap_handle_t devmap_handle, size_t *bsize)
{
	devcon_t *devcon;

	devcon = devcon_search(devmap_handle);
	assert(devcon);

	return get_block_size(devcon->dev_phone, bsize);
}

/** Get number of blocks on device.
 *
 * @param devmap_handle	Device handle of the block device.
 * @param nblocks	Output number of blocks.
 *
 * @return EOK on success or negative error code on failure.
 */
int block_get_nblocks(devmap_handle_t devmap_handle, aoff64_t *nblocks)
{
	devcon_t *devcon;

	devcon = devcon_search(devmap_handle);
	assert(devcon);

	return get_num_blocks(devcon->dev_phone, nblocks);
}

/** Read bytes directly from the device (bypass cache).
 *
 * @param devmap_handle	Device handle of the block device.
 * @param abs_offset	Absolute offset in bytes from which to start reading.
 * @param bytes		Number of bytes to read.
 * @param data		Buffer that receives the data.
 *
 * @return EOK on success or negative error code on failure.
 */
int block_read_bytes_direct(devmap_handle_t devmap_handle, aoff64_t abs_offset,
    size_t bytes, void *data)
{
	int rc;
	size_t phys_block_size;
	size_t buf_size;
	void *buffer;
	aoff64_t first_block;
	aoff64_t last_block;
	size_t blocks;
	size_t offset;

	rc = block_get_bsize(devmap_handle, &phys_block_size);
	if (rc != EOK) {
		return rc;
	}

	/* Calculate data position and required space. */
	first_block = abs_offset / phys_block_size;
	offset = abs_offset % phys_block_size;
	last_block = (abs_offset + bytes - 1) / phys_block_size;
	blocks = last_block - first_block + 1;
	buf_size = blocks * phys_block_size;

	/* Read the data into memory. */
	buffer = malloc(buf_size);
	if (buffer == NULL) {
		return ENOMEM;
	}

	rc = block_read_direct(devmap_handle, first_block, blocks, buffer);
	if (rc != EOK) {
		free(buffer);
		return rc;
	}

	/* Copy the data from the buffer. */
	memcpy(data, buffer + offset, bytes);
	free(buffer);

	return EOK;
}

/** Read blocks from block device.
 *
 * The data are read into the device's communication area.
 *
 * @param devcon	Device connection.
 * @param ba		Address of first block.
 * @param cnt		Number of blocks.
 *
 * @return EOK on success or negative error code on failure.
 */
static int read_blocks(devcon_t *devcon, aoff64_t ba, size_t cnt)
{
	int rc;

	assert(devcon);
	rc = async_req_3_0(devcon->dev_phone, BD_READ_BLOCKS, LOWER32(ba),
	    UPPER32(ba), cnt);
	if (rc != EOK) {
		printf("Error %d reading %zu blocks starting at block %" PRIuOFF64
		    " from device handle %" PRIun "\n", rc, cnt, ba,
		    devcon->devmap_handle);
#ifndef NDEBUG
		stacktrace_print();
#endif
	}
	return rc;
}

/** Write blocks to block device.
 *
 * The data to be written are taken from the device's communication area.
 *
 * @param devcon	Device connection.
 * @param ba		Address of first block.
 * @param cnt		Number of blocks.
 *
 * @return EOK on success or negative error code on failure.
 */
static int write_blocks(devcon_t *devcon, aoff64_t ba, size_t cnt)
{
	int rc;

	assert(devcon);
	rc = async_req_3_0(devcon->dev_phone, BD_WRITE_BLOCKS, LOWER32(ba),
	    UPPER32(ba), cnt);
	if (rc != EOK) {
		printf("Error %d writing %zu blocks starting at block %" PRIuOFF64
		    " to device handle %" PRIun "\n", rc, cnt, ba,
		    devcon->devmap_handle);
#ifndef NDEBUG
		stacktrace_print();
#endif
	}
	return rc;
}

/** Get block size used by the device. */
static int get_block_size(int dev_phone, size_t *bsize)
{
	sysarg_t bs;
	int rc;

	rc = async_req_0_1(dev_phone, BD_GET_BLOCK_SIZE, &bs);
	if (rc == EOK)
		*bsize = (size_t) bs;

	return rc;
}

/** Get total number of blocks on block device. */
static int get_num_blocks(int dev_phone, aoff64_t *nblocks)
{
	sysarg_t nb_l, nb_h;
	int rc;

	rc = async_req_0_2(dev_phone, BD_GET_NUM_BLOCKS, &nb_l, &nb_h);
	if (rc == EOK) {
		*nblocks = (aoff64_t) MERGE_LOUP32(nb_l, nb_h);
	}

	return rc;
}

/** Convert logical block address to physical block address. */
static aoff64_t ba_ltop(devcon_t *devcon, aoff64_t lba)
{
	assert(devcon->cache != NULL);
	return lba * devcon->cache->blocks_cluster;
}

/** @}
 */