source: mainline/uspace/lib/block/libblock.c@4723444

Last change on this file since 4723444 was 956d4df8, checked in by Jakub Jermar <jakub@…>, 15 years ago

Do not forget to unlock the block structure before freeing it.

Even though this happens when there is no reference to the block,
unlocking here may be essential from the point of view of the
deadlock detection code which could otherwise become confused.

/*
 * Copyright (c) 2008 Jakub Jermar
 * Copyright (c) 2008 Martin Decky
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup libblock
 * @{
 */
/**
 * @file
 * @brief Block device client library.
 */

#include "libblock.h"
#include "../../srv/vfs/vfs.h"
#include <ipc/devmap.h>
#include <ipc/bd.h>
#include <ipc/services.h>
#include <errno.h>
#include <sys/mman.h>
#include <async.h>
#include <as.h>
#include <assert.h>
#include <fibril_synch.h>
#include <adt/list.h>
#include <adt/hash_table.h>
#include <macros.h>
#include <mem.h>
#include <sys/typefmt.h>
#include <stacktrace.h>

/** Lock protecting the device connection list. */
static FIBRIL_MUTEX_INITIALIZE(dcl_lock);
/** Device connection list head. */
static LIST_INITIALIZE(dcl_head);

#define CACHE_BUCKETS_LOG2	10
#define CACHE_BUCKETS		(1 << CACHE_BUCKETS_LOG2)

typedef struct {
	fibril_mutex_t lock;
	size_t lblock_size;		/**< Logical block size. */
	unsigned blocks_cluster;	/**< Physical blocks per block_t */
	unsigned block_count;		/**< Total number of blocks. */
	unsigned blocks_cached;		/**< Number of cached blocks. */
	hash_table_t block_hash;
	link_t free_head;
	enum cache_mode mode;
} cache_t;

typedef struct {
	link_t link;
	devmap_handle_t devmap_handle;
	int dev_phone;
	fibril_mutex_t comm_area_lock;
	void *comm_area;
	size_t comm_size;
	void *bb_buf;
	aoff64_t bb_addr;
	size_t pblock_size;		/**< Physical block size. */
	cache_t *cache;
} devcon_t;

static int read_blocks(devcon_t *devcon, aoff64_t ba, size_t cnt);
static int write_blocks(devcon_t *devcon, aoff64_t ba, size_t cnt);
static int get_block_size(int dev_phone, size_t *bsize);
static int get_num_blocks(int dev_phone, aoff64_t *nblocks);
static aoff64_t ba_ltop(devcon_t *devcon, aoff64_t lba);

/** Look up the device connection for a device handle. */
static devcon_t *devcon_search(devmap_handle_t devmap_handle)
{
	link_t *cur;

	fibril_mutex_lock(&dcl_lock);
	for (cur = dcl_head.next; cur != &dcl_head; cur = cur->next) {
		devcon_t *devcon = list_get_instance(cur, devcon_t, link);
		if (devcon->devmap_handle == devmap_handle) {
			fibril_mutex_unlock(&dcl_lock);
			return devcon;
		}
	}
	fibril_mutex_unlock(&dcl_lock);
	return NULL;
}

/** Create a new device connection and link it into the connection list. */
static int devcon_add(devmap_handle_t devmap_handle, int dev_phone,
    size_t bsize, void *comm_area, size_t comm_size)
{
	link_t *cur;
	devcon_t *devcon;

	if (comm_size < bsize)
		return EINVAL;

	devcon = malloc(sizeof(devcon_t));
	if (!devcon)
		return ENOMEM;

	link_initialize(&devcon->link);
	devcon->devmap_handle = devmap_handle;
	devcon->dev_phone = dev_phone;
	fibril_mutex_initialize(&devcon->comm_area_lock);
	devcon->comm_area = comm_area;
	devcon->comm_size = comm_size;
	devcon->bb_buf = NULL;
	devcon->bb_addr = 0;
	devcon->pblock_size = bsize;
	devcon->cache = NULL;

	fibril_mutex_lock(&dcl_lock);
	for (cur = dcl_head.next; cur != &dcl_head; cur = cur->next) {
		devcon_t *d = list_get_instance(cur, devcon_t, link);
		if (d->devmap_handle == devmap_handle) {
			fibril_mutex_unlock(&dcl_lock);
			free(devcon);
			return EEXIST;
		}
	}
	list_append(&devcon->link, &dcl_head);
	fibril_mutex_unlock(&dcl_lock);
	return EOK;
}

/** Unlink a device connection from the connection list. */
static void devcon_remove(devcon_t *devcon)
{
	fibril_mutex_lock(&dcl_lock);
	list_remove(&devcon->link);
	fibril_mutex_unlock(&dcl_lock);
}

/** Initialize a block device connection.
 *
 * @param devmap_handle	Device handle of the block device.
 * @param comm_size	Size of the communication area to be shared with
 *			the device driver.
 *
 * @return		EOK on success or a negative error code.
 */
int block_init(devmap_handle_t devmap_handle, size_t comm_size)
{
	int rc;
	int dev_phone;
	void *comm_area;
	size_t bsize;

	comm_area = mmap(NULL, comm_size, PROTO_READ | PROTO_WRITE,
	    MAP_ANONYMOUS | MAP_PRIVATE, 0, 0);
	if (!comm_area) {
		return ENOMEM;
	}

	dev_phone = devmap_device_connect(devmap_handle, IPC_FLAG_BLOCKING);
	if (dev_phone < 0) {
		munmap(comm_area, comm_size);
		return dev_phone;
	}

	rc = async_share_out_start(dev_phone, comm_area,
	    AS_AREA_READ | AS_AREA_WRITE);
	if (rc != EOK) {
		munmap(comm_area, comm_size);
		async_hangup(dev_phone);
		return rc;
	}

	rc = get_block_size(dev_phone, &bsize);
	if (rc != EOK) {
		munmap(comm_area, comm_size);
		async_hangup(dev_phone);
		return rc;
	}

	rc = devcon_add(devmap_handle, dev_phone, bsize, comm_area, comm_size);
	if (rc != EOK) {
		munmap(comm_area, comm_size);
		async_hangup(dev_phone);
		return rc;
	}

	return EOK;
}

/** Terminate a block device connection and release associated resources. */
void block_fini(devmap_handle_t devmap_handle)
{
	devcon_t *devcon = devcon_search(devmap_handle);
	assert(devcon);

	if (devcon->cache)
		(void) block_cache_fini(devmap_handle);

	devcon_remove(devcon);

	if (devcon->bb_buf)
		free(devcon->bb_buf);

	munmap(devcon->comm_area, devcon->comm_size);
	async_hangup(devcon->dev_phone);

	free(devcon);
}
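
/*
 * Usage sketch (illustrative only, not part of this library): a client
 * pairs block_init() with block_fini(); the handle and the 4096-byte
 * communication area below are hypothetical values.
 *
 *	int example_open(devmap_handle_t handle)
 *	{
 *		int rc = block_init(handle, 4096);
 *		if (rc != EOK)
 *			return rc;
 *		// ... use the other block_* calls here ...
 *		block_fini(handle);
 *		return EOK;
 *	}
 */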

/** Load the boot block of a block device and keep a private copy of it. */
int block_bb_read(devmap_handle_t devmap_handle, aoff64_t ba)
{
	void *bb_buf;
	int rc;

	devcon_t *devcon = devcon_search(devmap_handle);
	if (!devcon)
		return ENOENT;
	if (devcon->bb_buf)
		return EEXIST;
	bb_buf = malloc(devcon->pblock_size);
	if (!bb_buf)
		return ENOMEM;

	fibril_mutex_lock(&devcon->comm_area_lock);
	rc = read_blocks(devcon, 0, 1);
	if (rc != EOK) {
		fibril_mutex_unlock(&devcon->comm_area_lock);
		free(bb_buf);
		return rc;
	}
	memcpy(bb_buf, devcon->comm_area, devcon->pblock_size);
	fibril_mutex_unlock(&devcon->comm_area_lock);

	devcon->bb_buf = bb_buf;
	devcon->bb_addr = ba;

	return EOK;
}

/** Return the private copy of the device's boot block. */
void *block_bb_get(devmap_handle_t devmap_handle)
{
	devcon_t *devcon = devcon_search(devmap_handle);
	assert(devcon);
	return devcon->bb_buf;
}

/** Hash function for the block cache hash table. */
static hash_index_t cache_hash(unsigned long *key)
{
	return *key & (CACHE_BUCKETS - 1);
}

/** Compare a cached block with a key (logical block address). */
static int cache_compare(unsigned long *key, hash_count_t keys, link_t *item)
{
	block_t *b = hash_table_get_instance(item, block_t, hash_link);
	return b->lba == *key;
}

/** Item removal callback; block deallocation is handled elsewhere. */
static void cache_remove_callback(link_t *item)
{
}

static hash_table_operations_t cache_ops = {
	.hash = cache_hash,
	.compare = cache_compare,
	.remove_callback = cache_remove_callback
};

/** Enable the block cache for a device.
 *
 * @param devmap_handle	Device handle of the block device.
 * @param size		Logical block size.
 * @param blocks	Total number of logical blocks on the device.
 * @param mode		Cache mode (write-through or write-back).
 *
 * @return		EOK on success or a negative error code.
 */
int block_cache_init(devmap_handle_t devmap_handle, size_t size,
    unsigned blocks, enum cache_mode mode)
{
	devcon_t *devcon = devcon_search(devmap_handle);
	cache_t *cache;
	if (!devcon)
		return ENOENT;
	if (devcon->cache)
		return EEXIST;
	cache = malloc(sizeof(cache_t));
	if (!cache)
		return ENOMEM;

	fibril_mutex_initialize(&cache->lock);
	list_initialize(&cache->free_head);
	cache->lblock_size = size;
	cache->block_count = blocks;
	cache->blocks_cached = 0;
	cache->mode = mode;

	/* Allow 1:1 or small-to-large block size translation only. */
	if (cache->lblock_size % devcon->pblock_size != 0) {
		free(cache);
		return ENOTSUP;
	}

	cache->blocks_cluster = cache->lblock_size / devcon->pblock_size;

	if (!hash_table_create(&cache->block_hash, CACHE_BUCKETS, 1,
	    &cache_ops)) {
		free(cache);
		return ENOMEM;
	}

	devcon->cache = cache;
	return EOK;
}
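
/*
 * Usage sketch (illustrative only): enabling the cache for a device with
 * 512-byte physical blocks using 1024-byte logical blocks, so that each
 * block_t spans a cluster of two physical blocks. CACHE_MODE_WB is the
 * write-back mode consulted below in block_put(); handle and nblocks are
 * hypothetical values.
 *
 *	int rc = block_cache_init(handle, 1024, nblocks, CACHE_MODE_WB);
 *	if (rc != EOK)
 *		return rc;
 *	// ... cached I/O via block_get()/block_put() ...
 *	rc = block_cache_fini(handle);
 */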

/** Shut down the block cache, writing back any dirty blocks. */
int block_cache_fini(devmap_handle_t devmap_handle)
{
	devcon_t *devcon = devcon_search(devmap_handle);
	cache_t *cache;
	int rc;

	if (!devcon)
		return ENOENT;
	if (!devcon->cache)
		return EOK;
	cache = devcon->cache;

	/*
	 * We are expecting to find all blocks for this device handle on the
	 * free list, i.e. the block reference count should be zero. Do not
	 * bother with the cache and block locks because we are single-threaded.
	 */
	while (!list_empty(&cache->free_head)) {
		block_t *b = list_get_instance(cache->free_head.next,
		    block_t, free_link);

		list_remove(&b->free_link);
		if (b->dirty) {
			memcpy(devcon->comm_area, b->data, b->size);
			rc = write_blocks(devcon, b->pba, cache->blocks_cluster);
			if (rc != EOK)
				return rc;
		}

		unsigned long key = b->lba;
		hash_table_remove(&cache->block_hash, &key, 1);

		free(b->data);
		free(b);
	}

	hash_table_destroy(&cache->block_hash);
	devcon->cache = NULL;
	free(cache);

	return EOK;
}

#define CACHE_LO_WATERMARK	10
#define CACHE_HI_WATERMARK	20

/** Decide whether the cache may allocate a new block_t. */
static bool cache_can_grow(cache_t *cache)
{
	if (cache->blocks_cached < CACHE_LO_WATERMARK)
		return true;
	if (!list_empty(&cache->free_head))
		return false;
	return true;
}

/** Initialize the fields of a newly allocated or recycled block. */
static void block_initialize(block_t *b)
{
	fibril_mutex_initialize(&b->lock);
	b->refcnt = 1;
	b->dirty = false;
	b->toxic = false;
	fibril_rwlock_initialize(&b->contents_lock);
	link_initialize(&b->free_link);
	link_initialize(&b->hash_link);
}

/** Instantiate a block in memory and get a reference to it.
 *
 * @param block		Pointer to where the function will store the
 *			block pointer on success.
 * @param devmap_handle	Device handle of the block device.
 * @param ba		Block address (logical).
 * @param flags		If BLOCK_FLAGS_NOREAD is specified, block_get()
 *			will not read the contents of the block from the
 *			device.
 *
 * @return		EOK on success or a negative error code.
 */
int block_get(block_t **block, devmap_handle_t devmap_handle, aoff64_t ba,
    int flags)
{
	devcon_t *devcon;
	cache_t *cache;
	block_t *b;
	link_t *l;
	unsigned long key = ba;
	int rc;

	devcon = devcon_search(devmap_handle);

	assert(devcon);
	assert(devcon->cache);

	cache = devcon->cache;

retry:
	rc = EOK;
	b = NULL;

	fibril_mutex_lock(&cache->lock);
	l = hash_table_find(&cache->block_hash, &key);
	if (l) {
		/*
		 * We found the block in the cache.
		 */
		b = hash_table_get_instance(l, block_t, hash_link);
		fibril_mutex_lock(&b->lock);
		if (b->refcnt++ == 0)
			list_remove(&b->free_link);
		if (b->toxic)
			rc = EIO;
		fibril_mutex_unlock(&b->lock);
		fibril_mutex_unlock(&cache->lock);
	} else {
		/*
		 * The block was not found in the cache.
		 */
		if (cache_can_grow(cache)) {
			/*
			 * We can grow the cache by allocating new blocks.
			 * Should the allocation fail, we fail over and try to
			 * recycle a block from the cache.
			 */
			b = malloc(sizeof(block_t));
			if (!b)
				goto recycle;
			b->data = malloc(cache->lblock_size);
			if (!b->data) {
				free(b);
				b = NULL;
				goto recycle;
			}
			cache->blocks_cached++;
		} else {
			/*
			 * Try to recycle a block from the free list.
			 */
			unsigned long temp_key;
recycle:
			if (list_empty(&cache->free_head)) {
				fibril_mutex_unlock(&cache->lock);
				rc = ENOMEM;
				goto out;
			}
			l = cache->free_head.next;
			b = list_get_instance(l, block_t, free_link);

			fibril_mutex_lock(&b->lock);
			if (b->dirty) {
				/*
				 * The block needs to be written back to the
				 * device before it changes identity. Do this
				 * while not holding the cache lock so that
				 * concurrency is not impeded. Also move the
				 * block to the end of the free list so that we
				 * do not slow down other instances of
				 * block_get() draining the free list.
				 */
				list_remove(&b->free_link);
				list_append(&b->free_link, &cache->free_head);
				fibril_mutex_unlock(&cache->lock);
				fibril_mutex_lock(&devcon->comm_area_lock);
				memcpy(devcon->comm_area, b->data, b->size);
				rc = write_blocks(devcon, b->pba,
				    cache->blocks_cluster);
				fibril_mutex_unlock(&devcon->comm_area_lock);
				if (rc != EOK) {
					/*
					 * We did not manage to write the block
					 * to the device. Keep it around for
					 * another try. Hopefully, we will grab
					 * another block next time.
					 */
					fibril_mutex_unlock(&b->lock);
					goto retry;
				}
				b->dirty = false;
				if (!fibril_mutex_trylock(&cache->lock)) {
					/*
					 * Somebody is probably racing with us.
					 * Unlock the block and retry.
					 */
					fibril_mutex_unlock(&b->lock);
					goto retry;
				}
			}
			fibril_mutex_unlock(&b->lock);

			/*
			 * Unlink the block from the free list and the hash
			 * table.
			 */
			list_remove(&b->free_link);
			temp_key = b->lba;
			hash_table_remove(&cache->block_hash, &temp_key, 1);
		}

		block_initialize(b);
		b->devmap_handle = devmap_handle;
		b->size = cache->lblock_size;
		b->lba = ba;
		b->pba = ba_ltop(devcon, b->lba);
		hash_table_insert(&cache->block_hash, &key, &b->hash_link);

		/*
		 * Lock the block before releasing the cache lock. Thus we don't
		 * kill concurrent operations on the cache while doing I/O on
		 * the block.
		 */
		fibril_mutex_lock(&b->lock);
		fibril_mutex_unlock(&cache->lock);

		if (!(flags & BLOCK_FLAGS_NOREAD)) {
			/*
			 * The block contains old or no data. We need to read
			 * the new contents from the device.
			 */
			fibril_mutex_lock(&devcon->comm_area_lock);
			rc = read_blocks(devcon, b->pba, cache->blocks_cluster);
			memcpy(b->data, devcon->comm_area, cache->lblock_size);
			fibril_mutex_unlock(&devcon->comm_area_lock);
			if (rc != EOK)
				b->toxic = true;
		} else
			rc = EOK;

		fibril_mutex_unlock(&b->lock);
	}
out:
	if ((rc != EOK) && b) {
		assert(b->toxic);
		(void) block_put(b);
		b = NULL;
	}
	*block = b;
	return rc;
}

/** Release a reference to a block.
 *
 * If the last reference is dropped, the block is put on the free list.
 *
 * @param block		Block of which a reference is to be released.
 *
 * @return		EOK on success or a negative error code.
 */
int block_put(block_t *block)
{
	devcon_t *devcon = devcon_search(block->devmap_handle);
	cache_t *cache;
	unsigned blocks_cached;
	enum cache_mode mode;
	int rc = EOK;

	assert(devcon);
	assert(devcon->cache);
	assert(block->refcnt >= 1);

	cache = devcon->cache;

retry:
	fibril_mutex_lock(&cache->lock);
	blocks_cached = cache->blocks_cached;
	mode = cache->mode;
	fibril_mutex_unlock(&cache->lock);

	/*
	 * Determine whether to sync the block. Syncing the block is best done
	 * when not holding the cache lock as it does not impede concurrency.
	 * Since the situation may have changed when we unlocked the cache, the
	 * blocks_cached and mode variables are mere hints. We will recheck the
	 * conditions later when the cache lock is held again.
	 */
	fibril_mutex_lock(&block->lock);
	if (block->toxic)
		block->dirty = false;	/* will not write back toxic block */
	if (block->dirty && (block->refcnt == 1) &&
	    (blocks_cached > CACHE_HI_WATERMARK || mode != CACHE_MODE_WB)) {
		fibril_mutex_lock(&devcon->comm_area_lock);
		memcpy(devcon->comm_area, block->data, block->size);
		rc = write_blocks(devcon, block->pba, cache->blocks_cluster);
		fibril_mutex_unlock(&devcon->comm_area_lock);
		block->dirty = false;
	}
	fibril_mutex_unlock(&block->lock);

	fibril_mutex_lock(&cache->lock);
	fibril_mutex_lock(&block->lock);
	if (!--block->refcnt) {
		/*
		 * Last reference to the block was dropped. Either free the
		 * block or put it on the free list. In case of an I/O error,
		 * free the block.
		 */
		if ((cache->blocks_cached > CACHE_HI_WATERMARK) ||
		    (rc != EOK)) {
			/*
			 * Currently there are too many cached blocks or there
			 * was an I/O error when writing the block back to the
			 * device.
			 */
			if (block->dirty) {
				/*
				 * We cannot sync the block while holding the
				 * cache lock. Release everything and retry.
				 */
				block->refcnt++;
				fibril_mutex_unlock(&block->lock);
				fibril_mutex_unlock(&cache->lock);
				goto retry;
			}
			/*
			 * Take the block out of the cache and free it.
			 */
			unsigned long key = block->lba;
			hash_table_remove(&cache->block_hash, &key, 1);
			fibril_mutex_unlock(&block->lock);
			free(block->data);
			free(block);
			cache->blocks_cached--;
			fibril_mutex_unlock(&cache->lock);
			return rc;
		}
		/*
		 * Put the block on the free list.
		 */
		if (cache->mode != CACHE_MODE_WB && block->dirty) {
			/*
			 * We cannot sync the block while holding the cache
			 * lock. Release everything and retry.
			 */
			block->refcnt++;
			fibril_mutex_unlock(&block->lock);
			fibril_mutex_unlock(&cache->lock);
			goto retry;
		}
		list_append(&block->free_link, &cache->free_head);
	}
	fibril_mutex_unlock(&block->lock);
	fibril_mutex_unlock(&cache->lock);

	return rc;
}
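
/*
 * Usage sketch (illustrative only): a read-modify-write of one logical
 * block through the cache. Passing 0 for flags requests the initial read
 * from the device; BLOCK_FLAGS_NOREAD would skip it. The handle and block
 * address are hypothetical values.
 *
 *	block_t *b;
 *	int rc = block_get(&b, handle, 124, 0);
 *	if (rc != EOK)
 *		return rc;
 *	fibril_rwlock_write_lock(&b->contents_lock);
 *	((uint8_t *) b->data)[0] ^= 0xff;
 *	b->dirty = true;	// let the cache write the block back
 *	fibril_rwlock_write_unlock(&b->contents_lock);
 *	rc = block_put(b);
 */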

/** Read sequential data from a block device.
 *
 * @param devmap_handle	Device handle of the block device.
 * @param bufpos	Pointer to the first unread valid offset within the
 *			communication buffer.
 * @param buflen	Pointer to the number of unread bytes that are ready
 *			in the communication buffer.
 * @param pos		Device position to be read.
 * @param dst		Destination buffer.
 * @param size		Size of the destination buffer.
 *
 * @return		EOK on success or a negative return code on failure.
 */
int block_seqread(devmap_handle_t devmap_handle, size_t *bufpos, size_t *buflen,
    aoff64_t *pos, void *dst, size_t size)
{
	size_t offset = 0;
	size_t left = size;
	size_t block_size;
	devcon_t *devcon;

	devcon = devcon_search(devmap_handle);
	assert(devcon);
	block_size = devcon->pblock_size;

	fibril_mutex_lock(&devcon->comm_area_lock);
	while (left > 0) {
		size_t rd;

		if (*bufpos + left < *buflen)
			rd = left;
		else
			rd = *buflen - *bufpos;

		if (rd > 0) {
			/*
			 * Copy the contents of the communication buffer to the
			 * destination buffer.
			 */
			memcpy(dst + offset, devcon->comm_area + *bufpos, rd);
			offset += rd;
			*bufpos += rd;
			*pos += rd;
			left -= rd;
		}

		if (*bufpos == *buflen) {
			/* Refill the communication buffer with a new block. */
			int rc;

			rc = read_blocks(devcon, *pos / block_size, 1);
			if (rc != EOK) {
				fibril_mutex_unlock(&devcon->comm_area_lock);
				return rc;
			}

			*bufpos = 0;
			*buflen = block_size;
		}
	}
	fibril_mutex_unlock(&devcon->comm_area_lock);

	return EOK;
}
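
/*
 * Usage sketch (illustrative only): reading a small record that may
 * straddle physical block boundaries. Starting with buflen == bufpos == 0
 * forces the first loop iteration to refill the communication buffer. The
 * handle and position are hypothetical values.
 *
 *	size_t bufpos = 0, buflen = 0;
 *	aoff64_t pos = 512;
 *	char record[32];
 *	int rc = block_seqread(handle, &bufpos, &buflen, &pos, record,
 *	    sizeof(record));
 */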

/** Read blocks directly from device (bypass cache).
 *
 * @param devmap_handle	Device handle of the block device.
 * @param ba		Address of first block (physical).
 * @param cnt		Number of blocks.
 * @param buf		Buffer for storing the data.
 *
 * @return		EOK on success or negative error code on failure.
 */
int block_read_direct(devmap_handle_t devmap_handle, aoff64_t ba, size_t cnt,
    void *buf)
{
	devcon_t *devcon;
	int rc;

	devcon = devcon_search(devmap_handle);
	assert(devcon);

	fibril_mutex_lock(&devcon->comm_area_lock);

	rc = read_blocks(devcon, ba, cnt);
	if (rc == EOK)
		memcpy(buf, devcon->comm_area, devcon->pblock_size * cnt);

	fibril_mutex_unlock(&devcon->comm_area_lock);

	return rc;
}

/** Write blocks directly to device (bypass cache).
 *
 * @param devmap_handle	Device handle of the block device.
 * @param ba		Address of first block (physical).
 * @param cnt		Number of blocks.
 * @param data		The data to be written.
 *
 * @return		EOK on success or negative error code on failure.
 */
int block_write_direct(devmap_handle_t devmap_handle, aoff64_t ba, size_t cnt,
    const void *data)
{
	devcon_t *devcon;
	int rc;

	devcon = devcon_search(devmap_handle);
	assert(devcon);

	fibril_mutex_lock(&devcon->comm_area_lock);

	memcpy(devcon->comm_area, data, devcon->pblock_size * cnt);
	rc = write_blocks(devcon, ba, cnt);

	fibril_mutex_unlock(&devcon->comm_area_lock);

	return rc;
}
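
/*
 * Usage sketch (illustrative only): writing the first two physical blocks
 * and reading them back. Both direct calls stage the whole transfer in the
 * communication area, so the caller must keep cnt small enough for the
 * comm_size passed to block_init(). The handle is a hypothetical value.
 *
 *	size_t bsize;
 *	int rc = block_get_bsize(handle, &bsize);
 *	if (rc == EOK) {
 *		void *buffer = calloc(2, bsize);
 *		if (buffer) {
 *			rc = block_write_direct(handle, 0, 2, buffer);
 *			if (rc == EOK)
 *				rc = block_read_direct(handle, 0, 2, buffer);
 *			free(buffer);
 *		}
 *	}
 */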

/** Get device block size.
 *
 * @param devmap_handle	Device handle of the block device.
 * @param bsize		Output block size.
 *
 * @return		EOK on success or negative error code on failure.
 */
int block_get_bsize(devmap_handle_t devmap_handle, size_t *bsize)
{
	devcon_t *devcon;

	devcon = devcon_search(devmap_handle);
	assert(devcon);

	return get_block_size(devcon->dev_phone, bsize);
}

/** Get number of blocks on device.
 *
 * @param devmap_handle	Device handle of the block device.
 * @param nblocks	Output number of blocks.
 *
 * @return		EOK on success or negative error code on failure.
 */
int block_get_nblocks(devmap_handle_t devmap_handle, aoff64_t *nblocks)
{
	devcon_t *devcon;

	devcon = devcon_search(devmap_handle);
	assert(devcon);

	return get_num_blocks(devcon->dev_phone, nblocks);
}

/** Read blocks from block device.
 *
 * The data is transferred through the device's communication area.
 *
 * @param devcon	Device connection.
 * @param ba		Address of first block.
 * @param cnt		Number of blocks.
 *
 * @return		EOK on success or negative error code on failure.
 */
static int read_blocks(devcon_t *devcon, aoff64_t ba, size_t cnt)
{
	int rc;

	assert(devcon);
	rc = async_req_3_0(devcon->dev_phone, BD_READ_BLOCKS, LOWER32(ba),
	    UPPER32(ba), cnt);
	if (rc != EOK) {
		printf("Error %d reading %zu blocks starting at block %" PRIuOFF64
		    " from device handle %" PRIun "\n", rc, cnt, ba,
		    devcon->devmap_handle);
#ifndef NDEBUG
		stacktrace_print();
#endif
	}
	return rc;
}

/** Write blocks to block device.
 *
 * The data is transferred through the device's communication area.
 *
 * @param devcon	Device connection.
 * @param ba		Address of first block.
 * @param cnt		Number of blocks.
 *
 * @return		EOK on success or negative error code on failure.
 */
static int write_blocks(devcon_t *devcon, aoff64_t ba, size_t cnt)
{
	int rc;

	assert(devcon);
	rc = async_req_3_0(devcon->dev_phone, BD_WRITE_BLOCKS, LOWER32(ba),
	    UPPER32(ba), cnt);
	if (rc != EOK) {
		printf("Error %d writing %zu blocks starting at block %" PRIuOFF64
		    " to device handle %" PRIun "\n", rc, cnt, ba,
		    devcon->devmap_handle);
#ifndef NDEBUG
		stacktrace_print();
#endif
	}
	return rc;
}

/** Get block size used by the device. */
static int get_block_size(int dev_phone, size_t *bsize)
{
	sysarg_t bs;
	int rc;

	rc = async_req_0_1(dev_phone, BD_GET_BLOCK_SIZE, &bs);
	if (rc == EOK)
		*bsize = (size_t) bs;

	return rc;
}

/** Get total number of blocks on block device. */
static int get_num_blocks(int dev_phone, aoff64_t *nblocks)
{
	sysarg_t nb_l, nb_h;
	int rc;

	rc = async_req_0_2(dev_phone, BD_GET_NUM_BLOCKS, &nb_l, &nb_h);
	if (rc == EOK)
		*nblocks = (aoff64_t) MERGE_LOUP32(nb_l, nb_h);

	return rc;
}

/** Convert logical block address to physical block address. */
static aoff64_t ba_ltop(devcon_t *devcon, aoff64_t lba)
{
	assert(devcon->cache != NULL);
	return lba * devcon->cache->blocks_cluster;
}
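
/*
 * Worked example (illustrative only): with 1024-byte logical blocks on a
 * device with 512-byte physical blocks, blocks_cluster is 1024 / 512 = 2,
 * so logical block 5 maps to physical block 5 * 2 = 10.
 */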

/** @}
 */