source: mainline/uspace/lib/block/libblock.c @ 0ca7286

Last change on this file was 0ca7286, checked in by Adam Hraska <adam.hraska+hos@…>, 13 years ago:

Added resizing to the user space (single-threaded) hash_table. Resizing is done in a way that mitigates the effects of bad hash functions. The interface change affected many files.

/*
 * Copyright (c) 2008 Jakub Jermar
 * Copyright (c) 2008 Martin Decky
 * Copyright (c) 2011 Martin Sucha
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup libblock
 * @{
 */
/**
 * @file
 * @brief Block device client library.
 */

#include "libblock.h"
#include "../../srv/vfs/vfs.h"
#include <ipc/loc.h>
#include <ipc/bd.h>
#include <ipc/services.h>
#include <errno.h>
#include <sys/mman.h>
#include <async.h>
#include <as.h>
#include <assert.h>
#include <fibril_synch.h>
#include <adt/list.h>
#include <adt/hash_table.h>
#include <macros.h>
#include <mem.h>
#include <malloc.h>
#include <stdio.h>
#include <sys/typefmt.h>
#include <stacktrace.h>

/** Lock protecting the device connection list. */
static FIBRIL_MUTEX_INITIALIZE(dcl_lock);
/** Device connection list head. */
static LIST_INITIALIZE(dcl);

typedef struct {
    fibril_mutex_t lock;        /**< Protects the cache structures. */
    size_t lblock_size;         /**< Logical block size. */
    unsigned blocks_cluster;    /**< Physical blocks per block_t. */
    unsigned block_count;       /**< Total number of blocks. */
    unsigned blocks_cached;     /**< Number of cached blocks. */
    hash_table_t block_hash;    /**< Blocks hashed by logical address. */
    list_t free_list;           /**< Blocks with zero reference count. */
    enum cache_mode mode;       /**< Write-through or write-back mode. */
} cache_t;

typedef struct {
    link_t link;                /**< Link in the device connection list. */
    service_id_t service_id;    /**< Service ID of the device. */
    async_sess_t *sess;         /**< Session to the device server. */
    fibril_mutex_t comm_area_lock;  /**< Serializes communication area use. */
    void *comm_area;            /**< Shared communication area. */
    size_t comm_size;           /**< Size of the communication area. */
    void *bb_buf;               /**< Boot block buffer. */
    aoff64_t bb_addr;           /**< Boot block address. */
    size_t pblock_size;         /**< Physical block size. */
    cache_t *cache;             /**< Block cache or NULL. */
} devcon_t;

static int read_blocks(devcon_t *, aoff64_t, size_t);
static int write_blocks(devcon_t *, aoff64_t, size_t);
static int get_block_size(async_sess_t *, size_t *);
static int get_num_blocks(async_sess_t *, aoff64_t *);
static aoff64_t ba_ltop(devcon_t *, aoff64_t);

/** Find the device connection for a service ID.
 *
 * @param service_id Service ID of the block device.
 *
 * @return Device connection or NULL if not found.
 */
static devcon_t *devcon_search(service_id_t service_id)
{
    fibril_mutex_lock(&dcl_lock);

    list_foreach(dcl, cur) {
        devcon_t *devcon = list_get_instance(cur, devcon_t, link);
        if (devcon->service_id == service_id) {
            fibril_mutex_unlock(&dcl_lock);
            return devcon;
        }
    }

    fibril_mutex_unlock(&dcl_lock);
    return NULL;
}

/** Register a new device connection.
 *
 * @return EOK on success, EINVAL if the communication area is smaller than
 *         one block, ENOMEM on allocation failure or EEXIST if the service
 *         is already registered.
 */
static int devcon_add(service_id_t service_id, async_sess_t *sess,
    size_t bsize, void *comm_area, size_t comm_size)
{
    devcon_t *devcon;

    if (comm_size < bsize)
        return EINVAL;

    devcon = malloc(sizeof(devcon_t));
    if (!devcon)
        return ENOMEM;

    link_initialize(&devcon->link);
    devcon->service_id = service_id;
    devcon->sess = sess;
    fibril_mutex_initialize(&devcon->comm_area_lock);
    devcon->comm_area = comm_area;
    devcon->comm_size = comm_size;
    devcon->bb_buf = NULL;
    devcon->bb_addr = 0;
    devcon->pblock_size = bsize;
    devcon->cache = NULL;

    fibril_mutex_lock(&dcl_lock);
    list_foreach(dcl, cur) {
        devcon_t *d = list_get_instance(cur, devcon_t, link);
        if (d->service_id == service_id) {
            fibril_mutex_unlock(&dcl_lock);
            free(devcon);
            return EEXIST;
        }
    }
    list_append(&devcon->link, &dcl);
    fibril_mutex_unlock(&dcl_lock);
    return EOK;
}

/** Unregister a device connection. */
static void devcon_remove(devcon_t *devcon)
{
    fibril_mutex_lock(&dcl_lock);
    list_remove(&devcon->link);
    fibril_mutex_unlock(&dcl_lock);
}

/** Initialize a block device connection.
 *
 * @param mgmt       Exchange management style for the session.
 * @param service_id Service ID of the block device.
 * @param comm_size  Size of the communication area to be shared with the
 *                   device server.
 *
 * @return EOK on success or a negative error code.
 */
int block_init(exch_mgmt_t mgmt, service_id_t service_id,
    size_t comm_size)
{
    void *comm_area = mmap(NULL, comm_size, PROTO_READ | PROTO_WRITE,
        MAP_ANONYMOUS | MAP_PRIVATE, 0, 0);
    if (!comm_area)
        return ENOMEM;

    async_sess_t *sess = loc_service_connect(mgmt, service_id,
        IPC_FLAG_BLOCKING);
    if (!sess) {
        munmap(comm_area, comm_size);
        return ENOENT;
    }

    async_exch_t *exch = async_exchange_begin(sess);
    int rc = async_share_out_start(exch, comm_area,
        AS_AREA_READ | AS_AREA_WRITE);
    async_exchange_end(exch);

    if (rc != EOK) {
        munmap(comm_area, comm_size);
        async_hangup(sess);
        return rc;
    }

    size_t bsize;
    rc = get_block_size(sess, &bsize);

    if (rc != EOK) {
        munmap(comm_area, comm_size);
        async_hangup(sess);
        return rc;
    }

    rc = devcon_add(service_id, sess, bsize, comm_area, comm_size);
    if (rc != EOK) {
        munmap(comm_area, comm_size);
        async_hangup(sess);
        return rc;
    }

    return EOK;
}
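
/*
 * Illustrative sketch (not part of the library): a client typically resolves
 * a service ID via the location service and only then calls block_init().
 * The service name and communication area size below are hypothetical.
 *
 *     service_id_t sid;
 *     if (loc_service_get_id("devices/disk0", &sid, 0) != EOK)
 *         return;
 *     if (block_init(EXCHANGE_SERIALIZE, sid, 4096) != EOK)
 *         return;
 *     // ... use the block device ...
 *     block_fini(sid);
 */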

/** Finalize a block device connection.
 *
 * @param service_id Service ID of the block device.
 */
void block_fini(service_id_t service_id)
{
    devcon_t *devcon = devcon_search(service_id);
    assert(devcon);

    if (devcon->cache)
        (void) block_cache_fini(service_id);

    devcon_remove(devcon);

    if (devcon->bb_buf)
        free(devcon->bb_buf);

    munmap(devcon->comm_area, devcon->comm_size);
    async_hangup(devcon->sess);

    free(devcon);
}

/** Read the boot block.
 *
 * The boot block is read into a separate buffer where it remains available
 * for the whole lifetime of the connection (see block_bb_get()).
 *
 * @param service_id Service ID of the block device.
 * @param ba         Address of the boot block (physical).
 *
 * @return EOK on success or a negative error code.
 */
int block_bb_read(service_id_t service_id, aoff64_t ba)
{
    void *bb_buf;
    int rc;

    devcon_t *devcon = devcon_search(service_id);
    if (!devcon)
        return ENOENT;
    if (devcon->bb_buf)
        return EEXIST;
    bb_buf = malloc(devcon->pblock_size);
    if (!bb_buf)
        return ENOMEM;

    fibril_mutex_lock(&devcon->comm_area_lock);
    rc = read_blocks(devcon, ba, 1);
    if (rc != EOK) {
        fibril_mutex_unlock(&devcon->comm_area_lock);
        free(bb_buf);
        return rc;
    }
    memcpy(bb_buf, devcon->comm_area, devcon->pblock_size);
    fibril_mutex_unlock(&devcon->comm_area_lock);

    devcon->bb_buf = bb_buf;
    devcon->bb_addr = ba;

    return EOK;
}

/** Get the buffered boot block.
 *
 * @param service_id Service ID of the block device.
 *
 * @return Pointer to the boot block contents or NULL if it has not been
 *         read in yet.
 */
void *block_bb_get(service_id_t service_id)
{
    devcon_t *devcon = devcon_search(service_id);
    assert(devcon);
    return devcon->bb_buf;
}
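
/*
 * Illustrative sketch (not part of the library): a file system driver buffers
 * its boot/super block once and then reads it through block_bb_get(). Error
 * handling is elided and the superblock type is hypothetical.
 *
 *     if (block_bb_read(sid, 0) != EOK)
 *         return;
 *     superblock_t *sb = (superblock_t *) block_bb_get(sid);
 *     // ... parse *sb ...
 */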

/** Hash the two-word key of a cached block. */
static size_t cache_key_hash(unsigned long *key)
{
    /* As recommended by Effective Java, 2nd Edition. */
    size_t hash = 17;
    hash = 31 * hash + key[1];
    hash = 31 * hash + key[0];
    return hash;
}

/** Hash a cached block by its logical block address. */
static size_t cache_hash(const link_t *item)
{
    block_t *b = hash_table_get_instance(item, block_t, hash_link);
    unsigned long key[] = {
        LOWER32(b->lba),
        UPPER32(b->lba)
    };

    return cache_key_hash(key);
}

/** Match a two-word key against a cached block's logical address. */
static bool cache_match(unsigned long *key, size_t keys, const link_t *item)
{
    block_t *b = hash_table_get_instance(item, block_t, hash_link);
    return b->lba == MERGE_LOUP32(key[0], key[1]);
}

static hash_table_ops_t cache_ops = {
    .hash = cache_hash,
    .key_hash = cache_key_hash,
    .match = cache_match,
    .equal = NULL,
    .remove_callback = NULL
};
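
/*
 * Worked example of the key layout used above: the 64-bit logical block
 * address 0x123456789a is looked up as the two-word key
 * { LOWER32 = 0x3456789a, UPPER32 = 0x12 }. cache_key_hash() mixes both
 * words so that addresses beyond 2^32 blocks still hash well.
 */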

/** Create a block cache for a device.
 *
 * @param service_id Service ID of the block device.
 * @param size       Logical block size of the cache.
 * @param blocks     Total number of logical blocks on the device.
 * @param mode       Cache mode (write-through or write-back).
 *
 * @return EOK on success or a negative error code.
 */
int block_cache_init(service_id_t service_id, size_t size, unsigned blocks,
    enum cache_mode mode)
{
    devcon_t *devcon = devcon_search(service_id);
    cache_t *cache;
    if (!devcon)
        return ENOENT;
    if (devcon->cache)
        return EEXIST;
    cache = malloc(sizeof(cache_t));
    if (!cache)
        return ENOMEM;

    fibril_mutex_initialize(&cache->lock);
    list_initialize(&cache->free_list);
    cache->lblock_size = size;
    cache->block_count = blocks;
    cache->blocks_cached = 0;
    cache->mode = mode;

    /* Allow 1:1 or small-to-large block size translation */
    if (cache->lblock_size % devcon->pblock_size != 0) {
        free(cache);
        return ENOTSUP;
    }

    cache->blocks_cluster = cache->lblock_size / devcon->pblock_size;

    if (!hash_table_create(&cache->block_hash, 0, 2, &cache_ops)) {
        free(cache);
        return ENOMEM;
    }

    devcon->cache = cache;
    return EOK;
}
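
/*
 * Illustrative sketch (not part of the library): enabling a write-back cache
 * of 1024-byte logical blocks on top of a device with 512-byte physical
 * blocks. The figures are hypothetical.
 *
 *     aoff64_t nblocks;
 *     if (block_get_nblocks(sid, &nblocks) != EOK)
 *         return;
 *     if (block_cache_init(sid, 1024, nblocks / 2, CACHE_MODE_WB) != EOK)
 *         return;
 */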

/** Destroy a block cache, writing back any dirty blocks.
 *
 * @param service_id Service ID of the block device.
 *
 * @return EOK on success or a negative error code.
 */
int block_cache_fini(service_id_t service_id)
{
    devcon_t *devcon = devcon_search(service_id);
    cache_t *cache;
    int rc;

    if (!devcon)
        return ENOENT;
    if (!devcon->cache)
        return EOK;
    cache = devcon->cache;

    /*
     * We are expecting to find all blocks for this device handle on the
     * free list, i.e. the block reference count should be zero. Do not
     * bother with the cache and block locks because we are single-threaded.
     */
    while (!list_empty(&cache->free_list)) {
        block_t *b = list_get_instance(list_first(&cache->free_list),
            block_t, free_link);

        list_remove(&b->free_link);
        if (b->dirty) {
            memcpy(devcon->comm_area, b->data, b->size);
            rc = write_blocks(devcon, b->pba, cache->blocks_cluster);
            if (rc != EOK)
                return rc;
        }

        unsigned long key[2] = {
            LOWER32(b->lba),
            UPPER32(b->lba)
        };
        hash_table_remove(&cache->block_hash, key, 2);

        free(b->data);
        free(b);
    }

    hash_table_destroy(&cache->block_hash);
    devcon->cache = NULL;
    free(cache);

    return EOK;
}

#define CACHE_LO_WATERMARK 10
#define CACHE_HI_WATERMARK 20

/** Decide whether the cache may allocate a new block.
 *
 * The cache may always grow until it holds CACHE_LO_WATERMARK blocks.
 * Beyond that, a new block is allocated only when there is no free block
 * available for recycling.
 */
static bool cache_can_grow(cache_t *cache)
{
    if (cache->blocks_cached < CACHE_LO_WATERMARK)
        return true;
    if (!list_empty(&cache->free_list))
        return false;
    return true;
}

/** Initialize a newly allocated or recycled block. */
static void block_initialize(block_t *b)
{
    fibril_mutex_initialize(&b->lock);
    b->refcnt = 1;
    b->dirty = false;
    b->toxic = false;
    fibril_rwlock_initialize(&b->contents_lock);
    link_initialize(&b->free_link);
    link_initialize(&b->hash_link);
}

/** Instantiate a block in memory and get a reference to it.
 *
 * @param block      Pointer to where the function will store the
 *                   block pointer on success.
 * @param service_id Service ID of the block device.
 * @param ba         Block address (logical).
 * @param flags      If BLOCK_FLAGS_NOREAD is specified, block_get()
 *                   will not read the contents of the block from the
 *                   device.
 *
 * @return EOK on success or a negative error code.
 */
int block_get(block_t **block, service_id_t service_id, aoff64_t ba, int flags)
{
    devcon_t *devcon;
    cache_t *cache;
    block_t *b;
    link_t *l;
    unsigned long key[2] = {
        LOWER32(ba),
        UPPER32(ba)
    };

    int rc;

    devcon = devcon_search(service_id);

    assert(devcon);
    assert(devcon->cache);

    cache = devcon->cache;

retry:
    rc = EOK;
    b = NULL;

    fibril_mutex_lock(&cache->lock);
    l = hash_table_find(&cache->block_hash, key);
    if (l) {
found:
        /*
         * We found the block in the cache.
         */
        b = hash_table_get_instance(l, block_t, hash_link);
        fibril_mutex_lock(&b->lock);
        if (b->refcnt++ == 0)
            list_remove(&b->free_link);
        if (b->toxic)
            rc = EIO;
        fibril_mutex_unlock(&b->lock);
        fibril_mutex_unlock(&cache->lock);
    } else {
        /*
         * The block was not found in the cache.
         */
        if (cache_can_grow(cache)) {
            /*
             * We can grow the cache by allocating new blocks.
             * Should the allocation fail, we fail over and try to
             * recycle a block from the cache.
             */
            b = malloc(sizeof(block_t));
            if (!b)
                goto recycle;
            b->data = malloc(cache->lblock_size);
            if (!b->data) {
                free(b);
                b = NULL;
                goto recycle;
            }
            cache->blocks_cached++;
        } else {
            /*
             * Try to recycle a block from the free list.
             */
recycle:
            if (list_empty(&cache->free_list)) {
                fibril_mutex_unlock(&cache->lock);
                rc = ENOMEM;
                goto out;
            }
            l = list_first(&cache->free_list);
            b = list_get_instance(l, block_t, free_link);

            fibril_mutex_lock(&b->lock);
            if (b->dirty) {
                /*
                 * The block needs to be written back to the
                 * device before it changes identity. Do this
                 * while not holding the cache lock so that
                 * concurrency is not impeded. Also move the
                 * block to the end of the free list so that we
                 * do not slow down other instances of
                 * block_get() draining the free list.
                 */
                list_remove(&b->free_link);
                list_append(&b->free_link, &cache->free_list);
                fibril_mutex_unlock(&cache->lock);
                fibril_mutex_lock(&devcon->comm_area_lock);
                memcpy(devcon->comm_area, b->data, b->size);
                rc = write_blocks(devcon, b->pba,
                    cache->blocks_cluster);
                fibril_mutex_unlock(&devcon->comm_area_lock);
                if (rc != EOK) {
                    /*
                     * We did not manage to write the block
                     * to the device. Keep it around for
                     * another try. Hopefully, we will grab
                     * another block next time.
                     */
                    fibril_mutex_unlock(&b->lock);
                    goto retry;
                }
                b->dirty = false;
                if (!fibril_mutex_trylock(&cache->lock)) {
                    /*
                     * Somebody is probably racing with us.
                     * Unlock the block and retry.
                     */
                    fibril_mutex_unlock(&b->lock);
                    goto retry;
                }
                l = hash_table_find(&cache->block_hash, key);
                if (l) {
                    /*
                     * Someone else must have already
                     * instantiated the block while we were
                     * not holding the cache lock.
                     * Leave the recycled block on the
                     * freelist and continue as if we
                     * found the block of interest during
                     * the first try.
                     */
                    fibril_mutex_unlock(&b->lock);
                    goto found;
                }
            }
            fibril_mutex_unlock(&b->lock);

            /*
             * Unlink the block from the free list and the hash
             * table.
             */
            list_remove(&b->free_link);
            unsigned long temp_key[2] = {
                LOWER32(b->lba),
                UPPER32(b->lba)
            };
            hash_table_remove(&cache->block_hash, temp_key, 2);
        }

        block_initialize(b);
        b->service_id = service_id;
        b->size = cache->lblock_size;
        b->lba = ba;
        b->pba = ba_ltop(devcon, b->lba);
        hash_table_insert(&cache->block_hash, &b->hash_link);

        /*
         * Lock the block before releasing the cache lock. Thus we don't
         * kill concurrent operations on the cache while doing I/O on
         * the block.
         */
        fibril_mutex_lock(&b->lock);
        fibril_mutex_unlock(&cache->lock);

        if (!(flags & BLOCK_FLAGS_NOREAD)) {
            /*
             * The block contains old or no data. We need to read
             * the new contents from the device.
             */
            fibril_mutex_lock(&devcon->comm_area_lock);
            rc = read_blocks(devcon, b->pba, cache->blocks_cluster);
            memcpy(b->data, devcon->comm_area, cache->lblock_size);
            fibril_mutex_unlock(&devcon->comm_area_lock);
            if (rc != EOK)
                b->toxic = true;
        } else
            rc = EOK;

        fibril_mutex_unlock(&b->lock);
    }
out:
    if ((rc != EOK) && b) {
        assert(b->toxic);
        (void) block_put(b);
        b = NULL;
    }
    *block = b;
    return rc;
}
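
/*
 * Illustrative sketch (not part of the library): reading and modifying one
 * cached block. The block address and the byte written are hypothetical;
 * error handling is shortened.
 *
 *     block_t *b;
 *     if (block_get(&b, sid, 42, BLOCK_FLAGS_NONE) != EOK)
 *         return;
 *     ((uint8_t *) b->data)[0] = 0xeb;  // modify the contents
 *     b->dirty = true;                  // request write-back
 *     block_put(b);
 */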

/** Release a reference to a block.
 *
 * If the last reference is dropped, the block is put on the free list.
 *
 * @param block Block of which a reference is to be released.
 *
 * @return EOK on success or a negative error code.
 */
int block_put(block_t *block)
{
    devcon_t *devcon = devcon_search(block->service_id);
    cache_t *cache;
    unsigned blocks_cached;
    enum cache_mode mode;
    int rc = EOK;

    assert(devcon);
    assert(devcon->cache);
    assert(block->refcnt >= 1);

    cache = devcon->cache;

retry:
    fibril_mutex_lock(&cache->lock);
    blocks_cached = cache->blocks_cached;
    mode = cache->mode;
    fibril_mutex_unlock(&cache->lock);

    /*
     * Determine whether to sync the block. Syncing the block is best done
     * when not holding the cache lock as it does not impede concurrency.
     * Since the situation may have changed when we unlocked the cache, the
     * blocks_cached and mode variables are mere hints. We will recheck the
     * conditions later when the cache lock is held again.
     */
    fibril_mutex_lock(&block->lock);
    if (block->toxic)
        block->dirty = false;  /* will not write back toxic block */
    if (block->dirty && (block->refcnt == 1) &&
        (blocks_cached > CACHE_HI_WATERMARK || mode != CACHE_MODE_WB)) {
        fibril_mutex_lock(&devcon->comm_area_lock);
        memcpy(devcon->comm_area, block->data, block->size);
        rc = write_blocks(devcon, block->pba, cache->blocks_cluster);
        fibril_mutex_unlock(&devcon->comm_area_lock);
        block->dirty = false;
    }
    fibril_mutex_unlock(&block->lock);

    fibril_mutex_lock(&cache->lock);
    fibril_mutex_lock(&block->lock);
    if (!--block->refcnt) {
        /*
         * Last reference to the block was dropped. Either free the
         * block or put it on the free list. In case of an I/O error,
         * free the block.
         */
        if ((cache->blocks_cached > CACHE_HI_WATERMARK) ||
            (rc != EOK)) {
            /*
             * Currently there are too many cached blocks or there
             * was an I/O error when writing the block back to the
             * device.
             */
            if (block->dirty) {
                /*
                 * We cannot sync the block while holding the
                 * cache lock. Release everything and retry.
                 */
                block->refcnt++;
                fibril_mutex_unlock(&block->lock);
                fibril_mutex_unlock(&cache->lock);
                goto retry;
            }
            /*
             * Take the block out of the cache and free it.
             */
            unsigned long key[2] = {
                LOWER32(block->lba),
                UPPER32(block->lba)
            };
            hash_table_remove(&cache->block_hash, key, 2);
            fibril_mutex_unlock(&block->lock);
            free(block->data);
            free(block);
            cache->blocks_cached--;
            fibril_mutex_unlock(&cache->lock);
            return rc;
        }
        /*
         * Put the block on the free list.
         */
        if (cache->mode != CACHE_MODE_WB && block->dirty) {
            /*
             * We cannot sync the block while holding the cache
             * lock. Release everything and retry.
             */
            block->refcnt++;
            fibril_mutex_unlock(&block->lock);
            fibril_mutex_unlock(&cache->lock);
            goto retry;
        }
        list_append(&block->free_link, &cache->free_list);
    }
    fibril_mutex_unlock(&block->lock);
    fibril_mutex_unlock(&cache->lock);

    return rc;
}

/** Read sequential data from a block device.
 *
 * @param service_id Service ID of the block device.
 * @param bufpos     Pointer to the first unread valid offset within the
 *                   communication buffer.
 * @param buflen     Pointer to the number of unread bytes that are ready in
 *                   the communication buffer.
 * @param pos        Device position to be read.
 * @param dst        Destination buffer.
 * @param size       Size of the destination buffer.
 *
 * @return EOK on success or a negative error code on failure.
 */
int block_seqread(service_id_t service_id, size_t *bufpos, size_t *buflen,
    aoff64_t *pos, void *dst, size_t size)
{
    size_t offset = 0;
    size_t left = size;
    size_t block_size;
    devcon_t *devcon;

    devcon = devcon_search(service_id);
    assert(devcon);
    block_size = devcon->pblock_size;

    fibril_mutex_lock(&devcon->comm_area_lock);
    while (left > 0) {
        size_t rd;

        if (*bufpos + left < *buflen)
            rd = left;
        else
            rd = *buflen - *bufpos;

        if (rd > 0) {
            /*
             * Copy the contents of the communication buffer to the
             * destination buffer.
             */
            memcpy(dst + offset, devcon->comm_area + *bufpos, rd);
            offset += rd;
            *bufpos += rd;
            *pos += rd;
            left -= rd;
        }

        if (*bufpos == *buflen) {
            /* Refill the communication buffer with a new block. */
            int rc;

            rc = read_blocks(devcon, *pos / block_size, 1);
            if (rc != EOK) {
                fibril_mutex_unlock(&devcon->comm_area_lock);
                return rc;
            }

            *bufpos = 0;
            *buflen = block_size;
        }
    }
    fibril_mutex_unlock(&devcon->comm_area_lock);

    return EOK;
}
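
/*
 * Illustrative sketch (not part of the library): reading a byte stream from
 * the start of a device. Initializing buflen to 0 forces the first refill;
 * the 16-byte read size is hypothetical.
 *
 *     size_t bufpos = 0, buflen = 0;
 *     aoff64_t pos = 0;
 *     uint8_t header[16];
 *     if (block_seqread(sid, &bufpos, &buflen, &pos, header,
 *         sizeof(header)) != EOK)
 *         return;
 */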

/** Read blocks directly from device (bypass cache).
 *
 * The transfer must fit within the device's communication area.
 *
 * @param service_id Service ID of the block device.
 * @param ba         Address of first block (physical).
 * @param cnt        Number of blocks.
 * @param buf        Buffer for storing the data.
 *
 * @return EOK on success or negative error code on failure.
 */
int block_read_direct(service_id_t service_id, aoff64_t ba, size_t cnt, void *buf)
{
    devcon_t *devcon;
    int rc;

    devcon = devcon_search(service_id);
    assert(devcon);

    fibril_mutex_lock(&devcon->comm_area_lock);

    rc = read_blocks(devcon, ba, cnt);
    if (rc == EOK)
        memcpy(buf, devcon->comm_area, devcon->pblock_size * cnt);

    fibril_mutex_unlock(&devcon->comm_area_lock);

    return rc;
}

/** Write blocks directly to device (bypass cache).
 *
 * The transfer must fit within the device's communication area.
 *
 * @param service_id Service ID of the block device.
 * @param ba         Address of first block (physical).
 * @param cnt        Number of blocks.
 * @param data       The data to be written.
 *
 * @return EOK on success or negative error code on failure.
 */
int block_write_direct(service_id_t service_id, aoff64_t ba, size_t cnt,
    const void *data)
{
    devcon_t *devcon;
    int rc;

    devcon = devcon_search(service_id);
    assert(devcon);

    fibril_mutex_lock(&devcon->comm_area_lock);

    memcpy(devcon->comm_area, data, devcon->pblock_size * cnt);
    rc = write_blocks(devcon, ba, cnt);

    fibril_mutex_unlock(&devcon->comm_area_lock);

    return rc;
}
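
/*
 * Illustrative sketch (not part of the library): copying the first block of a
 * device over its second block, bypassing the cache. The block addresses are
 * hypothetical.
 *
 *     size_t bsize;
 *     if (block_get_bsize(sid, &bsize) != EOK)
 *         return;
 *     void *tmp = malloc(bsize);
 *     if (tmp != NULL && block_read_direct(sid, 0, 1, tmp) == EOK)
 *         (void) block_write_direct(sid, 1, 1, tmp);
 *     free(tmp);
 */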

/** Get device block size.
 *
 * @param service_id Service ID of the block device.
 * @param bsize      Output block size.
 *
 * @return EOK on success or negative error code on failure.
 */
int block_get_bsize(service_id_t service_id, size_t *bsize)
{
    devcon_t *devcon;

    devcon = devcon_search(service_id);
    assert(devcon);

    return get_block_size(devcon->sess, bsize);
}

/** Get number of blocks on device.
 *
 * @param service_id Service ID of the block device.
 * @param nblocks    Output number of blocks.
 *
 * @return EOK on success or negative error code on failure.
 */
int block_get_nblocks(service_id_t service_id, aoff64_t *nblocks)
{
    devcon_t *devcon = devcon_search(service_id);
    assert(devcon);

    return get_num_blocks(devcon->sess, nblocks);
}
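
/*
 * Illustrative sketch (not part of the library): computing the total device
 * capacity in bytes from the two geometry queries above.
 *
 *     size_t bsize;
 *     aoff64_t nblocks;
 *     if (block_get_bsize(sid, &bsize) == EOK &&
 *         block_get_nblocks(sid, &nblocks) == EOK)
 *         printf("capacity: %" PRIu64 " B\n", (uint64_t) nblocks * bsize);
 */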

/** Read bytes directly from the device (bypass cache).
 *
 * @param service_id Service ID of the block device.
 * @param abs_offset Absolute offset in bytes where to start reading.
 * @param bytes      Number of bytes to read.
 * @param data       Buffer that receives the data.
 *
 * @return EOK on success or negative error code on failure.
 */
int block_read_bytes_direct(service_id_t service_id, aoff64_t abs_offset,
    size_t bytes, void *data)
{
    int rc;
    size_t phys_block_size;
    size_t buf_size;
    void *buffer;
    aoff64_t first_block;
    aoff64_t last_block;
    size_t blocks;
    size_t offset;

    rc = block_get_bsize(service_id, &phys_block_size);
    if (rc != EOK) {
        return rc;
    }

    /* Calculate data position and required space. */
    first_block = abs_offset / phys_block_size;
    offset = abs_offset % phys_block_size;
    last_block = (abs_offset + bytes - 1) / phys_block_size;
    blocks = last_block - first_block + 1;
    buf_size = blocks * phys_block_size;

    /* Read the data into memory. */
    buffer = malloc(buf_size);
    if (buffer == NULL) {
        return ENOMEM;
    }

    rc = block_read_direct(service_id, first_block, blocks, buffer);
    if (rc != EOK) {
        free(buffer);
        return rc;
    }

    /* Copy the data from the buffer. */
    memcpy(data, buffer + offset, bytes);
    free(buffer);

    return EOK;
}
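
/*
 * Worked example of the arithmetic above, assuming 512-byte physical blocks:
 * reading 100 bytes at absolute offset 1000 yields first_block = 1,
 * offset = 488 and last_block = 1099 / 512 = 2, hence blocks = 2 and
 * buf_size = 1024; the caller's data is then copied from buffer[488..587].
 */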

/** Get TOC from device.
 *
 * @param service_id Service ID of the block device.
 * @param session    Starting session.
 *
 * @return Allocated TOC structure or NULL on failure.
 */
toc_block_t *block_get_toc(service_id_t service_id, uint8_t session)
{
    devcon_t *devcon = devcon_search(service_id);
    assert(devcon);

    toc_block_t *toc = NULL;

    fibril_mutex_lock(&devcon->comm_area_lock);

    async_exch_t *exch = async_exchange_begin(devcon->sess);
    int rc = async_req_1_0(exch, BD_READ_TOC, session);
    async_exchange_end(exch);

    if (rc == EOK) {
        toc = (toc_block_t *) malloc(sizeof(toc_block_t));
        if (toc != NULL) {
            memset(toc, 0, sizeof(toc_block_t));
            memcpy(toc, devcon->comm_area,
                min(devcon->pblock_size, sizeof(toc_block_t)));
        }
    }

    fibril_mutex_unlock(&devcon->comm_area_lock);

    return toc;
}

/** Read blocks from block device into the communication area.
 *
 * The caller must hold the device's communication area lock.
 *
 * @param devcon Device connection.
 * @param ba     Address of first block.
 * @param cnt    Number of blocks.
 *
 * @return EOK on success or negative error code on failure.
 */
static int read_blocks(devcon_t *devcon, aoff64_t ba, size_t cnt)
{
    assert(devcon);

    async_exch_t *exch = async_exchange_begin(devcon->sess);
    int rc = async_req_3_0(exch, BD_READ_BLOCKS, LOWER32(ba),
        UPPER32(ba), cnt);
    async_exchange_end(exch);

    if (rc != EOK) {
        printf("Error %d reading %zu blocks starting at block %" PRIuOFF64
            " from device handle %" PRIun "\n", rc, cnt, ba,
            devcon->service_id);
#ifndef NDEBUG
        stacktrace_print();
#endif
    }

    return rc;
}

/** Write blocks to block device from the communication area.
 *
 * Callers must serialize their access to the communication area, which
 * holds the data to be written.
 *
 * @param devcon Device connection.
 * @param ba     Address of first block.
 * @param cnt    Number of blocks.
 *
 * @return EOK on success or negative error code on failure.
 */
static int write_blocks(devcon_t *devcon, aoff64_t ba, size_t cnt)
{
    assert(devcon);

    async_exch_t *exch = async_exchange_begin(devcon->sess);
    int rc = async_req_3_0(exch, BD_WRITE_BLOCKS, LOWER32(ba),
        UPPER32(ba), cnt);
    async_exchange_end(exch);

    if (rc != EOK) {
        printf("Error %d writing %zu blocks starting at block %" PRIuOFF64
            " to device handle %" PRIun "\n", rc, cnt, ba, devcon->service_id);
#ifndef NDEBUG
        stacktrace_print();
#endif
    }

    return rc;
}

/** Get block size used by the device. */
static int get_block_size(async_sess_t *sess, size_t *bsize)
{
    sysarg_t bs;

    async_exch_t *exch = async_exchange_begin(sess);
    int rc = async_req_0_1(exch, BD_GET_BLOCK_SIZE, &bs);
    async_exchange_end(exch);

    if (rc == EOK)
        *bsize = (size_t) bs;

    return rc;
}

/** Get total number of blocks on block device. */
static int get_num_blocks(async_sess_t *sess, aoff64_t *nblocks)
{
    sysarg_t nb_l;
    sysarg_t nb_h;

    async_exch_t *exch = async_exchange_begin(sess);
    int rc = async_req_0_2(exch, BD_GET_NUM_BLOCKS, &nb_l, &nb_h);
    async_exchange_end(exch);

    if (rc == EOK)
        *nblocks = (aoff64_t) MERGE_LOUP32(nb_l, nb_h);

    return rc;
}

/** Convert logical block address to physical block address. */
static aoff64_t ba_ltop(devcon_t *devcon, aoff64_t lba)
{
    assert(devcon->cache != NULL);
    return lba * devcon->cache->blocks_cluster;
}
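
/*
 * Example: with 1024-byte logical blocks on a device with 512-byte physical
 * blocks, blocks_cluster is 2, so logical block 5 maps to physical block 10.
 */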

/** @}
 */