source: mainline/uspace/lib/block/libblock.c@d7f6248

Last change on this file since d7f6248 was 7a72ce1a, checked in by Martin Decky <martin@…>, 14 years ago

fix argument name

/*
 * Copyright (c) 2008 Jakub Jermar
 * Copyright (c) 2008 Martin Decky
 * Copyright (c) 2011 Martin Sucha
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup libblock
 * @{
 */
/**
 * @file
 * @brief Client library for accessing block devices, with an optional
 * write-through/write-back block cache.
 */

#include "libblock.h"
#include "../../srv/vfs/vfs.h"
#include <ipc/loc.h>
#include <ipc/bd.h>
#include <ipc/services.h>
#include <errno.h>
#include <sys/mman.h>
#include <async.h>
#include <as.h>
#include <assert.h>
#include <fibril_synch.h>
#include <adt/list.h>
#include <adt/hash_table.h>
#include <macros.h>
#include <mem.h>
#include <malloc.h>
#include <stdio.h>
#include <sys/typefmt.h>
#include <stacktrace.h>

/** Lock protecting the device connection list. */
static FIBRIL_MUTEX_INITIALIZE(dcl_lock);
/** Device connection list head. */
static LIST_INITIALIZE(dcl);

#define CACHE_BUCKETS_LOG2 10
#define CACHE_BUCKETS (1 << CACHE_BUCKETS_LOG2)

typedef struct {
    fibril_mutex_t lock;
    size_t lblock_size;      /**< Logical block size. */
    unsigned blocks_cluster; /**< Physical blocks per block_t. */
    unsigned block_count;    /**< Total number of blocks. */
    unsigned blocks_cached;  /**< Number of cached blocks. */
    hash_table_t block_hash;
    list_t free_list;
    enum cache_mode mode;
} cache_t;

typedef struct {
    link_t link;
    service_id_t service_id;
    async_sess_t *sess;
    fibril_mutex_t comm_area_lock;
    void *comm_area;
    size_t comm_size;
    void *bb_buf;
    aoff64_t bb_addr;
    size_t pblock_size;      /**< Physical block size. */
    cache_t *cache;
} devcon_t;

static int read_blocks(devcon_t *, aoff64_t, size_t);
static int write_blocks(devcon_t *, aoff64_t, size_t);
static int get_block_size(async_sess_t *, size_t *);
static int get_num_blocks(async_sess_t *, aoff64_t *);
static int read_toc(async_sess_t *, uint8_t);
static aoff64_t ba_ltop(devcon_t *, aoff64_t);

static devcon_t *devcon_search(service_id_t service_id)
{
    fibril_mutex_lock(&dcl_lock);

    list_foreach(dcl, cur) {
        devcon_t *devcon = list_get_instance(cur, devcon_t, link);
        if (devcon->service_id == service_id) {
            fibril_mutex_unlock(&dcl_lock);
            return devcon;
        }
    }

    fibril_mutex_unlock(&dcl_lock);
    return NULL;
}

static int devcon_add(service_id_t service_id, async_sess_t *sess,
    size_t bsize, void *comm_area, size_t comm_size)
{
    devcon_t *devcon;

    if (comm_size < bsize)
        return EINVAL;

    devcon = malloc(sizeof(devcon_t));
    if (!devcon)
        return ENOMEM;

    link_initialize(&devcon->link);
    devcon->service_id = service_id;
    devcon->sess = sess;
    fibril_mutex_initialize(&devcon->comm_area_lock);
    devcon->comm_area = comm_area;
    devcon->comm_size = comm_size;
    devcon->bb_buf = NULL;
    devcon->bb_addr = 0;
    devcon->pblock_size = bsize;
    devcon->cache = NULL;

    fibril_mutex_lock(&dcl_lock);
    list_foreach(dcl, cur) {
        devcon_t *d = list_get_instance(cur, devcon_t, link);
        if (d->service_id == service_id) {
            fibril_mutex_unlock(&dcl_lock);
            free(devcon);
            return EEXIST;
        }
    }
    list_append(&devcon->link, &dcl);
    fibril_mutex_unlock(&dcl_lock);
    return EOK;
}

static void devcon_remove(devcon_t *devcon)
{
    fibril_mutex_lock(&dcl_lock);
    list_remove(&devcon->link);
    fibril_mutex_unlock(&dcl_lock);
}

int block_init(exch_mgmt_t mgmt, service_id_t service_id,
    size_t comm_size)
{
    void *comm_area = mmap(NULL, comm_size, PROTO_READ | PROTO_WRITE,
        MAP_ANONYMOUS | MAP_PRIVATE, 0, 0);
    if (!comm_area)
        return ENOMEM;

    async_sess_t *sess = loc_service_connect(mgmt, service_id,
        IPC_FLAG_BLOCKING);
    if (!sess) {
        munmap(comm_area, comm_size);
        return ENOENT;
    }

    async_exch_t *exch = async_exchange_begin(sess);
    int rc = async_share_out_start(exch, comm_area,
        AS_AREA_READ | AS_AREA_WRITE);
    async_exchange_end(exch);

    if (rc != EOK) {
        munmap(comm_area, comm_size);
        async_hangup(sess);
        return rc;
    }

    size_t bsize;
    rc = get_block_size(sess, &bsize);

    if (rc != EOK) {
        munmap(comm_area, comm_size);
        async_hangup(sess);
        return rc;
    }

    rc = devcon_add(service_id, sess, bsize, comm_area, comm_size);
    if (rc != EOK) {
        munmap(comm_area, comm_size);
        async_hangup(sess);
        return rc;
    }

    return EOK;
}
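
/*
 * Typical initialization sequence, shown as a minimal sketch (not part of
 * the original file). It assumes the caller has already resolved the device
 * to a service ID, here via loc_service_get_id(), and that EXCHANGE_SERIALIZE
 * is an acceptable exchange management style for the driver; both choices
 * are illustrative. comm_size must be at least the device block size,
 * otherwise devcon_add() rejects the connection with EINVAL.
 *
 * @code
 *    service_id_t sid;
 *    int rc = loc_service_get_id("bd/disk0", &sid, 0);
 *    if (rc != EOK)
 *        return rc;
 *
 *    rc = block_init(EXCHANGE_SERIALIZE, sid, 4096);
 *    if (rc != EOK)
 *        return rc;
 *
 *    // ... block_get(), block_read_direct(), etc. ...
 *
 *    block_fini(sid);
 * @endcode
 */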

void block_fini(service_id_t service_id)
{
    devcon_t *devcon = devcon_search(service_id);
    assert(devcon);

    if (devcon->cache)
        (void) block_cache_fini(service_id);

    devcon_remove(devcon);

    if (devcon->bb_buf)
        free(devcon->bb_buf);

    munmap(devcon->comm_area, devcon->comm_size);
    async_hangup(devcon->sess);

    free(devcon);
}

int block_bb_read(service_id_t service_id, aoff64_t ba)
{
    void *bb_buf;
    int rc;

    devcon_t *devcon = devcon_search(service_id);
    if (!devcon)
        return ENOENT;
    if (devcon->bb_buf)
        return EEXIST;
    bb_buf = malloc(devcon->pblock_size);
    if (!bb_buf)
        return ENOMEM;

    fibril_mutex_lock(&devcon->comm_area_lock);
    rc = read_blocks(devcon, ba, 1);
    if (rc != EOK) {
        fibril_mutex_unlock(&devcon->comm_area_lock);
        free(bb_buf);
        return rc;
    }
    memcpy(bb_buf, devcon->comm_area, devcon->pblock_size);
    fibril_mutex_unlock(&devcon->comm_area_lock);

    devcon->bb_buf = bb_buf;
    devcon->bb_addr = ba;

    return EOK;
}

void *block_bb_get(service_id_t service_id)
{
    devcon_t *devcon = devcon_search(service_id);
    assert(devcon);
    return devcon->bb_buf;
}

static hash_index_t cache_hash(unsigned long *key)
{
    return MERGE_LOUP32(key[0], key[1]) & (CACHE_BUCKETS - 1);
}

static int cache_compare(unsigned long *key, hash_count_t keys, link_t *item)
{
    block_t *b = hash_table_get_instance(item, block_t, hash_link);
    return b->lba == MERGE_LOUP32(key[0], key[1]);
}

static void cache_remove_callback(link_t *item)
{
}

static hash_table_operations_t cache_ops = {
    .hash = cache_hash,
    .compare = cache_compare,
    .remove_callback = cache_remove_callback
};

int block_cache_init(service_id_t service_id, size_t size, unsigned blocks,
    enum cache_mode mode)
{
    devcon_t *devcon = devcon_search(service_id);
    cache_t *cache;
    if (!devcon)
        return ENOENT;
    if (devcon->cache)
        return EEXIST;
    cache = malloc(sizeof(cache_t));
    if (!cache)
        return ENOMEM;

    fibril_mutex_initialize(&cache->lock);
    list_initialize(&cache->free_list);
    cache->lblock_size = size;
    cache->block_count = blocks;
    cache->blocks_cached = 0;
    cache->mode = mode;

    /* Allow 1:1 or small-to-large block size translation. */
    if (cache->lblock_size % devcon->pblock_size != 0) {
        free(cache);
        return ENOTSUP;
    }

    cache->blocks_cluster = cache->lblock_size / devcon->pblock_size;

    if (!hash_table_create(&cache->block_hash, CACHE_BUCKETS, 2,
        &cache_ops)) {
        free(cache);
        return ENOMEM;
    }

    devcon->cache = cache;
    return EOK;
}
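
/*
 * Example cache setup (a sketch with illustrative values): a file system
 * using 1024-byte logical blocks on top of a 512-byte physical device gets
 * blocks_cluster == 2. The logical block size must be a multiple of the
 * physical block size, otherwise ENOTSUP is returned. The block count is
 * only recorded in the cache structure by this file, so the sketch passes 0;
 * CACHE_MODE_WB comes from libblock.h.
 *
 * @code
 *    int rc = block_cache_init(sid, 1024, 0, CACHE_MODE_WB);
 *    if (rc != EOK)
 *        return rc;
 * @endcode
 */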

int block_cache_fini(service_id_t service_id)
{
    devcon_t *devcon = devcon_search(service_id);
    cache_t *cache;
    int rc;

    if (!devcon)
        return ENOENT;
    if (!devcon->cache)
        return EOK;
    cache = devcon->cache;

    /*
     * We are expecting to find all blocks for this device handle on the
     * free list, i.e. the block reference count should be zero. Do not
     * bother with the cache and block locks because we are single-threaded.
     */
    while (!list_empty(&cache->free_list)) {
        block_t *b = list_get_instance(list_first(&cache->free_list),
            block_t, free_link);

        list_remove(&b->free_link);
        if (b->dirty) {
            memcpy(devcon->comm_area, b->data, b->size);
            rc = write_blocks(devcon, b->pba, cache->blocks_cluster);
            if (rc != EOK)
                return rc;
        }

        unsigned long key[2] = {
            LOWER32(b->lba),
            UPPER32(b->lba)
        };
        hash_table_remove(&cache->block_hash, key, 2);

        free(b->data);
        free(b);
    }

    hash_table_destroy(&cache->block_hash);
    devcon->cache = NULL;
    free(cache);

    return EOK;
}

#define CACHE_LO_WATERMARK 10
#define CACHE_HI_WATERMARK 20
static bool cache_can_grow(cache_t *cache)
{
    if (cache->blocks_cached < CACHE_LO_WATERMARK)
        return true;
    if (!list_empty(&cache->free_list))
        return false;
    return true;
}

static void block_initialize(block_t *b)
{
    fibril_mutex_initialize(&b->lock);
    b->refcnt = 1;
    b->dirty = false;
    b->toxic = false;
    fibril_rwlock_initialize(&b->contents_lock);
    link_initialize(&b->free_link);
    link_initialize(&b->hash_link);
}

/** Instantiate a block in memory and get a reference to it.
 *
 * @param block      Pointer to where the function will store the
 *                   block pointer on success.
 * @param service_id Service ID of the block device.
 * @param ba         Block address (logical).
 * @param flags      If BLOCK_FLAGS_NOREAD is specified, block_get()
 *                   will not read the contents of the block from the
 *                   device.
 *
 * @return EOK on success or a negative error code.
 */
int block_get(block_t **block, service_id_t service_id, aoff64_t ba, int flags)
{
    devcon_t *devcon;
    cache_t *cache;
    block_t *b;
    link_t *l;
    unsigned long key[2] = {
        LOWER32(ba),
        UPPER32(ba)
    };

    int rc;

    devcon = devcon_search(service_id);

    assert(devcon);
    assert(devcon->cache);

    cache = devcon->cache;

retry:
    rc = EOK;
    b = NULL;

    fibril_mutex_lock(&cache->lock);
    l = hash_table_find(&cache->block_hash, key);
    if (l) {
found:
        /*
         * We found the block in the cache.
         */
        b = hash_table_get_instance(l, block_t, hash_link);
        fibril_mutex_lock(&b->lock);
        if (b->refcnt++ == 0)
            list_remove(&b->free_link);
        if (b->toxic)
            rc = EIO;
        fibril_mutex_unlock(&b->lock);
        fibril_mutex_unlock(&cache->lock);
    } else {
        /*
         * The block was not found in the cache.
         */
        if (cache_can_grow(cache)) {
            /*
             * We can grow the cache by allocating new blocks.
             * Should the allocation fail, we fail over and try to
             * recycle a block from the cache.
             */
            b = malloc(sizeof(block_t));
            if (!b)
                goto recycle;
            b->data = malloc(cache->lblock_size);
            if (!b->data) {
                free(b);
                b = NULL;
                goto recycle;
            }
            cache->blocks_cached++;
        } else {
            /*
             * Try to recycle a block from the free list.
             */
recycle:
            if (list_empty(&cache->free_list)) {
                fibril_mutex_unlock(&cache->lock);
                rc = ENOMEM;
                goto out;
            }
            l = list_first(&cache->free_list);
            b = list_get_instance(l, block_t, free_link);

            fibril_mutex_lock(&b->lock);
            if (b->dirty) {
                /*
                 * The block needs to be written back to the
                 * device before it changes identity. Do this
                 * while not holding the cache lock so that
                 * concurrency is not impeded. Also move the
                 * block to the end of the free list so that we
                 * do not slow down other instances of
                 * block_get() draining the free list.
                 */
                list_remove(&b->free_link);
                list_append(&b->free_link, &cache->free_list);
                fibril_mutex_unlock(&cache->lock);
                fibril_mutex_lock(&devcon->comm_area_lock);
                memcpy(devcon->comm_area, b->data, b->size);
                rc = write_blocks(devcon, b->pba,
                    cache->blocks_cluster);
                fibril_mutex_unlock(&devcon->comm_area_lock);
                if (rc != EOK) {
                    /*
                     * We did not manage to write the block
                     * to the device. Keep it around for
                     * another try. Hopefully, we will grab
                     * another block next time.
                     */
                    fibril_mutex_unlock(&b->lock);
                    goto retry;
                }
                b->dirty = false;
                if (!fibril_mutex_trylock(&cache->lock)) {
                    /*
                     * Somebody is probably racing with us.
                     * Unlock the block and retry.
                     */
                    fibril_mutex_unlock(&b->lock);
                    goto retry;
                }
                l = hash_table_find(&cache->block_hash, key);
                if (l) {
                    /*
                     * Someone else must have already
                     * instantiated the block while we were
                     * not holding the cache lock.
                     * Leave the recycled block on the
                     * freelist and continue as if we
                     * found the block of interest during
                     * the first try.
                     */
                    fibril_mutex_unlock(&b->lock);
                    goto found;
                }
            }
            fibril_mutex_unlock(&b->lock);

            /*
             * Unlink the block from the free list and the hash
             * table.
             */
            list_remove(&b->free_link);
            unsigned long temp_key[2] = {
                LOWER32(b->lba),
                UPPER32(b->lba)
            };
            hash_table_remove(&cache->block_hash, temp_key, 2);
        }

        block_initialize(b);
        b->service_id = service_id;
        b->size = cache->lblock_size;
        b->lba = ba;
        b->pba = ba_ltop(devcon, b->lba);
        hash_table_insert(&cache->block_hash, key, &b->hash_link);

        /*
         * Lock the block before releasing the cache lock. Thus we don't
         * kill concurrent operations on the cache while doing I/O on
         * the block.
         */
        fibril_mutex_lock(&b->lock);
        fibril_mutex_unlock(&cache->lock);

        if (!(flags & BLOCK_FLAGS_NOREAD)) {
            /*
             * The block contains old or no data. We need to read
             * the new contents from the device.
             */
            fibril_mutex_lock(&devcon->comm_area_lock);
            rc = read_blocks(devcon, b->pba, cache->blocks_cluster);
            memcpy(b->data, devcon->comm_area, cache->lblock_size);
            fibril_mutex_unlock(&devcon->comm_area_lock);
            if (rc != EOK)
                b->toxic = true;
        } else
            rc = EOK;

        fibril_mutex_unlock(&b->lock);
    }
out:
    if ((rc != EOK) && b) {
        assert(b->toxic);
        (void) block_put(b);
        b = NULL;
    }
    *block = b;
    return rc;
}
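
/*
 * Typical read-modify-write cycle against the cache (a sketch; `payload`
 * stands in for caller data and block 37 is an arbitrary address). Setting
 * b->dirty before block_put() is what makes the write-back logic in
 * block_put() and block_cache_fini() flush the block. BLOCK_FLAGS_NONE and
 * BLOCK_FLAGS_NOREAD are declared in libblock.h.
 *
 * @code
 *    block_t *b;
 *    int rc = block_get(&b, sid, 37, BLOCK_FLAGS_NONE);
 *    if (rc != EOK)
 *        return rc;
 *
 *    memcpy(b->data, payload, b->size);  // modify the cached contents
 *    b->dirty = true;                    // schedule a write-back
 *    rc = block_put(b);
 * @endcode
 */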

/** Release a reference to a block.
 *
 * If the last reference is dropped, the block is put on the free list.
 *
 * @param block Block of which a reference is to be released.
 *
 * @return EOK on success or a negative error code.
 */
int block_put(block_t *block)
{
    devcon_t *devcon = devcon_search(block->service_id);
    cache_t *cache;
    unsigned blocks_cached;
    enum cache_mode mode;
    int rc = EOK;

    assert(devcon);
    assert(devcon->cache);
    assert(block->refcnt >= 1);

    cache = devcon->cache;

retry:
    fibril_mutex_lock(&cache->lock);
    blocks_cached = cache->blocks_cached;
    mode = cache->mode;
    fibril_mutex_unlock(&cache->lock);

    /*
     * Determine whether to sync the block. Syncing the block is best done
     * when not holding the cache lock, as it does not impede concurrency.
     * Since the situation may have changed when we unlocked the cache, the
     * blocks_cached and mode variables are mere hints. We will recheck the
     * conditions later when the cache lock is held again.
     */
    fibril_mutex_lock(&block->lock);
    if (block->toxic)
        block->dirty = false; /* will not write back toxic block */
    if (block->dirty && (block->refcnt == 1) &&
        (blocks_cached > CACHE_HI_WATERMARK || mode != CACHE_MODE_WB)) {
        fibril_mutex_lock(&devcon->comm_area_lock);
        memcpy(devcon->comm_area, block->data, block->size);
        rc = write_blocks(devcon, block->pba, cache->blocks_cluster);
        fibril_mutex_unlock(&devcon->comm_area_lock);
        block->dirty = false;
    }
    fibril_mutex_unlock(&block->lock);

    fibril_mutex_lock(&cache->lock);
    fibril_mutex_lock(&block->lock);
    if (!--block->refcnt) {
        /*
         * Last reference to the block was dropped. Either free the
         * block or put it on the free list. In case of an I/O error,
         * free the block.
         */
        if ((cache->blocks_cached > CACHE_HI_WATERMARK) ||
            (rc != EOK)) {
            /*
             * Currently there are too many cached blocks or there
             * was an I/O error when writing the block back to the
             * device.
             */
            if (block->dirty) {
                /*
                 * We cannot sync the block while holding the
                 * cache lock. Release everything and retry.
                 */
                block->refcnt++;
                fibril_mutex_unlock(&block->lock);
                fibril_mutex_unlock(&cache->lock);
                goto retry;
            }
            /*
             * Take the block out of the cache and free it.
             */
            unsigned long key[2] = {
                LOWER32(block->lba),
                UPPER32(block->lba)
            };
            hash_table_remove(&cache->block_hash, key, 2);
            fibril_mutex_unlock(&block->lock);
            free(block->data);
            free(block);
            cache->blocks_cached--;
            fibril_mutex_unlock(&cache->lock);
            return rc;
        }
        /*
         * Put the block on the free list.
         */
        if (cache->mode != CACHE_MODE_WB && block->dirty) {
            /*
             * We cannot sync the block while holding the cache
             * lock. Release everything and retry.
             */
            block->refcnt++;
            fibril_mutex_unlock(&block->lock);
            fibril_mutex_unlock(&cache->lock);
            goto retry;
        }
        list_append(&block->free_link, &cache->free_list);
    }
    fibril_mutex_unlock(&block->lock);
    fibril_mutex_unlock(&cache->lock);

    return rc;
}

/** Read sequential data from a block device.
 *
 * @param service_id Service ID of the block device.
 * @param bufpos     Pointer to the first unread valid offset within the
 *                   communication buffer.
 * @param buflen     Pointer to the number of unread bytes that are ready in
 *                   the communication buffer.
 * @param pos        Device position to be read.
 * @param dst        Destination buffer.
 * @param size       Size of the destination buffer.
 *
 * @return EOK on success or a negative return code on failure.
 */
int block_seqread(service_id_t service_id, size_t *bufpos, size_t *buflen,
    aoff64_t *pos, void *dst, size_t size)
{
    size_t offset = 0;
    size_t left = size;
    size_t block_size;
    devcon_t *devcon;

    devcon = devcon_search(service_id);
    assert(devcon);
    block_size = devcon->pblock_size;

    fibril_mutex_lock(&devcon->comm_area_lock);
    while (left > 0) {
        size_t rd;

        if (*bufpos + left < *buflen)
            rd = left;
        else
            rd = *buflen - *bufpos;

        if (rd > 0) {
            /*
             * Copy the contents of the communication buffer to the
             * destination buffer.
             */
            memcpy(dst + offset, devcon->comm_area + *bufpos, rd);
            offset += rd;
            *bufpos += rd;
            *pos += rd;
            left -= rd;
        }

        if (*bufpos == *buflen) {
            /* Refill the communication buffer with a new block. */
            int rc;

            rc = read_blocks(devcon, *pos / block_size, 1);
            if (rc != EOK) {
                fibril_mutex_unlock(&devcon->comm_area_lock);
                return rc;
            }

            *bufpos = 0;
            *buflen = block_size;
        }
    }
    fibril_mutex_unlock(&devcon->comm_area_lock);

    return EOK;
}
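
/*
 * The three pointer arguments carry the iterator state between calls:
 * *bufpos and *buflen delimit the not-yet-consumed bytes in the
 * communication buffer and *pos is the device byte offset. A fresh scan
 * therefore starts with all three zeroed, as in this sketch:
 *
 * @code
 *    size_t bufpos = 0;
 *    size_t buflen = 0;
 *    aoff64_t pos = 0;
 *    uint8_t header[64];
 *
 *    int rc = block_seqread(sid, &bufpos, &buflen, &pos, header,
 *        sizeof(header));
 * @endcode
 */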

/** Read blocks directly from device (bypass cache).
 *
 * @param service_id Service ID of the block device.
 * @param ba         Address of first block (physical).
 * @param cnt        Number of blocks.
 * @param buf        Buffer for storing the data.
 *
 * @return EOK on success or negative error code on failure.
 */
int block_read_direct(service_id_t service_id, aoff64_t ba, size_t cnt,
    void *buf)
{
    devcon_t *devcon;
    int rc;

    devcon = devcon_search(service_id);
    assert(devcon);

    fibril_mutex_lock(&devcon->comm_area_lock);

    rc = read_blocks(devcon, ba, cnt);
    if (rc == EOK)
        memcpy(buf, devcon->comm_area, devcon->pblock_size * cnt);

    fibril_mutex_unlock(&devcon->comm_area_lock);

    return rc;
}

/** Write blocks directly to device (bypass cache).
 *
 * @param service_id Service ID of the block device.
 * @param ba         Address of first block (physical).
 * @param cnt        Number of blocks.
 * @param data       The data to be written.
 *
 * @return EOK on success or negative error code on failure.
 */
int block_write_direct(service_id_t service_id, aoff64_t ba, size_t cnt,
    const void *data)
{
    devcon_t *devcon;
    int rc;

    devcon = devcon_search(service_id);
    assert(devcon);

    fibril_mutex_lock(&devcon->comm_area_lock);

    memcpy(devcon->comm_area, data, devcon->pblock_size * cnt);
    rc = write_blocks(devcon, ba, cnt);

    fibril_mutex_unlock(&devcon->comm_area_lock);

    return rc;
}
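
/*
 * Direct-access sketch (illustrative addresses): copy one physical block
 * from address 0 to address 1, bypassing the cache. The caller's buffer must
 * hold cnt * pblock_size bytes. Note that the transfer is staged through the
 * shared communication area, so cnt * pblock_size should also fit into the
 * comm_size passed to block_init(); the functions above do not check this
 * themselves.
 *
 * @code
 *    size_t bsize;
 *    int rc = block_get_bsize(sid, &bsize);
 *    if (rc != EOK)
 *        return rc;
 *
 *    void *tmp = malloc(bsize);
 *    if (!tmp)
 *        return ENOMEM;
 *
 *    rc = block_read_direct(sid, 0, 1, tmp);
 *    if (rc == EOK)
 *        rc = block_write_direct(sid, 1, 1, tmp);
 *    free(tmp);
 * @endcode
 */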

/** Get device block size.
 *
 * @param service_id Service ID of the block device.
 * @param bsize      Output block size.
 *
 * @return EOK on success or negative error code on failure.
 */
int block_get_bsize(service_id_t service_id, size_t *bsize)
{
    devcon_t *devcon;

    devcon = devcon_search(service_id);
    assert(devcon);

    return get_block_size(devcon->sess, bsize);
}

/** Get number of blocks on device.
 *
 * @param service_id Service ID of the block device.
 * @param nblocks    Output number of blocks.
 *
 * @return EOK on success or negative error code on failure.
 */
int block_get_nblocks(service_id_t service_id, aoff64_t *nblocks)
{
    devcon_t *devcon = devcon_search(service_id);
    assert(devcon);

    return get_num_blocks(devcon->sess, nblocks);
}

/** Read bytes directly from the device (bypass cache).
 *
 * @param service_id Service ID of the block device.
 * @param abs_offset Absolute offset in bytes where to start reading.
 * @param bytes      Number of bytes to read.
 * @param data       Buffer that receives the data.
 *
 * @return EOK on success or negative error code on failure.
 */
int block_read_bytes_direct(service_id_t service_id, aoff64_t abs_offset,
    size_t bytes, void *data)
{
    int rc;
    size_t phys_block_size;
    size_t buf_size;
    void *buffer;
    aoff64_t first_block;
    aoff64_t last_block;
    size_t blocks;
    size_t offset;

    rc = block_get_bsize(service_id, &phys_block_size);
    if (rc != EOK) {
        return rc;
    }

    /* Calculate data position and required space. */
    first_block = abs_offset / phys_block_size;
    offset = abs_offset % phys_block_size;
    last_block = (abs_offset + bytes - 1) / phys_block_size;
    blocks = last_block - first_block + 1;
    buf_size = blocks * phys_block_size;

    /* Read the data into memory. */
    buffer = malloc(buf_size);
    if (buffer == NULL) {
        return ENOMEM;
    }

    rc = block_read_direct(service_id, first_block, blocks, buffer);
    if (rc != EOK) {
        free(buffer);
        return rc;
    }

    /* Copy the data from the buffer. */
    memcpy(data, buffer + offset, bytes);
    free(buffer);

    return EOK;
}
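
/*
 * Worked example (illustrative numbers): reading 90 bytes from absolute
 * offset 1000 on a device with 512-byte blocks spans physical blocks 1 and 2
 * (first_block = 1000 / 512 = 1, last_block = 1089 / 512 = 2), so two blocks
 * are read into the bounce buffer and the copy starts at offset
 * 1000 % 512 = 488 within it:
 *
 * @code
 *    uint8_t sb[90];
 *    int rc = block_read_bytes_direct(sid, 1000, sizeof(sb), sb);
 * @endcode
 */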

/** Get TOC from device.
 *
 * @param service_id Service ID of the block device.
 * @param session    Starting session.
 * @param data       Buffer to read TOC into.
 *
 * @return EOK on success.
 * @return Error code on failure.
 */
int block_get_toc(service_id_t service_id, uint8_t session, void *data)
{
    devcon_t *devcon = devcon_search(service_id);
    assert(devcon);

    fibril_mutex_lock(&devcon->comm_area_lock);

    int rc = read_toc(devcon->sess, session);
    if (rc == EOK)
        memcpy(data, devcon->comm_area, devcon->pblock_size);

    fibril_mutex_unlock(&devcon->comm_area_lock);

    return rc;
}

/** Read blocks from block device.
 *
 * The data read is deposited in the device connection's shared
 * communication area.
 *
 * @param devcon Device connection.
 * @param ba     Address of first block.
 * @param cnt    Number of blocks.
 *
 * @return EOK on success or negative error code on failure.
 */
static int read_blocks(devcon_t *devcon, aoff64_t ba, size_t cnt)
{
    assert(devcon);

    async_exch_t *exch = async_exchange_begin(devcon->sess);
    int rc = async_req_3_0(exch, BD_READ_BLOCKS, LOWER32(ba),
        UPPER32(ba), cnt);
    async_exchange_end(exch);

    if (rc != EOK) {
        printf("Error %d reading %zu blocks starting at block %" PRIuOFF64
            " from device handle %" PRIun "\n", rc, cnt, ba,
            devcon->service_id);
#ifndef NDEBUG
        stacktrace_print();
#endif
    }

    return rc;
}

/** Write blocks to block device.
 *
 * The data to be written is taken from the device connection's shared
 * communication area.
 *
 * @param devcon Device connection.
 * @param ba     Address of first block.
 * @param cnt    Number of blocks.
 *
 * @return EOK on success or negative error code on failure.
 */
static int write_blocks(devcon_t *devcon, aoff64_t ba, size_t cnt)
{
    assert(devcon);

    async_exch_t *exch = async_exchange_begin(devcon->sess);
    int rc = async_req_3_0(exch, BD_WRITE_BLOCKS, LOWER32(ba),
        UPPER32(ba), cnt);
    async_exchange_end(exch);

    if (rc != EOK) {
        printf("Error %d writing %zu blocks starting at block %" PRIuOFF64
            " to device handle %" PRIun "\n", rc, cnt, ba, devcon->service_id);
#ifndef NDEBUG
        stacktrace_print();
#endif
    }

    return rc;
}

/** Get block size used by the device. */
static int get_block_size(async_sess_t *sess, size_t *bsize)
{
    sysarg_t bs;

    async_exch_t *exch = async_exchange_begin(sess);
    int rc = async_req_0_1(exch, BD_GET_BLOCK_SIZE, &bs);
    async_exchange_end(exch);

    if (rc == EOK)
        *bsize = (size_t) bs;

    return rc;
}

/** Get total number of blocks on block device. */
static int get_num_blocks(async_sess_t *sess, aoff64_t *nblocks)
{
    sysarg_t nb_l;
    sysarg_t nb_h;

    async_exch_t *exch = async_exchange_begin(sess);
    int rc = async_req_0_2(exch, BD_GET_NUM_BLOCKS, &nb_l, &nb_h);
    async_exchange_end(exch);

    if (rc == EOK)
        *nblocks = (aoff64_t) MERGE_LOUP32(nb_l, nb_h);

    return rc;
}

/** Get TOC from block device. */
static int read_toc(async_sess_t *sess, uint8_t session)
{
    async_exch_t *exch = async_exchange_begin(sess);
    int rc = async_req_1_0(exch, BD_READ_TOC, session);
    async_exchange_end(exch);

    return rc;
}

/** Convert logical block address to physical block address. */
static aoff64_t ba_ltop(devcon_t *devcon, aoff64_t lba)
{
    assert(devcon->cache != NULL);
    return lba * devcon->cache->blocks_cluster;
}
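
/*
 * Example of the mapping above (illustrative numbers): with 4096-byte
 * logical blocks on a 512-byte physical device, blocks_cluster is 8, so
 * logical block 3 maps to physical block 3 * 8 = 24.
 */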

/** @}
 */