source: mainline/uspace/lib/block/block.c@ d579acc

Last change on this file since d579acc was 2463df9, checked in by Jakub Jermar <jakub@…>, 12 years ago

Remove the temptation to use mmap() and munmap() in native code.

/*
 * Copyright (c) 2008 Jakub Jermar
 * Copyright (c) 2008 Martin Decky
 * Copyright (c) 2011 Martin Sucha
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup libblock
 * @{
 */
/**
 * @file
 * @brief Client-side library for accessing and caching block devices.
 */

#include <ipc/loc.h>
#include <ipc/services.h>
#include <errno.h>
#include <async.h>
#include <as.h>
#include <assert.h>
#include <bd.h>
#include <fibril_synch.h>
#include <adt/list.h>
#include <adt/hash_table.h>
#include <macros.h>
#include <mem.h>
#include <malloc.h>
#include <stdio.h>
#include <sys/typefmt.h>
#include <stacktrace.h>
#include "block.h"

/** Lock protecting the device connection list */
static FIBRIL_MUTEX_INITIALIZE(dcl_lock);
/** Device connection list head. */
static LIST_INITIALIZE(dcl);

typedef struct {
	fibril_mutex_t lock;
	size_t lblock_size;		/**< Logical block size. */
	unsigned blocks_cluster;	/**< Physical blocks per block_t */
	unsigned block_count;		/**< Total number of blocks. */
	unsigned blocks_cached;		/**< Number of cached blocks. */
	hash_table_t block_hash;
	list_t free_list;
	enum cache_mode mode;
} cache_t;

typedef struct {
	link_t link;
	service_id_t service_id;
	async_sess_t *sess;
	bd_t *bd;
	void *bb_buf;
	aoff64_t bb_addr;
	size_t pblock_size;		/**< Physical block size. */
	cache_t *cache;
} devcon_t;

static int read_blocks(devcon_t *, aoff64_t, size_t, void *, size_t);
static int write_blocks(devcon_t *, aoff64_t, size_t, void *, size_t);
static aoff64_t ba_ltop(devcon_t *, aoff64_t);

static devcon_t *devcon_search(service_id_t service_id)
{
	fibril_mutex_lock(&dcl_lock);

	list_foreach(dcl, link, devcon_t, devcon) {
		if (devcon->service_id == service_id) {
			fibril_mutex_unlock(&dcl_lock);
			return devcon;
		}
	}

	fibril_mutex_unlock(&dcl_lock);
	return NULL;
}

static int devcon_add(service_id_t service_id, async_sess_t *sess,
    size_t bsize, bd_t *bd)
{
	devcon_t *devcon;

	devcon = malloc(sizeof(devcon_t));
	if (!devcon)
		return ENOMEM;

	link_initialize(&devcon->link);
	devcon->service_id = service_id;
	devcon->sess = sess;
	devcon->bd = bd;
	devcon->bb_buf = NULL;
	devcon->bb_addr = 0;
	devcon->pblock_size = bsize;
	devcon->cache = NULL;

	fibril_mutex_lock(&dcl_lock);
	list_foreach(dcl, link, devcon_t, d) {
		if (d->service_id == service_id) {
			fibril_mutex_unlock(&dcl_lock);
			free(devcon);
			return EEXIST;
		}
	}
	list_append(&devcon->link, &dcl);
	fibril_mutex_unlock(&dcl_lock);
	return EOK;
}

static void devcon_remove(devcon_t *devcon)
{
	fibril_mutex_lock(&dcl_lock);
	list_remove(&devcon->link);
	fibril_mutex_unlock(&dcl_lock);
}

int block_init(exch_mgmt_t mgmt, service_id_t service_id,
    size_t comm_size)
{
	bd_t *bd;

	async_sess_t *sess = loc_service_connect(mgmt, service_id,
	    IPC_FLAG_BLOCKING);
	if (!sess) {
		return ENOENT;
	}

	int rc = bd_open(sess, &bd);
	if (rc != EOK) {
		async_hangup(sess);
		return rc;
	}

	size_t bsize;
	rc = bd_get_block_size(bd, &bsize);
	if (rc != EOK) {
		bd_close(bd);
		async_hangup(sess);
		return rc;
	}

	rc = devcon_add(service_id, sess, bsize, bd);
	if (rc != EOK) {
		bd_close(bd);
		async_hangup(sess);
		return rc;
	}

	return EOK;
}

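/*
 * Illustrative usage sketch (added for exposition, not part of the original
 * file): opening a block device by service ID and querying its block size.
 * EXCHANGE_SERIALIZE is assumed to be an available exch_mgmt_t value and
 * the communication buffer size of 4096 is an arbitrary example choice.
 */
#if 0
static int example_open_device(service_id_t service_id)
{
	/* Connect to the device driver and set up the device connection. */
	int rc = block_init(EXCHANGE_SERIALIZE, service_id, 4096);
	if (rc != EOK)
		return rc;

	size_t bsize;
	rc = block_get_bsize(service_id, &bsize);
	if (rc != EOK) {
		block_fini(service_id);
		return rc;
	}

	printf("Device block size: %zu bytes\n", bsize);
	block_fini(service_id);
	return EOK;
}
#endif
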
void block_fini(service_id_t service_id)
{
	devcon_t *devcon = devcon_search(service_id);
	assert(devcon);

	if (devcon->cache)
		(void) block_cache_fini(service_id);

	devcon_remove(devcon);

	if (devcon->bb_buf)
		free(devcon->bb_buf);

	bd_close(devcon->bd);
	async_hangup(devcon->sess);

	free(devcon);
}

int block_bb_read(service_id_t service_id, aoff64_t ba)
{
	void *bb_buf;
	int rc;

	devcon_t *devcon = devcon_search(service_id);
	if (!devcon)
		return ENOENT;
	if (devcon->bb_buf)
		return EEXIST;
	bb_buf = malloc(devcon->pblock_size);
	if (!bb_buf)
		return ENOMEM;

	rc = read_blocks(devcon, 0, 1, bb_buf, devcon->pblock_size);
	if (rc != EOK) {
		free(bb_buf);
		return rc;
	}

	devcon->bb_buf = bb_buf;
	devcon->bb_addr = ba;

	return EOK;
}

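/*
 * Illustrative sketch (added for exposition, not part of the original file):
 * loading and inspecting the bootblock. Note that block_bb_read() always
 * reads physical block 0 into a private buffer and merely records the
 * caller-supplied address `ba`; block_bb_get() then hands out that buffer,
 * which stays valid until block_fini() frees it.
 */
#if 0
static int example_load_bootblock(service_id_t service_id)
{
	int rc = block_bb_read(service_id, 0);
	if (rc != EOK)
		return rc;

	/* The buffer holds one physical block of raw bootblock data. */
	uint8_t *bb = (uint8_t *) block_bb_get(service_id);
	printf("First bootblock byte: 0x%02x\n", bb[0]);
	return EOK;
}
#endif
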
void *block_bb_get(service_id_t service_id)
{
	devcon_t *devcon = devcon_search(service_id);
	assert(devcon);
	return devcon->bb_buf;
}

static size_t cache_key_hash(void *key)
{
	aoff64_t *lba = (aoff64_t *) key;
	return *lba;
}

static size_t cache_hash(const ht_link_t *item)
{
	block_t *b = hash_table_get_inst(item, block_t, hash_link);
	return b->lba;
}

static bool cache_key_equal(void *key, const ht_link_t *item)
{
	aoff64_t *lba = (aoff64_t *) key;
	block_t *b = hash_table_get_inst(item, block_t, hash_link);
	return b->lba == *lba;
}

static hash_table_ops_t cache_ops = {
	.hash = cache_hash,
	.key_hash = cache_key_hash,
	.key_equal = cache_key_equal,
	.equal = NULL,
	.remove_callback = NULL
};

int block_cache_init(service_id_t service_id, size_t size, unsigned blocks,
    enum cache_mode mode)
{
	devcon_t *devcon = devcon_search(service_id);
	cache_t *cache;
	if (!devcon)
		return ENOENT;
	if (devcon->cache)
		return EEXIST;
	cache = malloc(sizeof(cache_t));
	if (!cache)
		return ENOMEM;

	fibril_mutex_initialize(&cache->lock);
	list_initialize(&cache->free_list);
	cache->lblock_size = size;
	cache->block_count = blocks;
	cache->blocks_cached = 0;
	cache->mode = mode;

	/* Allow 1:1 or small-to-large block size translation */
	if (cache->lblock_size % devcon->pblock_size != 0) {
		free(cache);
		return ENOTSUP;
	}

	cache->blocks_cluster = cache->lblock_size / devcon->pblock_size;

	if (!hash_table_create(&cache->block_hash, 0, 0, &cache_ops)) {
		free(cache);
		return ENOMEM;
	}

	devcon->cache = cache;
	return EOK;
}

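/*
 * Illustrative sketch (added for exposition, not part of the original file):
 * enabling a write-back cache of 1024-byte logical blocks on top of the
 * device's physical blocks. The logical block size must be a multiple of
 * the physical block size, otherwise block_cache_init() returns ENOTSUP.
 * The `blocks` argument records the total number of logical blocks; the
 * caching logic in this file stores it but does not consult it.
 */
#if 0
static int example_enable_cache(service_id_t service_id, aoff64_t dev_blocks)
{
	return block_cache_init(service_id, 1024, (unsigned) dev_blocks,
	    CACHE_MODE_WB);
}
#endif
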
int block_cache_fini(service_id_t service_id)
{
	devcon_t *devcon = devcon_search(service_id);
	cache_t *cache;
	int rc;

	if (!devcon)
		return ENOENT;
	if (!devcon->cache)
		return EOK;
	cache = devcon->cache;

	/*
	 * We are expecting to find all blocks for this device handle on the
	 * free list, i.e. the block reference count should be zero. Do not
	 * bother with the cache and block locks because we are single-threaded.
	 */
	while (!list_empty(&cache->free_list)) {
		block_t *b = list_get_instance(list_first(&cache->free_list),
		    block_t, free_link);

		list_remove(&b->free_link);
		if (b->dirty) {
			rc = write_blocks(devcon, b->pba, cache->blocks_cluster,
			    b->data, b->size);
			if (rc != EOK)
				return rc;
		}

		hash_table_remove_item(&cache->block_hash, &b->hash_link);

		free(b->data);
		free(b);
	}

	hash_table_destroy(&cache->block_hash);
	devcon->cache = NULL;
	free(cache);

	return EOK;
}

#define CACHE_LO_WATERMARK 10
#define CACHE_HI_WATERMARK 20
static bool cache_can_grow(cache_t *cache)
{
	if (cache->blocks_cached < CACHE_LO_WATERMARK)
		return true;
	if (!list_empty(&cache->free_list))
		return false;
	return true;
}

static void block_initialize(block_t *b)
{
	fibril_mutex_initialize(&b->lock);
	b->refcnt = 1;
	b->dirty = false;
	b->toxic = false;
	fibril_rwlock_initialize(&b->contents_lock);
	link_initialize(&b->free_link);
}

/** Instantiate a block in memory and get a reference to it.
 *
 * @param block		Pointer to where the function will store the
 *			block pointer on success.
 * @param service_id	Service ID of the block device.
 * @param ba		Block address (logical).
 * @param flags		If BLOCK_FLAGS_NOREAD is specified, block_get()
 *			will not read the contents of the block from the
 *			device.
 *
 * @return		EOK on success or a negative error code.
 */
int block_get(block_t **block, service_id_t service_id, aoff64_t ba, int flags)
{
	devcon_t *devcon;
	cache_t *cache;
	block_t *b;
	link_t *link;

	int rc;

	devcon = devcon_search(service_id);

	assert(devcon);
	assert(devcon->cache);

	cache = devcon->cache;

retry:
	rc = EOK;
	b = NULL;

	fibril_mutex_lock(&cache->lock);
	ht_link_t *hlink = hash_table_find(&cache->block_hash, &ba);
	if (hlink) {
found:
		/*
		 * We found the block in the cache.
		 */
		b = hash_table_get_inst(hlink, block_t, hash_link);
		fibril_mutex_lock(&b->lock);
		if (b->refcnt++ == 0)
			list_remove(&b->free_link);
		if (b->toxic)
			rc = EIO;
		fibril_mutex_unlock(&b->lock);
		fibril_mutex_unlock(&cache->lock);
	} else {
		/*
		 * The block was not found in the cache.
		 */
		if (cache_can_grow(cache)) {
			/*
			 * We can grow the cache by allocating new blocks.
			 * Should the allocation fail, we fail over and try to
			 * recycle a block from the cache.
			 */
			b = malloc(sizeof(block_t));
			if (!b)
				goto recycle;
			b->data = malloc(cache->lblock_size);
			if (!b->data) {
				free(b);
				b = NULL;
				goto recycle;
			}
			cache->blocks_cached++;
		} else {
			/*
			 * Try to recycle a block from the free list.
			 */
recycle:
			if (list_empty(&cache->free_list)) {
				fibril_mutex_unlock(&cache->lock);
				rc = ENOMEM;
				goto out;
			}
			link = list_first(&cache->free_list);
			b = list_get_instance(link, block_t, free_link);

			fibril_mutex_lock(&b->lock);
			if (b->dirty) {
				/*
				 * The block needs to be written back to the
				 * device before it changes identity. Do this
				 * while not holding the cache lock so that
				 * concurrency is not impeded. Also move the
				 * block to the end of the free list so that we
				 * do not slow down other instances of
				 * block_get() draining the free list.
				 */
				list_remove(&b->free_link);
				list_append(&b->free_link, &cache->free_list);
				fibril_mutex_unlock(&cache->lock);
				rc = write_blocks(devcon, b->pba,
				    cache->blocks_cluster, b->data, b->size);
				if (rc != EOK) {
					/*
					 * We did not manage to write the block
					 * to the device. Keep it around for
					 * another try. Hopefully, we will grab
					 * another block next time.
					 */
					fibril_mutex_unlock(&b->lock);
					goto retry;
				}
				b->dirty = false;
				if (!fibril_mutex_trylock(&cache->lock)) {
					/*
					 * Somebody is probably racing with us.
					 * Unlock the block and retry.
					 */
					fibril_mutex_unlock(&b->lock);
					goto retry;
				}
				hlink = hash_table_find(&cache->block_hash, &ba);
				if (hlink) {
					/*
					 * Someone else must have already
					 * instantiated the block while we were
					 * not holding the cache lock.
					 * Leave the recycled block on the
					 * freelist and continue as if we
					 * found the block of interest during
					 * the first try.
					 */
					fibril_mutex_unlock(&b->lock);
					goto found;
				}
			}
			fibril_mutex_unlock(&b->lock);

			/*
			 * Unlink the block from the free list and the hash
			 * table.
			 */
			list_remove(&b->free_link);
			hash_table_remove_item(&cache->block_hash, &b->hash_link);
		}

		block_initialize(b);
		b->service_id = service_id;
		b->size = cache->lblock_size;
		b->lba = ba;
		b->pba = ba_ltop(devcon, b->lba);
		hash_table_insert(&cache->block_hash, &b->hash_link);

		/*
		 * Lock the block before releasing the cache lock. Thus we don't
		 * kill concurrent operations on the cache while doing I/O on
		 * the block.
		 */
		fibril_mutex_lock(&b->lock);
		fibril_mutex_unlock(&cache->lock);

		if (!(flags & BLOCK_FLAGS_NOREAD)) {
			/*
			 * The block contains old or no data. We need to read
			 * the new contents from the device.
			 */
			rc = read_blocks(devcon, b->pba, cache->blocks_cluster,
			    b->data, cache->lblock_size);
			if (rc != EOK)
				b->toxic = true;
		} else
			rc = EOK;

		fibril_mutex_unlock(&b->lock);
	}
out:
	if ((rc != EOK) && b) {
		assert(b->toxic);
		(void) block_put(b);
		b = NULL;
	}
	*block = b;
	return rc;
}

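/*
 * Illustrative sketch (added for exposition, not part of the original file):
 * the typical get/modify/put cycle. Setting b->dirty defers the write-back
 * to the cache logic (immediate in write-through mode, lazy in write-back
 * mode). BLOCK_FLAGS_NONE is assumed to be the no-flags value from block.h.
 */
#if 0
static int example_patch_block(service_id_t service_id, aoff64_t ba)
{
	block_t *b;
	int rc = block_get(&b, service_id, ba, BLOCK_FLAGS_NONE);
	if (rc != EOK)
		return rc;

	/* Serialize content access, modify, and mark for write-back. */
	fibril_rwlock_write_lock(&b->contents_lock);
	((uint8_t *) b->data)[0] = 0xeb;
	b->dirty = true;
	fibril_rwlock_write_unlock(&b->contents_lock);

	return block_put(b);
}
#endif
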
/** Release a reference to a block.
 *
 * If the last reference is dropped, the block is put on the free list.
 *
 * @param block		Block of which a reference is to be released.
 *
 * @return		EOK on success or a negative error code.
 */
int block_put(block_t *block)
{
	devcon_t *devcon = devcon_search(block->service_id);
	cache_t *cache;
	unsigned blocks_cached;
	enum cache_mode mode;
	int rc = EOK;

	assert(devcon);
	assert(devcon->cache);
	assert(block->refcnt >= 1);

	cache = devcon->cache;

retry:
	fibril_mutex_lock(&cache->lock);
	blocks_cached = cache->blocks_cached;
	mode = cache->mode;
	fibril_mutex_unlock(&cache->lock);

	/*
	 * Determine whether to sync the block. Syncing the block is best done
	 * when not holding the cache lock as it does not impede concurrency.
	 * Since the situation may have changed when we unlocked the cache, the
	 * blocks_cached and mode variables are mere hints. We will recheck the
	 * conditions later when the cache lock is held again.
	 */
	fibril_mutex_lock(&block->lock);
	if (block->toxic)
		block->dirty = false;	/* will not write back toxic block */
	if (block->dirty && (block->refcnt == 1) &&
	    (blocks_cached > CACHE_HI_WATERMARK || mode != CACHE_MODE_WB)) {
		rc = write_blocks(devcon, block->pba, cache->blocks_cluster,
		    block->data, block->size);
		block->dirty = false;
	}
	fibril_mutex_unlock(&block->lock);

	fibril_mutex_lock(&cache->lock);
	fibril_mutex_lock(&block->lock);
	if (!--block->refcnt) {
		/*
		 * Last reference to the block was dropped. Either free the
		 * block or put it on the free list. In case of an I/O error,
		 * free the block.
		 */
		if ((cache->blocks_cached > CACHE_HI_WATERMARK) ||
		    (rc != EOK)) {
			/*
			 * Currently there are too many cached blocks or there
			 * was an I/O error when writing the block back to the
			 * device.
			 */
			if (block->dirty) {
				/*
				 * We cannot sync the block while holding the
				 * cache lock. Release everything and retry.
				 */
				block->refcnt++;
				fibril_mutex_unlock(&block->lock);
				fibril_mutex_unlock(&cache->lock);
				goto retry;
			}
			/*
			 * Take the block out of the cache and free it.
			 */
			hash_table_remove_item(&cache->block_hash, &block->hash_link);
			fibril_mutex_unlock(&block->lock);
			free(block->data);
			free(block);
			cache->blocks_cached--;
			fibril_mutex_unlock(&cache->lock);
			return rc;
		}
		/*
		 * Put the block on the free list.
		 */
		if (cache->mode != CACHE_MODE_WB && block->dirty) {
			/*
			 * We cannot sync the block while holding the cache
			 * lock. Release everything and retry.
			 */
			block->refcnt++;
			fibril_mutex_unlock(&block->lock);
			fibril_mutex_unlock(&cache->lock);
			goto retry;
		}
		list_append(&block->free_link, &cache->free_list);
	}
	fibril_mutex_unlock(&block->lock);
	fibril_mutex_unlock(&cache->lock);

	return rc;
}

/** Read sequential data from a block device.
 *
 * The transfer is carried out in units of the device's physical block size.
 *
 * @param service_id	Service ID of the block device.
 * @param buf		Buffer for holding one block
 * @param bufpos	Pointer to the first unread valid offset within the
 *			communication buffer.
 * @param buflen	Pointer to the number of unread bytes that are ready in
 *			the communication buffer.
 * @param pos		Device position to be read.
 * @param dst		Destination buffer.
 * @param size		Size of the destination buffer.
 *
 * @return		EOK on success or a negative return code on failure.
 */
int block_seqread(service_id_t service_id, void *buf, size_t *bufpos,
    size_t *buflen, aoff64_t *pos, void *dst, size_t size)
{
	size_t offset = 0;
	size_t left = size;
	size_t block_size;
	devcon_t *devcon;

	devcon = devcon_search(service_id);
	assert(devcon);
	block_size = devcon->pblock_size;

	while (left > 0) {
		size_t rd;

		if (*bufpos + left < *buflen)
			rd = left;
		else
			rd = *buflen - *bufpos;

		if (rd > 0) {
			/*
			 * Copy the contents of the communication buffer to the
			 * destination buffer.
			 */
			memcpy(dst + offset, buf + *bufpos, rd);
			offset += rd;
			*bufpos += rd;
			*pos += rd;
			left -= rd;
		}

		if (*bufpos == *buflen) {
			/* Refill the communication buffer with a new block. */
			int rc;

			rc = read_blocks(devcon, *pos / block_size, 1, buf,
			    devcon->pblock_size);
			if (rc != EOK) {
				return rc;
			}

			*bufpos = 0;
			*buflen = block_size;
		}
	}

	return EOK;
}

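/*
 * Illustrative sketch (added for exposition, not part of the original file):
 * sequential reading with block_seqread(). The caller owns the communication
 * buffer and the bufpos/buflen/pos cursor; initializing bufpos == buflen
 * forces the first refill from the device.
 */
#if 0
static int example_seqread(service_id_t service_id, void *dst, size_t size)
{
	size_t bsize;
	int rc = block_get_bsize(service_id, &bsize);
	if (rc != EOK)
		return rc;

	void *buf = malloc(bsize);
	if (!buf)
		return ENOMEM;

	size_t bufpos = 0;
	size_t buflen = 0;	/* empty buffer -> first call refills it */
	aoff64_t pos = 0;

	rc = block_seqread(service_id, buf, &bufpos, &buflen, &pos, dst, size);
	free(buf);
	return rc;
}
#endif
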
/** Read blocks directly from device (bypass cache).
 *
 * @param service_id	Service ID of the block device.
 * @param ba		Address of first block (physical).
 * @param cnt		Number of blocks.
 * @param buf		Buffer for storing the data.
 *
 * @return		EOK on success or negative error code on failure.
 */
int block_read_direct(service_id_t service_id, aoff64_t ba, size_t cnt, void *buf)
{
	devcon_t *devcon;

	devcon = devcon_search(service_id);
	assert(devcon);

	return read_blocks(devcon, ba, cnt, buf, devcon->pblock_size * cnt);
}

/** Write blocks directly to device (bypass cache).
 *
 * @param service_id	Service ID of the block device.
 * @param ba		Address of first block (physical).
 * @param cnt		Number of blocks.
 * @param data		The data to be written.
 *
 * @return		EOK on success or negative error code on failure.
 */
int block_write_direct(service_id_t service_id, aoff64_t ba, size_t cnt,
    const void *data)
{
	devcon_t *devcon;

	devcon = devcon_search(service_id);
	assert(devcon);

	return write_blocks(devcon, ba, cnt, (void *) data, devcon->pblock_size * cnt);
}

/** Get device block size.
 *
 * @param service_id	Service ID of the block device.
 * @param bsize		Output block size.
 *
 * @return		EOK on success or negative error code on failure.
 */
int block_get_bsize(service_id_t service_id, size_t *bsize)
{
	devcon_t *devcon;

	devcon = devcon_search(service_id);
	assert(devcon);

	return bd_get_block_size(devcon->bd, bsize);
}

/** Get number of blocks on device.
 *
 * @param service_id	Service ID of the block device.
 * @param nblocks	Output number of blocks.
 *
 * @return		EOK on success or negative error code on failure.
 */
int block_get_nblocks(service_id_t service_id, aoff64_t *nblocks)
{
	devcon_t *devcon = devcon_search(service_id);
	assert(devcon);

	return bd_get_num_blocks(devcon->bd, nblocks);
}

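/*
 * Illustrative sketch (added for exposition, not part of the original file):
 * combining block_get_bsize() and block_get_nblocks() to compute the device
 * capacity in bytes.
 */
#if 0
static int example_capacity(service_id_t service_id, aoff64_t *bytes)
{
	size_t bsize;
	aoff64_t nblocks;

	int rc = block_get_bsize(service_id, &bsize);
	if (rc != EOK)
		return rc;

	rc = block_get_nblocks(service_id, &nblocks);
	if (rc != EOK)
		return rc;

	*bytes = nblocks * bsize;
	return EOK;
}
#endif
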
/** Read bytes directly from the device (bypass cache).
 *
 * @param service_id	Service ID of the block device.
 * @param abs_offset	Absolute offset in bytes where to start reading.
 * @param bytes		Number of bytes to read.
 * @param data		Buffer that receives the data.
 *
 * @return		EOK on success or negative error code on failure.
 */
int block_read_bytes_direct(service_id_t service_id, aoff64_t abs_offset,
    size_t bytes, void *data)
{
	int rc;
	size_t phys_block_size;
	size_t buf_size;
	void *buffer;
	aoff64_t first_block;
	aoff64_t last_block;
	size_t blocks;
	size_t offset;

	rc = block_get_bsize(service_id, &phys_block_size);
	if (rc != EOK) {
		return rc;
	}

	/* Calculate data position and required space. */
	first_block = abs_offset / phys_block_size;
	offset = abs_offset % phys_block_size;
	last_block = (abs_offset + bytes - 1) / phys_block_size;
	blocks = last_block - first_block + 1;
	buf_size = blocks * phys_block_size;

	/* Read the data into memory. */
	buffer = malloc(buf_size);
	if (buffer == NULL) {
		return ENOMEM;
	}

	rc = block_read_direct(service_id, first_block, blocks, buffer);
	if (rc != EOK) {
		free(buffer);
		return rc;
	}

	/* Copy the data from the buffer. */
	memcpy(data, buffer + offset, bytes);
	free(buffer);

	return EOK;
}

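/*
 * Worked example of the arithmetic above (added for exposition): reading
 * bytes = 100 at abs_offset = 1000 from a device with 512-byte physical
 * blocks gives first_block = 1000 / 512 = 1, offset = 1000 % 512 = 488,
 * last_block = (1000 + 100 - 1) / 512 = 2, hence blocks = 2. A 1024-byte
 * bounce buffer is read and the 100 bytes are copied out from offset 488.
 */
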
/** Get TOC from device.
 *
 * @param service_id	Service ID of the block device.
 * @param session	Starting session.
 *
 * @return		Allocated TOC structure.
 * @return		NULL on failure.
 */
toc_block_t *block_get_toc(service_id_t service_id, uint8_t session)
{
	devcon_t *devcon = devcon_search(service_id);
	toc_block_t *toc = NULL;
	int rc;

	assert(devcon);

	toc = (toc_block_t *) malloc(sizeof(toc_block_t));
	if (toc == NULL)
		return NULL;

	rc = bd_read_toc(devcon->bd, session, toc, sizeof(toc_block_t));
	if (rc != EOK) {
		free(toc);
		return NULL;
	}

	return toc;
}

/** Read blocks from block device.
 *
 * @param devcon	Device connection.
 * @param ba		Address of first block.
 * @param cnt		Number of blocks.
 * @param buf		Buffer for storing the data.
 * @param size		Size of the buffer in bytes.
 *
 * @return		EOK on success or negative error code on failure.
 */
static int read_blocks(devcon_t *devcon, aoff64_t ba, size_t cnt, void *buf,
    size_t size)
{
	assert(devcon);

	int rc = bd_read_blocks(devcon->bd, ba, cnt, buf, size);
	if (rc != EOK) {
		printf("Error %d reading %zu blocks starting at block %" PRIuOFF64
		    " from device handle %" PRIun "\n", rc, cnt, ba,
		    devcon->service_id);
#ifndef NDEBUG
		stacktrace_print();
#endif
	}

	return rc;
}

/** Write blocks to block device.
 *
 * @param devcon	Device connection.
 * @param ba		Address of first block.
 * @param cnt		Number of blocks.
 * @param data		Buffer containing the data to write.
 * @param size		Size of the buffer in bytes.
 *
 * @return		EOK on success or negative error code on failure.
 */
static int write_blocks(devcon_t *devcon, aoff64_t ba, size_t cnt, void *data,
    size_t size)
{
	assert(devcon);

	int rc = bd_write_blocks(devcon->bd, ba, cnt, data, size);
	if (rc != EOK) {
		printf("Error %d writing %zu blocks starting at block %" PRIuOFF64
		    " to device handle %" PRIun "\n", rc, cnt, ba, devcon->service_id);
#ifndef NDEBUG
		stacktrace_print();
#endif
	}

	return rc;
}

/** Convert logical block address to physical block address. */
static aoff64_t ba_ltop(devcon_t *devcon, aoff64_t lba)
{
	assert(devcon->cache != NULL);
	return lba * devcon->cache->blocks_cluster;
}

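/*
 * Worked example (added for exposition): with 512-byte physical blocks and
 * a 1024-byte logical block size, blocks_cluster = 1024 / 512 = 2, so
 * logical block address 5 maps to physical block address 10.
 */
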
/** @}
 */