source: mainline/uspace/lib/block/block.c @ 064e0fd

Last change on this file since 064e0fd was a35b458, checked in by Jiří Zárevúcky <zarevucky.jiri@…>, 7 years ago

style: Remove trailing whitespace on _all_ lines, including empty ones, for particular file types.

Command used: tools/srepl '\s\+$' '' -- *.c *.h *.py *.sh *.s *.S *.ag

Currently, whitespace on empty lines is very inconsistent.
There are two basic choices: Either remove the whitespace, or keep empty lines
indented to the level of surrounding code. The former is AFAICT more common,
and also much easier to do automatically.

Alternatively, we could write a script for automatic indentation and use that
instead. However, if such a script exists, it's possible to use the indented
style locally, by having the editor apply the relevant conversions on load/save,
without affecting the remote repository. IMO, it makes more sense to adopt
the simpler rule.

/*
 * Copyright (c) 2008 Jakub Jermar
 * Copyright (c) 2008 Martin Decky
 * Copyright (c) 2011 Martin Sucha
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup libblock
 * @{
 */
/**
 * @file
 * @brief Block device client library (block cache and direct access).
 */

#include <ipc/loc.h>
#include <ipc/services.h>
#include <errno.h>
#include <async.h>
#include <as.h>
#include <assert.h>
#include <bd.h>
#include <fibril_synch.h>
#include <adt/list.h>
#include <adt/hash_table.h>
#include <macros.h>
#include <mem.h>
#include <stdlib.h>
#include <stdio.h>
#include <stacktrace.h>
#include <str_error.h>
#include <offset.h>
#include <inttypes.h>
#include "block.h"

#define MAX_WRITE_RETRIES 10

/** Lock protecting the device connection list */
static FIBRIL_MUTEX_INITIALIZE(dcl_lock);
/** Device connection list head. */
static LIST_INITIALIZE(dcl);

typedef struct {
	fibril_mutex_t lock;
	size_t lblock_size;		/**< Logical block size. */
	unsigned blocks_cluster;	/**< Physical blocks per block_t */
	unsigned block_count;		/**< Total number of blocks. */
	unsigned blocks_cached;		/**< Number of cached blocks. */
	hash_table_t block_hash;
	list_t free_list;
	enum cache_mode mode;
} cache_t;

typedef struct {
	link_t link;
	service_id_t service_id;
	async_sess_t *sess;
	bd_t *bd;
	void *bb_buf;
	aoff64_t bb_addr;
	aoff64_t pblocks;		/**< Number of physical blocks */
	size_t pblock_size;		/**< Physical block size. */
	cache_t *cache;
} devcon_t;

static errno_t read_blocks(devcon_t *, aoff64_t, size_t, void *, size_t);
static errno_t write_blocks(devcon_t *, aoff64_t, size_t, void *, size_t);
static aoff64_t ba_ltop(devcon_t *, aoff64_t);

static devcon_t *devcon_search(service_id_t service_id)
{
	fibril_mutex_lock(&dcl_lock);

	list_foreach(dcl, link, devcon_t, devcon) {
		if (devcon->service_id == service_id) {
			fibril_mutex_unlock(&dcl_lock);
			return devcon;
		}
	}

	fibril_mutex_unlock(&dcl_lock);
	return NULL;
}

static errno_t devcon_add(service_id_t service_id, async_sess_t *sess,
    size_t bsize, aoff64_t dev_size, bd_t *bd)
{
	devcon_t *devcon;

	devcon = malloc(sizeof(devcon_t));
	if (!devcon)
		return ENOMEM;

	link_initialize(&devcon->link);
	devcon->service_id = service_id;
	devcon->sess = sess;
	devcon->bd = bd;
	devcon->bb_buf = NULL;
	devcon->bb_addr = 0;
	devcon->pblock_size = bsize;
	devcon->pblocks = dev_size;
	devcon->cache = NULL;

	fibril_mutex_lock(&dcl_lock);
	list_foreach(dcl, link, devcon_t, d) {
		if (d->service_id == service_id) {
			fibril_mutex_unlock(&dcl_lock);
			free(devcon);
			return EEXIST;
		}
	}
	list_append(&devcon->link, &dcl);
	fibril_mutex_unlock(&dcl_lock);
	return EOK;
}

static void devcon_remove(devcon_t *devcon)
{
	fibril_mutex_lock(&dcl_lock);
	list_remove(&devcon->link);
	fibril_mutex_unlock(&dcl_lock);
}

errno_t block_init(service_id_t service_id, size_t comm_size)
{
	bd_t *bd;

	async_sess_t *sess = loc_service_connect(service_id, INTERFACE_BLOCK,
	    IPC_FLAG_BLOCKING);
	if (!sess) {
		return ENOENT;
	}

	errno_t rc = bd_open(sess, &bd);
	if (rc != EOK) {
		async_hangup(sess);
		return rc;
	}

	size_t bsize;
	rc = bd_get_block_size(bd, &bsize);
	if (rc != EOK) {
		bd_close(bd);
		async_hangup(sess);
		return rc;
	}

	aoff64_t dev_size;
	rc = bd_get_num_blocks(bd, &dev_size);
	if (rc != EOK) {
		bd_close(bd);
		async_hangup(sess);
		return rc;
	}

	rc = devcon_add(service_id, sess, bsize, dev_size, bd);
	if (rc != EOK) {
		bd_close(bd);
		async_hangup(sess);
		return rc;
	}

	return EOK;
}
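
/*
 * Illustrative sketch added for exposition (not part of the original
 * file): how a client might bring up libblock for a device and query
 * its geometry. The service ID and the comm_size value are assumptions;
 * comm_size is not used by this revision of block_init().
 */
#if 0
static errno_t example_open(service_id_t sid)
{
	size_t bsize;
	aoff64_t nblocks;

	/* Connect to the device and register a devcon_t for it. */
	errno_t rc = block_init(sid, 2048);
	if (rc != EOK)
		return rc;

	/* Both getters forward to the block device server. */
	rc = block_get_bsize(sid, &bsize);
	if (rc == EOK)
		rc = block_get_nblocks(sid, &nblocks);

	block_fini(sid);
	return rc;
}
#endif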

void block_fini(service_id_t service_id)
{
	devcon_t *devcon = devcon_search(service_id);
	assert(devcon);

	if (devcon->cache)
		(void) block_cache_fini(service_id);

	(void) bd_sync_cache(devcon->bd, 0, 0);

	devcon_remove(devcon);

	if (devcon->bb_buf)
		free(devcon->bb_buf);

	bd_close(devcon->bd);
	async_hangup(devcon->sess);

	free(devcon);
}

errno_t block_bb_read(service_id_t service_id, aoff64_t ba)
{
	void *bb_buf;
	errno_t rc;

	devcon_t *devcon = devcon_search(service_id);
	if (!devcon)
		return ENOENT;
	if (devcon->bb_buf)
		return EEXIST;
	bb_buf = malloc(devcon->pblock_size);
	if (!bb_buf)
		return ENOMEM;

	rc = read_blocks(devcon, 0, 1, bb_buf, devcon->pblock_size);
	if (rc != EOK) {
		free(bb_buf);
		return rc;
	}

	devcon->bb_buf = bb_buf;
	devcon->bb_addr = ba;

	return EOK;
}

void *block_bb_get(service_id_t service_id)
{
	devcon_t *devcon = devcon_search(service_id);
	assert(devcon);
	return devcon->bb_buf;
}

static size_t cache_key_hash(void *key)
{
	aoff64_t *lba = (aoff64_t *) key;
	return *lba;
}

static size_t cache_hash(const ht_link_t *item)
{
	block_t *b = hash_table_get_inst(item, block_t, hash_link);
	return b->lba;
}

static bool cache_key_equal(void *key, const ht_link_t *item)
{
	aoff64_t *lba = (aoff64_t *) key;
	block_t *b = hash_table_get_inst(item, block_t, hash_link);
	return b->lba == *lba;
}

static hash_table_ops_t cache_ops = {
	.hash = cache_hash,
	.key_hash = cache_key_hash,
	.key_equal = cache_key_equal,
	.equal = NULL,
	.remove_callback = NULL
};

errno_t block_cache_init(service_id_t service_id, size_t size, unsigned blocks,
    enum cache_mode mode)
{
	devcon_t *devcon = devcon_search(service_id);
	cache_t *cache;
	if (!devcon)
		return ENOENT;
	if (devcon->cache)
		return EEXIST;
	cache = malloc(sizeof(cache_t));
	if (!cache)
		return ENOMEM;

	fibril_mutex_initialize(&cache->lock);
	list_initialize(&cache->free_list);
	cache->lblock_size = size;
	cache->block_count = blocks;
	cache->blocks_cached = 0;
	cache->mode = mode;

	/* Allow 1:1 or small-to-large block size translation */
	if (cache->lblock_size % devcon->pblock_size != 0) {
		free(cache);
		return ENOTSUP;
	}

	cache->blocks_cluster = cache->lblock_size / devcon->pblock_size;

	if (!hash_table_create(&cache->block_hash, 0, 0, &cache_ops)) {
		free(cache);
		return ENOMEM;
	}

	devcon->cache = cache;
	return EOK;
}
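
/*
 * Illustrative sketch added for exposition (not part of the original
 * file): enabling a write-back cache of 4 KiB logical blocks on top of
 * an initialized device. The sizes are assumptions; the logical block
 * size must be a multiple of the physical one, otherwise
 * block_cache_init() returns ENOTSUP.
 */
#if 0
static errno_t example_enable_cache(service_id_t sid)
{
	/* 4096-byte logical blocks, nominal count of 100 blocks. */
	return block_cache_init(sid, 4096, 100, CACHE_MODE_WB);
}
#endif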

errno_t block_cache_fini(service_id_t service_id)
{
	devcon_t *devcon = devcon_search(service_id);
	cache_t *cache;
	errno_t rc;

	if (!devcon)
		return ENOENT;
	if (!devcon->cache)
		return EOK;
	cache = devcon->cache;

	/*
	 * We are expecting to find all blocks for this device handle on the
	 * free list, i.e. the block reference count should be zero. Do not
	 * bother with the cache and block locks because we are single-threaded.
	 */
	while (!list_empty(&cache->free_list)) {
		block_t *b = list_get_instance(list_first(&cache->free_list),
		    block_t, free_link);

		list_remove(&b->free_link);
		if (b->dirty) {
			rc = write_blocks(devcon, b->pba, cache->blocks_cluster,
			    b->data, b->size);
			if (rc != EOK)
				return rc;
		}

		hash_table_remove_item(&cache->block_hash, &b->hash_link);

		free(b->data);
		free(b);
	}

	hash_table_destroy(&cache->block_hash);
	devcon->cache = NULL;
	free(cache);

	return EOK;
}

#define CACHE_LO_WATERMARK 10
#define CACHE_HI_WATERMARK 20

static bool cache_can_grow(cache_t *cache)
{
	if (cache->blocks_cached < CACHE_LO_WATERMARK)
		return true;
	if (!list_empty(&cache->free_list))
		return false;
	return true;
}

static void block_initialize(block_t *b)
{
	fibril_mutex_initialize(&b->lock);
	b->refcnt = 1;
	b->write_failures = 0;
	b->dirty = false;
	b->toxic = false;
	fibril_rwlock_initialize(&b->contents_lock);
	link_initialize(&b->free_link);
}

/** Instantiate a block in memory and get a reference to it.
 *
 * @param block		Pointer to where the function will store the
 *			block pointer on success.
 * @param service_id	Service ID of the block device.
 * @param ba		Block address (logical).
 * @param flags		If BLOCK_FLAGS_NOREAD is specified, block_get()
 *			will not read the contents of the block from the
 *			device.
 *
 * @return		EOK on success or an error code.
 */
errno_t block_get(block_t **block, service_id_t service_id, aoff64_t ba, int flags)
{
	devcon_t *devcon;
	cache_t *cache;
	block_t *b;
	link_t *link;
	aoff64_t p_ba;
	errno_t rc;

	devcon = devcon_search(service_id);

	assert(devcon);
	assert(devcon->cache);

	cache = devcon->cache;

	/*
	 * Check whether the logical block (or part of it) is beyond
	 * the end of the device or not.
	 */
	p_ba = ba_ltop(devcon, ba);
	p_ba += cache->blocks_cluster;
	if (p_ba >= devcon->pblocks) {
		/* This request cannot be satisfied */
		return EIO;
	}

retry:
	rc = EOK;
	b = NULL;

	fibril_mutex_lock(&cache->lock);
	ht_link_t *hlink = hash_table_find(&cache->block_hash, &ba);
	if (hlink) {
found:
		/*
		 * We found the block in the cache.
		 */
		b = hash_table_get_inst(hlink, block_t, hash_link);
		fibril_mutex_lock(&b->lock);
		if (b->refcnt++ == 0)
			list_remove(&b->free_link);
		if (b->toxic)
			rc = EIO;
		fibril_mutex_unlock(&b->lock);
		fibril_mutex_unlock(&cache->lock);
	} else {
		/*
		 * The block was not found in the cache.
		 */
		if (cache_can_grow(cache)) {
			/*
			 * We can grow the cache by allocating new blocks.
			 * Should the allocation fail, we fail over and try to
			 * recycle a block from the cache.
			 */
			b = malloc(sizeof(block_t));
			if (!b)
				goto recycle;
			b->data = malloc(cache->lblock_size);
			if (!b->data) {
				free(b);
				b = NULL;
				goto recycle;
			}
			cache->blocks_cached++;
		} else {
			/*
			 * Try to recycle a block from the free list.
			 */
recycle:
			if (list_empty(&cache->free_list)) {
				fibril_mutex_unlock(&cache->lock);
				rc = ENOMEM;
				goto out;
			}
			link = list_first(&cache->free_list);
			b = list_get_instance(link, block_t, free_link);

			fibril_mutex_lock(&b->lock);
			if (b->dirty) {
				/*
				 * The block needs to be written back to the
				 * device before it changes identity. Do this
				 * while not holding the cache lock so that
				 * concurrency is not impeded. Also move the
				 * block to the end of the free list so that we
				 * do not slow down other instances of
				 * block_get() draining the free list.
				 */
				list_remove(&b->free_link);
				list_append(&b->free_link, &cache->free_list);
				fibril_mutex_unlock(&cache->lock);
				rc = write_blocks(devcon, b->pba,
				    cache->blocks_cluster, b->data, b->size);
				if (rc != EOK) {
					/*
					 * We did not manage to write the block
					 * to the device. Keep it around for
					 * another try. Hopefully, we will grab
					 * another block next time.
					 */
					if (b->write_failures < MAX_WRITE_RETRIES) {
						b->write_failures++;
						fibril_mutex_unlock(&b->lock);
						goto retry;
					} else {
						printf("Too many errors writing block %"
						    PRIuOFF64 " from device handle %" PRIun "\n"
						    "SEVERE DATA LOSS POSSIBLE\n",
						    b->lba, devcon->service_id);
					}
				} else
					b->write_failures = 0;

				b->dirty = false;
				if (!fibril_mutex_trylock(&cache->lock)) {
					/*
					 * Somebody is probably racing with us.
					 * Unlock the block and retry.
					 */
					fibril_mutex_unlock(&b->lock);
					goto retry;
				}
				hlink = hash_table_find(&cache->block_hash, &ba);
				if (hlink) {
					/*
					 * Someone else must have already
					 * instantiated the block while we were
					 * not holding the cache lock.
					 * Leave the recycled block on the
					 * freelist and continue as if we
					 * found the block of interest during
					 * the first try.
					 */
					fibril_mutex_unlock(&b->lock);
					goto found;
				}
			}
			fibril_mutex_unlock(&b->lock);

			/*
			 * Unlink the block from the free list and the hash
			 * table.
			 */
			list_remove(&b->free_link);
			hash_table_remove_item(&cache->block_hash, &b->hash_link);
		}

		block_initialize(b);
		b->service_id = service_id;
		b->size = cache->lblock_size;
		b->lba = ba;
		b->pba = ba_ltop(devcon, b->lba);
		hash_table_insert(&cache->block_hash, &b->hash_link);

		/*
		 * Lock the block before releasing the cache lock. Thus we don't
		 * kill concurrent operations on the cache while doing I/O on
		 * the block.
		 */
		fibril_mutex_lock(&b->lock);
		fibril_mutex_unlock(&cache->lock);

		if (!(flags & BLOCK_FLAGS_NOREAD)) {
			/*
			 * The block contains old or no data. We need to read
			 * the new contents from the device.
			 */
			rc = read_blocks(devcon, b->pba, cache->blocks_cluster,
			    b->data, cache->lblock_size);
			if (rc != EOK)
				b->toxic = true;
		} else
			rc = EOK;

		fibril_mutex_unlock(&b->lock);
	}
out:
	if ((rc != EOK) && b) {
		assert(b->toxic);
		(void) block_put(b);
		b = NULL;
	}
	*block = b;
	return rc;
}
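
/*
 * Illustrative sketch added for exposition (not part of the original
 * file): the typical get/modify/put cycle. A writer takes the block's
 * contents_lock, sets the dirty flag and lets block_put() decide when
 * to write the block back, depending on the cache mode. BLOCK_FLAGS_NONE
 * is assumed to be the "no flags" value declared in block.h.
 */
#if 0
static errno_t example_update_block(service_id_t sid, aoff64_t lba)
{
	block_t *b;
	errno_t rc = block_get(&b, sid, lba, BLOCK_FLAGS_NONE);
	if (rc != EOK)
		return rc;

	fibril_rwlock_write_lock(&b->contents_lock);
	memset(b->data, 0, b->size);	/* modify the cached contents */
	b->dirty = true;		/* schedule for write-back */
	fibril_rwlock_write_unlock(&b->contents_lock);

	return block_put(b);
}
#endif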

/** Release a reference to a block.
 *
 * If the last reference is dropped, the block is put on the free list.
 *
 * @param block		Block of which a reference is to be released.
 *
 * @return		EOK on success or an error code.
 */
errno_t block_put(block_t *block)
{
	devcon_t *devcon = devcon_search(block->service_id);
	cache_t *cache;
	unsigned blocks_cached;
	enum cache_mode mode;
	errno_t rc = EOK;

	assert(devcon);
	assert(devcon->cache);
	assert(block->refcnt >= 1);

	cache = devcon->cache;

retry:
	fibril_mutex_lock(&cache->lock);
	blocks_cached = cache->blocks_cached;
	mode = cache->mode;
	fibril_mutex_unlock(&cache->lock);

	/*
	 * Determine whether to sync the block. Syncing the block is best done
	 * when not holding the cache lock as it does not impede concurrency.
	 * Since the situation may have changed when we unlocked the cache, the
	 * blocks_cached and mode variables are mere hints. We will recheck the
	 * conditions later when the cache lock is held again.
	 */
	fibril_mutex_lock(&block->lock);
	if (block->toxic)
		block->dirty = false;	/* will not write back toxic block */
	if (block->dirty && (block->refcnt == 1) &&
	    (blocks_cached > CACHE_HI_WATERMARK || mode != CACHE_MODE_WB)) {
		rc = write_blocks(devcon, block->pba, cache->blocks_cluster,
		    block->data, block->size);
		if (rc == EOK)
			block->write_failures = 0;
		block->dirty = false;
	}
	fibril_mutex_unlock(&block->lock);

	fibril_mutex_lock(&cache->lock);
	fibril_mutex_lock(&block->lock);
	if (!--block->refcnt) {
		/*
		 * Last reference to the block was dropped. Either free the
		 * block or put it on the free list. In case of an I/O error,
		 * free the block.
		 */
		if ((cache->blocks_cached > CACHE_HI_WATERMARK) ||
		    (rc != EOK)) {
			/*
			 * Currently there are too many cached blocks or there
			 * was an I/O error when writing the block back to the
			 * device.
			 */
			if (block->dirty) {
				/*
				 * We cannot sync the block while holding the
				 * cache lock. Release everything and retry.
				 */
				block->refcnt++;

				if (block->write_failures < MAX_WRITE_RETRIES) {
					block->write_failures++;
					fibril_mutex_unlock(&block->lock);
					fibril_mutex_unlock(&cache->lock);
					goto retry;
				} else {
650 printf("Too many errors writing block %"
651 PRIuOFF64 "from device handle %" PRIun "\n"
652 "SEVERE DATA LOSS POSSIBLE\n",
653 block->lba, devcon->service_id);
654 }
			}
			/*
			 * Take the block out of the cache and free it.
			 */
			hash_table_remove_item(&cache->block_hash, &block->hash_link);
			fibril_mutex_unlock(&block->lock);
			free(block->data);
			free(block);
			cache->blocks_cached--;
			fibril_mutex_unlock(&cache->lock);
			return rc;
		}
		/*
		 * Put the block on the free list.
		 */
		if (cache->mode != CACHE_MODE_WB && block->dirty) {
			/*
			 * We cannot sync the block while holding the cache
			 * lock. Release everything and retry.
			 */
			block->refcnt++;
			fibril_mutex_unlock(&block->lock);
			fibril_mutex_unlock(&cache->lock);
			goto retry;
		}
		list_append(&block->free_link, &cache->free_list);
	}
	fibril_mutex_unlock(&block->lock);
	fibril_mutex_unlock(&cache->lock);

	return rc;
}
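
/*
 * Illustrative sketch added for exposition (not part of the original
 * file): when the caller is about to overwrite a whole block, passing
 * BLOCK_FLAGS_NOREAD to block_get() skips the initial read from the
 * device; only the eventual write-back remains.
 */
#if 0
static errno_t example_overwrite_block(service_id_t sid, aoff64_t lba,
    const void *src)
{
	block_t *b;
	errno_t rc = block_get(&b, sid, lba, BLOCK_FLAGS_NOREAD);
	if (rc != EOK)
		return rc;

	fibril_rwlock_write_lock(&b->contents_lock);
	memcpy(b->data, src, b->size);
	b->dirty = true;
	fibril_rwlock_write_unlock(&b->contents_lock);

	return block_put(b);
}
#endif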

/** Read sequential data from a block device.
 *
 * @param service_id	Service ID of the block device.
 * @param buf		Buffer for holding one block.
 * @param bufpos	Pointer to the first unread valid offset within the
 *			communication buffer.
 * @param buflen	Pointer to the number of unread bytes that are ready in
 *			the communication buffer.
 * @param pos		Device position to be read.
 * @param dst		Destination buffer.
 * @param size		Size of the destination buffer.
 *
 * @return		EOK on success or an error code on failure.
 */
errno_t block_seqread(service_id_t service_id, void *buf, size_t *bufpos,
    size_t *buflen, aoff64_t *pos, void *dst, size_t size)
{
	size_t offset = 0;
	size_t left = size;
	size_t block_size;
	devcon_t *devcon;

	devcon = devcon_search(service_id);
	assert(devcon);
	block_size = devcon->pblock_size;

	while (left > 0) {
		size_t rd;

		if (*bufpos + left < *buflen)
			rd = left;
		else
			rd = *buflen - *bufpos;

		if (rd > 0) {
			/*
			 * Copy the contents of the communication buffer to the
			 * destination buffer.
			 */
			memcpy(dst + offset, buf + *bufpos, rd);
			offset += rd;
			*bufpos += rd;
			*pos += rd;
			left -= rd;
		}

		if (*bufpos == *buflen) {
			/* Refill the communication buffer with a new block. */
			errno_t rc;

			rc = read_blocks(devcon, *pos / block_size, 1, buf,
			    devcon->pblock_size);
			if (rc != EOK) {
				return rc;
			}

			*bufpos = 0;
			*buflen = block_size;
		}
	}

	return EOK;
}
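
/*
 * Illustrative sketch added for exposition (not part of the original
 * file): reading a header sequentially from the start of a device.
 * The caller provides a one-block communication buffer plus position
 * state; starting with bufpos == buflen == 0 forces the first refill.
 */
#if 0
static errno_t example_seqread(service_id_t sid, void *hdr, size_t hdr_size)
{
	size_t bsize;
	errno_t rc = block_get_bsize(sid, &bsize);
	if (rc != EOK)
		return rc;

	void *buf = malloc(bsize);	/* holds one physical block */
	if (!buf)
		return ENOMEM;

	size_t bufpos = 0;
	size_t buflen = 0;
	aoff64_t pos = 0;		/* block-aligned starting offset */

	rc = block_seqread(sid, buf, &bufpos, &buflen, &pos, hdr, hdr_size);

	free(buf);
	return rc;
}
#endif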

/** Read blocks directly from device (bypass cache).
 *
 * @param service_id	Service ID of the block device.
 * @param ba		Address of first block (physical).
 * @param cnt		Number of blocks.
 * @param buf		Buffer for storing the data.
 *
 * @return		EOK on success or an error code on failure.
 */
errno_t block_read_direct(service_id_t service_id, aoff64_t ba, size_t cnt, void *buf)
{
	devcon_t *devcon;

	devcon = devcon_search(service_id);
	assert(devcon);

	return read_blocks(devcon, ba, cnt, buf, devcon->pblock_size * cnt);
}

/** Write blocks directly to device (bypass cache).
 *
 * @param service_id	Service ID of the block device.
 * @param ba		Address of first block (physical).
 * @param cnt		Number of blocks.
 * @param data		The data to be written.
 *
 * @return		EOK on success or an error code on failure.
 */
errno_t block_write_direct(service_id_t service_id, aoff64_t ba, size_t cnt,
    const void *data)
{
	devcon_t *devcon;

	devcon = devcon_search(service_id);
	assert(devcon);

	return write_blocks(devcon, ba, cnt, (void *) data, devcon->pblock_size * cnt);
}

/** Synchronize blocks to persistent storage.
 *
 * @param service_id	Service ID of the block device.
 * @param ba		Address of first block (physical).
 * @param cnt		Number of blocks.
 *
 * @return		EOK on success or an error code on failure.
 */
errno_t block_sync_cache(service_id_t service_id, aoff64_t ba, size_t cnt)
{
	devcon_t *devcon;

	devcon = devcon_search(service_id);
	assert(devcon);

	return bd_sync_cache(devcon->bd, ba, cnt);
}

/** Get device block size.
 *
 * @param service_id	Service ID of the block device.
 * @param bsize		Output block size.
 *
 * @return		EOK on success or an error code on failure.
 */
errno_t block_get_bsize(service_id_t service_id, size_t *bsize)
{
	devcon_t *devcon;

	devcon = devcon_search(service_id);
	assert(devcon);

	return bd_get_block_size(devcon->bd, bsize);
}

/** Get number of blocks on device.
 *
 * @param service_id	Service ID of the block device.
 * @param nblocks	Output number of blocks.
 *
 * @return		EOK on success or an error code on failure.
 */
errno_t block_get_nblocks(service_id_t service_id, aoff64_t *nblocks)
{
	devcon_t *devcon = devcon_search(service_id);
	assert(devcon);

	return bd_get_num_blocks(devcon->bd, nblocks);
}

/** Read bytes directly from the device (bypass cache).
 *
 * @param service_id	Service ID of the block device.
 * @param abs_offset	Absolute offset in bytes where to start reading.
 * @param bytes		Number of bytes to read.
 * @param data		Buffer that receives the data.
 *
 * @return		EOK on success or an error code on failure.
 */
errno_t block_read_bytes_direct(service_id_t service_id, aoff64_t abs_offset,
    size_t bytes, void *data)
{
	errno_t rc;
	size_t phys_block_size;
	size_t buf_size;
	void *buffer;
	aoff64_t first_block;
	aoff64_t last_block;
	size_t blocks;
	size_t offset;

	rc = block_get_bsize(service_id, &phys_block_size);
	if (rc != EOK) {
		return rc;
	}

	/* Calculate data position and required space. */
	first_block = abs_offset / phys_block_size;
	offset = abs_offset % phys_block_size;
	last_block = (abs_offset + bytes - 1) / phys_block_size;
	blocks = last_block - first_block + 1;
	buf_size = blocks * phys_block_size;

	/* Read the data into memory. */
	buffer = malloc(buf_size);
	if (buffer == NULL) {
		return ENOMEM;
	}

	rc = block_read_direct(service_id, first_block, blocks, buffer);
	if (rc != EOK) {
		free(buffer);
		return rc;
	}

	/* Copy the data out of the bounce buffer. */
	memcpy(data, buffer + offset, bytes);
	free(buffer);

	return EOK;
}
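
/*
 * Worked example of the arithmetic above (exposition only): with a
 * 512-byte physical block size, abs_offset = 700 and bytes = 900 give
 * first_block = 1, offset = 188 and last_block = 3, i.e. blocks = 3,
 * so a 1536-byte bounce buffer is read and 900 bytes are copied out
 * starting at offset 188.
 */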

/** Get TOC from device.
 *
 * @param service_id	Service ID of the block device.
 * @param session	Starting session.
 * @param buf		Buffer for storing the TOC data.
 * @param bufsize	Size of the buffer.
 *
 * @return		EOK on success or an error code.
 */
errno_t block_read_toc(service_id_t service_id, uint8_t session, void *buf,
    size_t bufsize)
{
	devcon_t *devcon = devcon_search(service_id);

	assert(devcon);
	return bd_read_toc(devcon->bd, session, buf, bufsize);
}

/** Read blocks from block device.
 *
 * @param devcon	Device connection.
 * @param ba		Address of first block.
 * @param cnt		Number of blocks.
 * @param buf		Buffer for storing the data.
 * @param size		Size of the buffer in bytes.
 *
 * @return		EOK on success or an error code on failure.
 */
static errno_t read_blocks(devcon_t *devcon, aoff64_t ba, size_t cnt, void *buf,
    size_t size)
{
	assert(devcon);

	errno_t rc = bd_read_blocks(devcon->bd, ba, cnt, buf, size);
	if (rc != EOK) {
		printf("Error %s reading %zu blocks starting at block %" PRIuOFF64
		    " from device handle %" PRIun "\n", str_error_name(rc), cnt, ba,
		    devcon->service_id);
#ifndef NDEBUG
		stacktrace_print();
#endif
	}

	return rc;
}

/** Write blocks to block device.
 *
 * @param devcon	Device connection.
 * @param ba		Address of first block.
 * @param cnt		Number of blocks.
 * @param data		Buffer containing the data to write.
 * @param size		Size of the buffer in bytes.
 *
 * @return		EOK on success or an error code on failure.
 */
static errno_t write_blocks(devcon_t *devcon, aoff64_t ba, size_t cnt, void *data,
    size_t size)
{
	assert(devcon);

	errno_t rc = bd_write_blocks(devcon->bd, ba, cnt, data, size);
	if (rc != EOK) {
		printf("Error %s writing %zu blocks starting at block %" PRIuOFF64
		    " to device handle %" PRIun "\n", str_error_name(rc), cnt, ba,
		    devcon->service_id);
#ifndef NDEBUG
		stacktrace_print();
#endif
	}

	return rc;
}

/** Convert logical block address to physical block address. */
static aoff64_t ba_ltop(devcon_t *devcon, aoff64_t lba)
{
	assert(devcon->cache != NULL);
	return lba * devcon->cache->blocks_cluster;
}
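
/*
 * Example of the mapping above (exposition only): with 512-byte
 * physical blocks and a 4096-byte logical block size, blocks_cluster
 * is 8, so logical block 3 maps to physical block 24.
 */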

/** @}
 */