source: mainline/uspace/lib/block/libblock.c@bc216a0

Last change on this file since bc216a0 was bc216a0, checked in by Adam Hraska <adam.hraska+hos@…>, 13 years ago:

Refactored all users of hash_table to use opaque void * keys instead of the cumbersome unsigned long[] keys. Switched from the ad hoc computation of hashes of multiple values to hash_combine().

/*
 * Copyright (c) 2008 Jakub Jermar
 * Copyright (c) 2008 Martin Decky
 * Copyright (c) 2011 Martin Sucha
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup libblock
 * @{
 */
/**
 * @file
 * @brief Block device client library and block cache.
 */

#include "libblock.h"
#include "../../srv/vfs/vfs.h"
#include <ipc/loc.h>
#include <ipc/bd.h>
#include <ipc/services.h>
#include <errno.h>
#include <sys/mman.h>
#include <async.h>
#include <as.h>
#include <assert.h>
#include <fibril_synch.h>
#include <adt/list.h>
#include <adt/hash_table.h>
#include <macros.h>
#include <mem.h>
#include <malloc.h>
#include <stdio.h>
#include <sys/typefmt.h>
#include <stacktrace.h>

/** Lock protecting the device connection list. */
static FIBRIL_MUTEX_INITIALIZE(dcl_lock);
/** Device connection list head. */
static LIST_INITIALIZE(dcl);

typedef struct {
    fibril_mutex_t lock;
    size_t lblock_size;         /**< Logical block size. */
    unsigned blocks_cluster;    /**< Physical blocks per block_t. */
    unsigned block_count;       /**< Total number of blocks. */
    unsigned blocks_cached;     /**< Number of cached blocks. */
    hash_table_t block_hash;
    list_t free_list;
    enum cache_mode mode;
} cache_t;

typedef struct {
    link_t link;
    service_id_t service_id;
    async_sess_t *sess;
    fibril_mutex_t comm_area_lock;
    void *comm_area;
    size_t comm_size;
    void *bb_buf;
    aoff64_t bb_addr;
    size_t pblock_size;         /**< Physical block size. */
    cache_t *cache;
} devcon_t;

static int read_blocks(devcon_t *, aoff64_t, size_t);
static int write_blocks(devcon_t *, aoff64_t, size_t);
static int get_block_size(async_sess_t *, size_t *);
static int get_num_blocks(async_sess_t *, aoff64_t *);
static aoff64_t ba_ltop(devcon_t *, aoff64_t);

static devcon_t *devcon_search(service_id_t service_id)
{
    fibril_mutex_lock(&dcl_lock);

    list_foreach(dcl, cur) {
        devcon_t *devcon = list_get_instance(cur, devcon_t, link);
        if (devcon->service_id == service_id) {
            fibril_mutex_unlock(&dcl_lock);
            return devcon;
        }
    }

    fibril_mutex_unlock(&dcl_lock);
    return NULL;
}

static int devcon_add(service_id_t service_id, async_sess_t *sess,
    size_t bsize, void *comm_area, size_t comm_size)
{
    devcon_t *devcon;

    if (comm_size < bsize)
        return EINVAL;

    devcon = malloc(sizeof(devcon_t));
    if (!devcon)
        return ENOMEM;

    link_initialize(&devcon->link);
    devcon->service_id = service_id;
    devcon->sess = sess;
    fibril_mutex_initialize(&devcon->comm_area_lock);
    devcon->comm_area = comm_area;
    devcon->comm_size = comm_size;
    devcon->bb_buf = NULL;
    devcon->bb_addr = 0;
    devcon->pblock_size = bsize;
    devcon->cache = NULL;

    fibril_mutex_lock(&dcl_lock);
    list_foreach(dcl, cur) {
        devcon_t *d = list_get_instance(cur, devcon_t, link);
        if (d->service_id == service_id) {
            fibril_mutex_unlock(&dcl_lock);
            free(devcon);
            return EEXIST;
        }
    }
    list_append(&devcon->link, &dcl);
    fibril_mutex_unlock(&dcl_lock);
    return EOK;
}

static void devcon_remove(devcon_t *devcon)
{
    fibril_mutex_lock(&dcl_lock);
    list_remove(&devcon->link);
    fibril_mutex_unlock(&dcl_lock);
}

/** Initialize a connection to a block device.
 *
 * Connects to the block device service, shares a communication area with
 * it and queries the physical block size.
 *
 * @param mgmt       Exchange management style for the session.
 * @param service_id Service ID of the block device.
 * @param comm_size  Size of the shared communication area in bytes.
 *
 * @return EOK on success or a negative error code.
 */
int block_init(exch_mgmt_t mgmt, service_id_t service_id,
    size_t comm_size)
{
    void *comm_area = mmap(NULL, comm_size, PROTO_READ | PROTO_WRITE,
        MAP_ANONYMOUS | MAP_PRIVATE, 0, 0);
    if (!comm_area)
        return ENOMEM;

    async_sess_t *sess = loc_service_connect(mgmt, service_id,
        IPC_FLAG_BLOCKING);
    if (!sess) {
        munmap(comm_area, comm_size);
        return ENOENT;
    }

    async_exch_t *exch = async_exchange_begin(sess);
    int rc = async_share_out_start(exch, comm_area,
        AS_AREA_READ | AS_AREA_WRITE);
    async_exchange_end(exch);

    if (rc != EOK) {
        munmap(comm_area, comm_size);
        async_hangup(sess);
        return rc;
    }

    size_t bsize;
    rc = get_block_size(sess, &bsize);

    if (rc != EOK) {
        munmap(comm_area, comm_size);
        async_hangup(sess);
        return rc;
    }

    rc = devcon_add(service_id, sess, bsize, comm_area, comm_size);
    if (rc != EOK) {
        munmap(comm_area, comm_size);
        async_hangup(sess);
        return rc;
    }

    return EOK;
}
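
/*
 * Example of the intended call sequence (an illustrative sketch, not part
 * of this file; assumes a valid service_id and that the caller picks the
 * exchange management style, e.g. EXCHANGE_SERIALIZE from <async.h>):
 *
 *     rc = block_init(EXCHANGE_SERIALIZE, service_id, 4096);
 *     if (rc == EOK) {
 *         rc = block_cache_init(service_id, 1024, 0, CACHE_MODE_WB);
 *         ... use block_get() / block_put() ...
 *         block_fini(service_id);
 *     }
 */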

/** Terminate the connection to a block device and release resources.
 *
 * Destroys the block cache (if any), removes the device connection and
 * frees the communication area.
 *
 * @param service_id Service ID of the block device.
 */
void block_fini(service_id_t service_id)
{
    devcon_t *devcon = devcon_search(service_id);
    assert(devcon);

    if (devcon->cache)
        (void) block_cache_fini(service_id);

    devcon_remove(devcon);

    if (devcon->bb_buf)
        free(devcon->bb_buf);

    munmap(devcon->comm_area, devcon->comm_size);
    async_hangup(devcon->sess);

    free(devcon);
}

/** Read the device's boot block into a separately allocated buffer.
 *
 * The buffer is kept in the device connection and can later be retrieved
 * with block_bb_get().
 *
 * @param service_id Service ID of the block device.
 * @param ba         Address of the boot block.
 *
 * @return EOK on success or a negative error code.
 */
int block_bb_read(service_id_t service_id, aoff64_t ba)
{
    void *bb_buf;
    int rc;

    devcon_t *devcon = devcon_search(service_id);
    if (!devcon)
        return ENOENT;
    if (devcon->bb_buf)
        return EEXIST;
    bb_buf = malloc(devcon->pblock_size);
    if (!bb_buf)
        return ENOMEM;

    fibril_mutex_lock(&devcon->comm_area_lock);
    rc = read_blocks(devcon, 0, 1);
    if (rc != EOK) {
        fibril_mutex_unlock(&devcon->comm_area_lock);
        free(bb_buf);
        return rc;
    }
    memcpy(bb_buf, devcon->comm_area, devcon->pblock_size);
    fibril_mutex_unlock(&devcon->comm_area_lock);

    devcon->bb_buf = bb_buf;
    devcon->bb_addr = ba;

    return EOK;
}

/** Get a pointer to the cached boot block buffer (NULL if none). */
void *block_bb_get(service_id_t service_id)
{
    devcon_t *devcon = devcon_search(service_id);
    assert(devcon);
    return devcon->bb_buf;
}

/** Key hash function: the key is the logical block address itself. */
static size_t cache_key_hash(void *key)
{
    aoff64_t *lba = (aoff64_t *) key;
    return *lba;
}

/** Item hash function: hash a cached block by its logical block address. */
static size_t cache_hash(const ht_link_t *item)
{
    block_t *b = hash_table_get_inst(item, block_t, hash_link);
    return b->lba;
}

/** Match a logical block address key against a cached block. */
static bool cache_key_equal(void *key, const ht_link_t *item)
{
    aoff64_t *lba = (aoff64_t *) key;
    block_t *b = hash_table_get_inst(item, block_t, hash_link);
    return b->lba == *lba;
}

static hash_table_ops_t cache_ops = {
    .hash = cache_hash,
    .key_hash = cache_key_hash,
    .key_equal = cache_key_equal,
    .equal = NULL,
    .remove_callback = NULL
};
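
/*
 * Cache lookups key the hash table directly with a pointer to the logical
 * block address, as in this sketch of what block_get() does below:
 *
 *     aoff64_t lba = 42;
 *     ht_link_t *hlink = hash_table_find(&cache->block_hash, &lba);
 *     if (hlink != NULL) {
 *         block_t *b = hash_table_get_inst(hlink, block_t, hash_link);
 *         ... b->lba is now known to equal 42 ...
 *     }
 */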

/** Create a block cache for a device.
 *
 * @param service_id Service ID of the block device.
 * @param size       Logical block size; must be a multiple of the device's
 *                   physical block size.
 * @param blocks     Total number of logical blocks on the device.
 * @param mode       Cache mode (see enum cache_mode).
 *
 * @return EOK on success or a negative error code.
 */
int block_cache_init(service_id_t service_id, size_t size, unsigned blocks,
    enum cache_mode mode)
{
    devcon_t *devcon = devcon_search(service_id);
    cache_t *cache;
    if (!devcon)
        return ENOENT;
    if (devcon->cache)
        return EEXIST;
    cache = malloc(sizeof(cache_t));
    if (!cache)
        return ENOMEM;

    fibril_mutex_initialize(&cache->lock);
    list_initialize(&cache->free_list);
    cache->lblock_size = size;
    cache->block_count = blocks;
    cache->blocks_cached = 0;
    cache->mode = mode;

    /* Allow 1:1 or small-to-large block size translation. */
    if (cache->lblock_size % devcon->pblock_size != 0) {
        free(cache);
        return ENOTSUP;
    }

    cache->blocks_cluster = cache->lblock_size / devcon->pblock_size;

    if (!hash_table_create(&cache->block_hash, 0, 0, &cache_ops)) {
        free(cache);
        return ENOMEM;
    }

    devcon->cache = cache;
    return EOK;
}
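
/*
 * For example, with 512-byte physical blocks, a cache created with a
 * 2048-byte logical block size gets blocks_cluster == 4: each block_t then
 * covers four consecutive physical blocks and logical block 3 starts at
 * physical block 12 (see ba_ltop() below).
 */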

/** Destroy a device's block cache, writing back any dirty blocks. */
int block_cache_fini(service_id_t service_id)
{
    devcon_t *devcon = devcon_search(service_id);
    cache_t *cache;
    int rc;

    if (!devcon)
        return ENOENT;
    if (!devcon->cache)
        return EOK;
    cache = devcon->cache;

    /*
     * We are expecting to find all blocks for this device handle on the
     * free list, i.e. the block reference count should be zero. Do not
     * bother with the cache and block locks because we are single-threaded.
     */
    while (!list_empty(&cache->free_list)) {
        block_t *b = list_get_instance(list_first(&cache->free_list),
            block_t, free_link);

        list_remove(&b->free_link);
        if (b->dirty) {
            memcpy(devcon->comm_area, b->data, b->size);
            rc = write_blocks(devcon, b->pba, cache->blocks_cluster);
            if (rc != EOK)
                return rc;
        }

        hash_table_remove_item(&cache->block_hash, &b->hash_link);

        free(b->data);
        free(b);
    }

    hash_table_destroy(&cache->block_hash);
    devcon->cache = NULL;
    free(cache);

    return EOK;
}

#define CACHE_LO_WATERMARK 10
#define CACHE_HI_WATERMARK 20

static bool cache_can_grow(cache_t *cache)
{
    if (cache->blocks_cached < CACHE_LO_WATERMARK)
        return true;
    if (!list_empty(&cache->free_list))
        return false;
    return true;
}
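
/*
 * In other words, the cache grows unconditionally while it holds fewer
 * than CACHE_LO_WATERMARK blocks; beyond that it grows only when there is
 * no unused block left to recycle. E.g. with 15 cached blocks and a
 * non-empty free list, block_get() recycles instead of allocating.
 */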

static void block_initialize(block_t *b)
{
    fibril_mutex_initialize(&b->lock);
    b->refcnt = 1;
    b->dirty = false;
    b->toxic = false;
    fibril_rwlock_initialize(&b->contents_lock);
    link_initialize(&b->free_link);
}

/** Instantiate a block in memory and get a reference to it.
 *
 * @param block      Pointer to where the function will store the
 *                   block pointer on success.
 * @param service_id Service ID of the block device.
 * @param ba         Block address (logical).
 * @param flags      If BLOCK_FLAGS_NOREAD is specified, block_get()
 *                   will not read the contents of the block from the
 *                   device.
 *
 * @return EOK on success or a negative error code.
 */
int block_get(block_t **block, service_id_t service_id, aoff64_t ba, int flags)
{
    devcon_t *devcon;
    cache_t *cache;
    block_t *b;
    link_t *link;

    int rc;

    devcon = devcon_search(service_id);

    assert(devcon);
    assert(devcon->cache);

    cache = devcon->cache;

retry:
    rc = EOK;
    b = NULL;

    fibril_mutex_lock(&cache->lock);
    ht_link_t *hlink = hash_table_find(&cache->block_hash, &ba);
    if (hlink) {
found:
        /*
         * We found the block in the cache.
         */
        b = hash_table_get_inst(hlink, block_t, hash_link);
        fibril_mutex_lock(&b->lock);
        if (b->refcnt++ == 0)
            list_remove(&b->free_link);
        if (b->toxic)
            rc = EIO;
        fibril_mutex_unlock(&b->lock);
        fibril_mutex_unlock(&cache->lock);
    } else {
        /*
         * The block was not found in the cache.
         */
        if (cache_can_grow(cache)) {
            /*
             * We can grow the cache by allocating new blocks.
             * Should the allocation fail, we fail over and try to
             * recycle a block from the cache.
             */
            b = malloc(sizeof(block_t));
            if (!b)
                goto recycle;
            b->data = malloc(cache->lblock_size);
            if (!b->data) {
                free(b);
                b = NULL;
                goto recycle;
            }
            cache->blocks_cached++;
        } else {
            /*
             * Try to recycle a block from the free list.
             */
recycle:
            if (list_empty(&cache->free_list)) {
                fibril_mutex_unlock(&cache->lock);
                rc = ENOMEM;
                goto out;
            }
            link = list_first(&cache->free_list);
            b = list_get_instance(link, block_t, free_link);

            fibril_mutex_lock(&b->lock);
            if (b->dirty) {
                /*
                 * The block needs to be written back to the
                 * device before it changes identity. Do this
                 * while not holding the cache lock so that
                 * concurrency is not impeded. Also move the
                 * block to the end of the free list so that we
                 * do not slow down other instances of
                 * block_get() draining the free list.
                 */
                list_remove(&b->free_link);
                list_append(&b->free_link, &cache->free_list);
                fibril_mutex_unlock(&cache->lock);
                fibril_mutex_lock(&devcon->comm_area_lock);
                memcpy(devcon->comm_area, b->data, b->size);
                rc = write_blocks(devcon, b->pba,
                    cache->blocks_cluster);
                fibril_mutex_unlock(&devcon->comm_area_lock);
                if (rc != EOK) {
                    /*
                     * We did not manage to write the block
                     * to the device. Keep it around for
                     * another try. Hopefully, we will grab
                     * another block next time.
                     */
                    fibril_mutex_unlock(&b->lock);
                    goto retry;
                }
                b->dirty = false;
                if (!fibril_mutex_trylock(&cache->lock)) {
                    /*
                     * Somebody is probably racing with us.
                     * Unlock the block and retry.
                     */
                    fibril_mutex_unlock(&b->lock);
                    goto retry;
                }
                hlink = hash_table_find(&cache->block_hash, &ba);
                if (hlink) {
                    /*
                     * Someone else must have already
                     * instantiated the block while we were
                     * not holding the cache lock.
                     * Leave the recycled block on the
                     * freelist and continue as if we
                     * found the block of interest during
                     * the first try.
                     */
                    fibril_mutex_unlock(&b->lock);
                    goto found;
                }
            }
            fibril_mutex_unlock(&b->lock);

            /*
             * Unlink the block from the free list and the hash
             * table.
             */
            list_remove(&b->free_link);
            hash_table_remove_item(&cache->block_hash, &b->hash_link);
        }

        block_initialize(b);
        b->service_id = service_id;
        b->size = cache->lblock_size;
        b->lba = ba;
        b->pba = ba_ltop(devcon, b->lba);
        hash_table_insert(&cache->block_hash, &b->hash_link);

        /*
         * Lock the block before releasing the cache lock. Thus we don't
         * kill concurrent operations on the cache while doing I/O on
         * the block.
         */
        fibril_mutex_lock(&b->lock);
        fibril_mutex_unlock(&cache->lock);

        if (!(flags & BLOCK_FLAGS_NOREAD)) {
            /*
             * The block contains old or no data. We need to read
             * the new contents from the device.
             */
            fibril_mutex_lock(&devcon->comm_area_lock);
            rc = read_blocks(devcon, b->pba, cache->blocks_cluster);
            memcpy(b->data, devcon->comm_area, cache->lblock_size);
            fibril_mutex_unlock(&devcon->comm_area_lock);
            if (rc != EOK)
                b->toxic = true;
        } else
            rc = EOK;

        fibril_mutex_unlock(&b->lock);
    }
out:
    if ((rc != EOK) && b) {
        assert(b->toxic);
        (void) block_put(b);
        b = NULL;
    }
    *block = b;
    return rc;
}
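
/*
 * Typical read-side usage (an illustrative sketch; BLOCK_FLAGS_NONE is the
 * no-op flag value declared in libblock.h):
 *
 *     block_t *b;
 *     rc = block_get(&b, service_id, 42, BLOCK_FLAGS_NONE);
 *     if (rc == EOK) {
 *         memcpy(buffer, b->data, b->size);
 *         rc = block_put(b);
 *     }
 */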

/** Release a reference to a block.
 *
 * If the last reference is dropped, the block is put on the free list.
 *
 * @param block Block of which a reference is to be released.
 *
 * @return EOK on success or a negative error code.
 */
int block_put(block_t *block)
{
    devcon_t *devcon = devcon_search(block->service_id);
    cache_t *cache;
    unsigned blocks_cached;
    enum cache_mode mode;
    int rc = EOK;

    assert(devcon);
    assert(devcon->cache);
    assert(block->refcnt >= 1);

    cache = devcon->cache;

retry:
    fibril_mutex_lock(&cache->lock);
    blocks_cached = cache->blocks_cached;
    mode = cache->mode;
    fibril_mutex_unlock(&cache->lock);

    /*
     * Determine whether to sync the block. Syncing the block is best done
     * when not holding the cache lock as it does not impede concurrency.
     * Since the situation may have changed when we unlocked the cache, the
     * blocks_cached and mode variables are mere hints. We will recheck the
     * conditions later when the cache lock is held again.
     */
    fibril_mutex_lock(&block->lock);
    if (block->toxic)
        block->dirty = false;   /* will not write back toxic block */
    if (block->dirty && (block->refcnt == 1) &&
        (blocks_cached > CACHE_HI_WATERMARK || mode != CACHE_MODE_WB)) {
        fibril_mutex_lock(&devcon->comm_area_lock);
        memcpy(devcon->comm_area, block->data, block->size);
        rc = write_blocks(devcon, block->pba, cache->blocks_cluster);
        fibril_mutex_unlock(&devcon->comm_area_lock);
        block->dirty = false;
    }
    fibril_mutex_unlock(&block->lock);

    fibril_mutex_lock(&cache->lock);
    fibril_mutex_lock(&block->lock);
    if (!--block->refcnt) {
        /*
         * Last reference to the block was dropped. Either free the
         * block or put it on the free list. In case of an I/O error,
         * free the block.
         */
        if ((cache->blocks_cached > CACHE_HI_WATERMARK) ||
            (rc != EOK)) {
            /*
             * Currently there are too many cached blocks or there
             * was an I/O error when writing the block back to the
             * device.
             */
            if (block->dirty) {
                /*
                 * We cannot sync the block while holding the
                 * cache lock. Release everything and retry.
                 */
                block->refcnt++;
                fibril_mutex_unlock(&block->lock);
                fibril_mutex_unlock(&cache->lock);
                goto retry;
            }
            /*
             * Take the block out of the cache and free it.
             */
            hash_table_remove_item(&cache->block_hash, &block->hash_link);
            fibril_mutex_unlock(&block->lock);
            free(block->data);
            free(block);
            cache->blocks_cached--;
            fibril_mutex_unlock(&cache->lock);
            return rc;
        }
        /*
         * Put the block on the free list.
         */
        if (cache->mode != CACHE_MODE_WB && block->dirty) {
            /*
             * We cannot sync the block while holding the cache
             * lock. Release everything and retry.
             */
            block->refcnt++;
            fibril_mutex_unlock(&block->lock);
            fibril_mutex_unlock(&cache->lock);
            goto retry;
        }
        list_append(&block->free_link, &cache->free_list);
    }
    fibril_mutex_unlock(&block->lock);
    fibril_mutex_unlock(&cache->lock);

    return rc;
}
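
/*
 * The write side is the mirror image (illustrative sketch): a client marks
 * the block dirty before dropping its reference, and block_put() then
 * syncs it immediately (write-through) or lazily (write-back):
 *
 *     rc = block_get(&b, service_id, 42, BLOCK_FLAGS_NOREAD);
 *     if (rc == EOK) {
 *         memset(b->data, 0, b->size);
 *         b->dirty = true;
 *         rc = block_put(b);
 *     }
 */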

/** Read sequential data from a block device.
 *
 * The block size used for the transfer is the device's physical block size.
 *
 * @param service_id Service ID of the block device.
 * @param bufpos     Pointer to the first unread valid offset within the
 *                   communication buffer.
 * @param buflen     Pointer to the number of valid bytes in the
 *                   communication buffer.
 * @param pos        Device position to be read.
 * @param dst        Destination buffer.
 * @param size       Size of the destination buffer.
 *
 * @return EOK on success or a negative error code on failure.
 */
int block_seqread(service_id_t service_id, size_t *bufpos, size_t *buflen,
    aoff64_t *pos, void *dst, size_t size)
{
    size_t offset = 0;
    size_t left = size;
    size_t block_size;
    devcon_t *devcon;

    devcon = devcon_search(service_id);
    assert(devcon);
    block_size = devcon->pblock_size;

    fibril_mutex_lock(&devcon->comm_area_lock);
    while (left > 0) {
        size_t rd;

        if (*bufpos + left < *buflen)
            rd = left;
        else
            rd = *buflen - *bufpos;

        if (rd > 0) {
            /*
             * Copy the contents of the communication buffer to the
             * destination buffer.
             */
            memcpy(dst + offset, devcon->comm_area + *bufpos, rd);
            offset += rd;
            *bufpos += rd;
            *pos += rd;
            left -= rd;
        }

        if (*bufpos == *buflen) {
            /* Refill the communication buffer with a new block. */
            int rc;

            rc = read_blocks(devcon, *pos / block_size, 1);
            if (rc != EOK) {
                fibril_mutex_unlock(&devcon->comm_area_lock);
                return rc;
            }

            *bufpos = 0;
            *buflen = block_size;
        }
    }
    fibril_mutex_unlock(&devcon->comm_area_lock);

    return EOK;
}
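
/*
 * The caller keeps bufpos, buflen and pos across calls; all three start at
 * zero so that the very first call forces a refill (illustrative sketch):
 *
 *     size_t bufpos = 0, buflen = 0;
 *     aoff64_t pos = 0;
 *     uint8_t header[64];
 *     rc = block_seqread(service_id, &bufpos, &buflen, &pos, header, 64);
 */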

/** Read blocks directly from device (bypass cache).
 *
 * @param service_id Service ID of the block device.
 * @param ba         Address of first block (physical).
 * @param cnt        Number of blocks.
 * @param buf        Buffer for storing the data.
 *
 * @return EOK on success or negative error code on failure.
 */
int block_read_direct(service_id_t service_id, aoff64_t ba, size_t cnt, void *buf)
{
    devcon_t *devcon;
    int rc;

    devcon = devcon_search(service_id);
    assert(devcon);

    fibril_mutex_lock(&devcon->comm_area_lock);

    rc = read_blocks(devcon, ba, cnt);
    if (rc == EOK)
        memcpy(buf, devcon->comm_area, devcon->pblock_size * cnt);

    fibril_mutex_unlock(&devcon->comm_area_lock);

    return rc;
}

/** Write blocks directly to device (bypass cache).
 *
 * @param service_id Service ID of the block device.
 * @param ba         Address of first block (physical).
 * @param cnt        Number of blocks.
 * @param data       The data to be written.
 *
 * @return EOK on success or negative error code on failure.
 */
int block_write_direct(service_id_t service_id, aoff64_t ba, size_t cnt,
    const void *data)
{
    devcon_t *devcon;
    int rc;

    devcon = devcon_search(service_id);
    assert(devcon);

    fibril_mutex_lock(&devcon->comm_area_lock);

    memcpy(devcon->comm_area, data, devcon->pblock_size * cnt);
    rc = write_blocks(devcon, ba, cnt);

    fibril_mutex_unlock(&devcon->comm_area_lock);

    return rc;
}

/** Get device block size.
 *
 * @param service_id Service ID of the block device.
 * @param bsize      Output block size.
 *
 * @return EOK on success or negative error code on failure.
 */
int block_get_bsize(service_id_t service_id, size_t *bsize)
{
    devcon_t *devcon;

    devcon = devcon_search(service_id);
    assert(devcon);

    return get_block_size(devcon->sess, bsize);
}

/** Get number of blocks on device.
 *
 * @param service_id Service ID of the block device.
 * @param nblocks    Output number of blocks.
 *
 * @return EOK on success or negative error code on failure.
 */
int block_get_nblocks(service_id_t service_id, aoff64_t *nblocks)
{
    devcon_t *devcon = devcon_search(service_id);
    assert(devcon);

    return get_num_blocks(devcon->sess, nblocks);
}

/** Read bytes directly from the device (bypass cache).
 *
 * @param service_id Service ID of the block device.
 * @param abs_offset Absolute offset in bytes where to start reading.
 * @param bytes      Number of bytes to read.
 * @param data       Buffer that receives the data.
 *
 * @return EOK on success or negative error code on failure.
 */
int block_read_bytes_direct(service_id_t service_id, aoff64_t abs_offset,
    size_t bytes, void *data)
{
    int rc;
    size_t phys_block_size;
    size_t buf_size;
    void *buffer;
    aoff64_t first_block;
    aoff64_t last_block;
    size_t blocks;
    size_t offset;

    rc = block_get_bsize(service_id, &phys_block_size);
    if (rc != EOK)
        return rc;

    /* Calculate data position and required space. */
    first_block = abs_offset / phys_block_size;
    offset = abs_offset % phys_block_size;
    last_block = (abs_offset + bytes - 1) / phys_block_size;
    blocks = last_block - first_block + 1;
    buf_size = blocks * phys_block_size;

    /* Read the data into memory. */
    buffer = malloc(buf_size);
    if (buffer == NULL)
        return ENOMEM;

    rc = block_read_direct(service_id, first_block, blocks, buffer);
    if (rc != EOK) {
        free(buffer);
        return rc;
    }

    /* Copy the data from the buffer. */
    memcpy(data, buffer + offset, bytes);
    free(buffer);

    return EOK;
}
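
/*
 * Worked example of the arithmetic above: with phys_block_size == 512,
 * abs_offset == 1000 and bytes == 600, we get first_block == 1,
 * offset == 488, last_block == 3 and blocks == 3, so three whole blocks
 * (1536 bytes) are read and the 600 requested bytes are copied out
 * starting 488 bytes into the bounce buffer.
 */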

/** Get TOC from device.
 *
 * @param service_id Service ID of the block device.
 * @param session    Starting session.
 *
 * @return Allocated TOC structure.
 * @return NULL on failure.
 */
toc_block_t *block_get_toc(service_id_t service_id, uint8_t session)
{
    devcon_t *devcon = devcon_search(service_id);
    assert(devcon);

    toc_block_t *toc = NULL;

    fibril_mutex_lock(&devcon->comm_area_lock);

    async_exch_t *exch = async_exchange_begin(devcon->sess);
    int rc = async_req_1_0(exch, BD_READ_TOC, session);
    async_exchange_end(exch);

    if (rc == EOK) {
        toc = (toc_block_t *) malloc(sizeof(toc_block_t));
        if (toc != NULL) {
            memset(toc, 0, sizeof(toc_block_t));
            memcpy(toc, devcon->comm_area,
                min(devcon->pblock_size, sizeof(toc_block_t)));
        }
    }

    fibril_mutex_unlock(&devcon->comm_area_lock);

    return toc;
}

/** Read blocks from block device.
 *
 * The data is read into the device's shared communication area.
 *
 * @param devcon Device connection.
 * @param ba     Address of first block.
 * @param cnt    Number of blocks.
 *
 * @return EOK on success or negative error code on failure.
 */
static int read_blocks(devcon_t *devcon, aoff64_t ba, size_t cnt)
{
    assert(devcon);

    async_exch_t *exch = async_exchange_begin(devcon->sess);
    int rc = async_req_3_0(exch, BD_READ_BLOCKS, LOWER32(ba),
        UPPER32(ba), cnt);
    async_exchange_end(exch);

    if (rc != EOK) {
        printf("Error %d reading %zu blocks starting at block %" PRIuOFF64
            " from device handle %" PRIun "\n", rc, cnt, ba,
            devcon->service_id);
#ifndef NDEBUG
        stacktrace_print();
#endif
    }

    return rc;
}

/** Write blocks to block device.
 *
 * The data is taken from the device's shared communication area.
 *
 * @param devcon Device connection.
 * @param ba     Address of first block.
 * @param cnt    Number of blocks.
 *
 * @return EOK on success or negative error code on failure.
 */
static int write_blocks(devcon_t *devcon, aoff64_t ba, size_t cnt)
{
    assert(devcon);

    async_exch_t *exch = async_exchange_begin(devcon->sess);
    int rc = async_req_3_0(exch, BD_WRITE_BLOCKS, LOWER32(ba),
        UPPER32(ba), cnt);
    async_exchange_end(exch);

    if (rc != EOK) {
        printf("Error %d writing %zu blocks starting at block %" PRIuOFF64
            " to device handle %" PRIun "\n", rc, cnt, ba,
            devcon->service_id);
#ifndef NDEBUG
        stacktrace_print();
#endif
    }

    return rc;
}

/** Get block size used by the device. */
static int get_block_size(async_sess_t *sess, size_t *bsize)
{
    sysarg_t bs;

    async_exch_t *exch = async_exchange_begin(sess);
    int rc = async_req_0_1(exch, BD_GET_BLOCK_SIZE, &bs);
    async_exchange_end(exch);

    if (rc == EOK)
        *bsize = (size_t) bs;

    return rc;
}

/** Get total number of blocks on block device. */
static int get_num_blocks(async_sess_t *sess, aoff64_t *nblocks)
{
    sysarg_t nb_l;
    sysarg_t nb_h;

    async_exch_t *exch = async_exchange_begin(sess);
    int rc = async_req_0_2(exch, BD_GET_NUM_BLOCKS, &nb_l, &nb_h);
    async_exchange_end(exch);

    if (rc == EOK)
        *nblocks = (aoff64_t) MERGE_LOUP32(nb_l, nb_h);

    return rc;
}

/** Convert logical block address to physical block address. */
static aoff64_t ba_ltop(devcon_t *devcon, aoff64_t lba)
{
    assert(devcon->cache != NULL);
    return lba * devcon->cache->blocks_cluster;
}

/** @}
 */