source: mainline/uspace/lib/block/libblock.c@ aa865ee

Last change on this file since aa865ee was b72efe8, checked in by Jiri Svoboda <jiri@…>, 14 years ago

Separate list_t typedef from link_t (user-space part).

  • list_t represents lists
  • Use list_first(), list_last(), list_empty() where appropriate
  • Use list_foreach() where possible
  • assert_link_not_used()
  • usb_hid_report_path_free() shall not unlink the path, caller must do it
/*
 * Copyright (c) 2008 Jakub Jermar
 * Copyright (c) 2008 Martin Decky
 * Copyright (c) 2011 Martin Sucha
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup libblock
 * @{
 */
/**
 * @file
 * @brief Block device client library.
 */

#include "libblock.h"
#include "../../srv/vfs/vfs.h"
#include <ipc/devmap.h>
#include <ipc/bd.h>
#include <ipc/services.h>
#include <errno.h>
#include <sys/mman.h>
#include <async.h>
#include <as.h>
#include <assert.h>
#include <fibril_synch.h>
#include <adt/list.h>
#include <adt/hash_table.h>
#include <macros.h>
#include <mem.h>
#include <malloc.h>
#include <stdio.h>
#include <sys/typefmt.h>
#include <stacktrace.h>

/** Lock protecting the device connection list. */
static FIBRIL_MUTEX_INITIALIZE(dcl_lock);
/** Device connection list head. */
static LIST_INITIALIZE(dcl);

#define CACHE_BUCKETS_LOG2 10
#define CACHE_BUCKETS (1 << CACHE_BUCKETS_LOG2)

typedef struct {
    fibril_mutex_t lock;
    size_t lblock_size;        /**< Logical block size. */
    unsigned blocks_cluster;   /**< Physical blocks per block_t. */
    unsigned block_count;      /**< Total number of blocks. */
    unsigned blocks_cached;    /**< Number of cached blocks. */
    hash_table_t block_hash;
    list_t free_list;
    enum cache_mode mode;
} cache_t;

typedef struct {
    link_t link;
    devmap_handle_t devmap_handle;
    async_sess_t *sess;
    fibril_mutex_t comm_area_lock;
    void *comm_area;
    size_t comm_size;
    void *bb_buf;
    aoff64_t bb_addr;
    size_t pblock_size;        /**< Physical block size. */
    cache_t *cache;
} devcon_t;

static int read_blocks(devcon_t *, aoff64_t, size_t);
static int write_blocks(devcon_t *, aoff64_t, size_t);
static int get_block_size(async_sess_t *, size_t *);
static int get_num_blocks(async_sess_t *, aoff64_t *);
static aoff64_t ba_ltop(devcon_t *, aoff64_t);

static devcon_t *devcon_search(devmap_handle_t devmap_handle)
{
    fibril_mutex_lock(&dcl_lock);

    list_foreach(dcl, cur) {
        devcon_t *devcon = list_get_instance(cur, devcon_t, link);
        if (devcon->devmap_handle == devmap_handle) {
            fibril_mutex_unlock(&dcl_lock);
            return devcon;
        }
    }

    fibril_mutex_unlock(&dcl_lock);
    return NULL;
}

static int devcon_add(devmap_handle_t devmap_handle, async_sess_t *sess,
    size_t bsize, void *comm_area, size_t comm_size)
{
    devcon_t *devcon;

    if (comm_size < bsize)
        return EINVAL;

    devcon = malloc(sizeof(devcon_t));
    if (!devcon)
        return ENOMEM;

    link_initialize(&devcon->link);
    devcon->devmap_handle = devmap_handle;
    devcon->sess = sess;
    fibril_mutex_initialize(&devcon->comm_area_lock);
    devcon->comm_area = comm_area;
    devcon->comm_size = comm_size;
    devcon->bb_buf = NULL;
    devcon->bb_addr = 0;
    devcon->pblock_size = bsize;
    devcon->cache = NULL;

    fibril_mutex_lock(&dcl_lock);
    list_foreach(dcl, cur) {
        devcon_t *d = list_get_instance(cur, devcon_t, link);
        if (d->devmap_handle == devmap_handle) {
            fibril_mutex_unlock(&dcl_lock);
            free(devcon);
            return EEXIST;
        }
    }
    list_append(&devcon->link, &dcl);
    fibril_mutex_unlock(&dcl_lock);
    return EOK;
}

static void devcon_remove(devcon_t *devcon)
{
    fibril_mutex_lock(&dcl_lock);
    list_remove(&devcon->link);
    fibril_mutex_unlock(&dcl_lock);
}

int block_init(exch_mgmt_t mgmt, devmap_handle_t devmap_handle,
    size_t comm_size)
{
    void *comm_area = mmap(NULL, comm_size, PROTO_READ | PROTO_WRITE,
        MAP_ANONYMOUS | MAP_PRIVATE, 0, 0);
    if (!comm_area)
        return ENOMEM;

    async_sess_t *sess = devmap_device_connect(mgmt, devmap_handle,
        IPC_FLAG_BLOCKING);
    if (!sess) {
        munmap(comm_area, comm_size);
        return ENOENT;
    }

    async_exch_t *exch = async_exchange_begin(sess);
    int rc = async_share_out_start(exch, comm_area,
        AS_AREA_READ | AS_AREA_WRITE);
    async_exchange_end(exch);

    if (rc != EOK) {
        munmap(comm_area, comm_size);
        async_hangup(sess);
        return rc;
    }

    size_t bsize;
    rc = get_block_size(sess, &bsize);

    if (rc != EOK) {
        munmap(comm_area, comm_size);
        async_hangup(sess);
        return rc;
    }

    rc = devcon_add(devmap_handle, sess, bsize, comm_area, comm_size);
    if (rc != EOK) {
        munmap(comm_area, comm_size);
        async_hangup(sess);
        return rc;
    }

    return EOK;
}
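
/*
 * Example: typical initialization sequence (a sketch; the service name
 * "bd/disk0" and the 4096-byte communication area size are illustrative,
 * and the handle lookup via devmap_device_get_handle() is assumed to be
 * done by the caller):
 *
 *    devmap_handle_t handle;
 *    if (devmap_device_get_handle("bd/disk0", &handle, 0) == EOK &&
 *        block_init(EXCHANGE_SERIALIZE, handle, 4096) == EOK) {
 *        ...
 *        block_fini(handle);
 *    }
 */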

void block_fini(devmap_handle_t devmap_handle)
{
    devcon_t *devcon = devcon_search(devmap_handle);
    assert(devcon);

    if (devcon->cache)
        (void) block_cache_fini(devmap_handle);

    devcon_remove(devcon);

    if (devcon->bb_buf)
        free(devcon->bb_buf);

    munmap(devcon->comm_area, devcon->comm_size);
    async_hangup(devcon->sess);

    free(devcon);
}

int block_bb_read(devmap_handle_t devmap_handle, aoff64_t ba)
{
    void *bb_buf;
    int rc;

    devcon_t *devcon = devcon_search(devmap_handle);
    if (!devcon)
        return ENOENT;
    if (devcon->bb_buf)
        return EEXIST;
    bb_buf = malloc(devcon->pblock_size);
    if (!bb_buf)
        return ENOMEM;

    fibril_mutex_lock(&devcon->comm_area_lock);
    rc = read_blocks(devcon, 0, 1);
    if (rc != EOK) {
        fibril_mutex_unlock(&devcon->comm_area_lock);
        free(bb_buf);
        return rc;
    }
    memcpy(bb_buf, devcon->comm_area, devcon->pblock_size);
    fibril_mutex_unlock(&devcon->comm_area_lock);

    devcon->bb_buf = bb_buf;
    devcon->bb_addr = ba;

    return EOK;
}

void *block_bb_get(devmap_handle_t devmap_handle)
{
    devcon_t *devcon = devcon_search(devmap_handle);
    assert(devcon);
    return devcon->bb_buf;
}
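
/*
 * Example: loading the boot block (sketch). A file system driver reads
 * the boot block once during mount and then accesses the cached copy;
 * the superblock_t type below is a hypothetical stand-in for whatever
 * on-disk structure the caller expects:
 *
 *    if (block_bb_read(handle, 0) == EOK) {
 *        superblock_t *sb = (superblock_t *) block_bb_get(handle);
 *        ...
 *    }
 */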

static hash_index_t cache_hash(unsigned long *key)
{
    return *key & (CACHE_BUCKETS - 1);
}

static int cache_compare(unsigned long *key, hash_count_t keys, link_t *item)
{
    block_t *b = hash_table_get_instance(item, block_t, hash_link);
    return b->lba == *key;
}

static void cache_remove_callback(link_t *item)
{
}

static hash_table_operations_t cache_ops = {
    .hash = cache_hash,
    .compare = cache_compare,
    .remove_callback = cache_remove_callback
};

int block_cache_init(devmap_handle_t devmap_handle, size_t size, unsigned blocks,
    enum cache_mode mode)
{
    devcon_t *devcon = devcon_search(devmap_handle);
    cache_t *cache;
    if (!devcon)
        return ENOENT;
    if (devcon->cache)
        return EEXIST;
    cache = malloc(sizeof(cache_t));
    if (!cache)
        return ENOMEM;

    fibril_mutex_initialize(&cache->lock);
    list_initialize(&cache->free_list);
    cache->lblock_size = size;
    cache->block_count = blocks;
    cache->blocks_cached = 0;
    cache->mode = mode;

    /* Allow 1:1 or small-to-large block size translation. */
    if (cache->lblock_size % devcon->pblock_size != 0) {
        free(cache);
        return ENOTSUP;
    }

    cache->blocks_cluster = cache->lblock_size / devcon->pblock_size;

    if (!hash_table_create(&cache->block_hash, CACHE_BUCKETS, 1,
        &cache_ops)) {
        free(cache);
        return ENOMEM;
    }

    devcon->cache = cache;
    return EOK;
}
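
/*
 * Example: enabling a write-back cache of 1 KiB logical blocks on top of
 * the device's physical blocks (sketch; the block count is queried from
 * the device first). Note that the logical block size must be a multiple
 * of the physical one, as checked above:
 *
 *    aoff64_t nblocks;
 *    rc = block_get_nblocks(handle, &nblocks);
 *    if (rc == EOK)
 *        rc = block_cache_init(handle, 1024, nblocks, CACHE_MODE_WB);
 */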

int block_cache_fini(devmap_handle_t devmap_handle)
{
    devcon_t *devcon = devcon_search(devmap_handle);
    cache_t *cache;
    int rc;

    if (!devcon)
        return ENOENT;
    if (!devcon->cache)
        return EOK;
    cache = devcon->cache;

    /*
     * We are expecting to find all blocks for this device handle on the
     * free list, i.e. the block reference count should be zero. Do not
     * bother with the cache and block locks because we are single-threaded.
     */
    while (!list_empty(&cache->free_list)) {
        block_t *b = list_get_instance(list_first(&cache->free_list),
            block_t, free_link);

        list_remove(&b->free_link);
        if (b->dirty) {
            memcpy(devcon->comm_area, b->data, b->size);
            rc = write_blocks(devcon, b->pba, cache->blocks_cluster);
            if (rc != EOK)
                return rc;
        }

        unsigned long key = b->lba;
        hash_table_remove(&cache->block_hash, &key, 1);

        free(b->data);
        free(b);
    }

    hash_table_destroy(&cache->block_hash);
    devcon->cache = NULL;
    free(cache);

    return EOK;
}

#define CACHE_LO_WATERMARK 10
#define CACHE_HI_WATERMARK 20
static bool cache_can_grow(cache_t *cache)
{
    if (cache->blocks_cached < CACHE_LO_WATERMARK)
        return true;
    if (!list_empty(&cache->free_list))
        return false;
    return true;
}

static void block_initialize(block_t *b)
{
    fibril_mutex_initialize(&b->lock);
    b->refcnt = 1;
    b->dirty = false;
    b->toxic = false;
    fibril_rwlock_initialize(&b->contents_lock);
    link_initialize(&b->free_link);
    link_initialize(&b->hash_link);
}

/** Instantiate a block in memory and get a reference to it.
 *
 * @param block          Pointer to where the function will store the
 *                       block pointer on success.
 * @param devmap_handle  Device handle of the block device.
 * @param ba             Block address (logical).
 * @param flags          If BLOCK_FLAGS_NOREAD is specified, block_get()
 *                       will not read the contents of the block from the
 *                       device.
 *
 * @return EOK on success or a negative error code.
 */
int block_get(block_t **block, devmap_handle_t devmap_handle, aoff64_t ba, int flags)
{
    devcon_t *devcon;
    cache_t *cache;
    block_t *b;
    link_t *l;
    unsigned long key = ba;
    int rc;

    devcon = devcon_search(devmap_handle);

    assert(devcon);
    assert(devcon->cache);

    cache = devcon->cache;

retry:
    rc = EOK;
    b = NULL;

    fibril_mutex_lock(&cache->lock);
    l = hash_table_find(&cache->block_hash, &key);
    if (l) {
found:
        /*
         * We found the block in the cache.
         */
        b = hash_table_get_instance(l, block_t, hash_link);
        fibril_mutex_lock(&b->lock);
        if (b->refcnt++ == 0)
            list_remove(&b->free_link);
        if (b->toxic)
            rc = EIO;
        fibril_mutex_unlock(&b->lock);
        fibril_mutex_unlock(&cache->lock);
    } else {
        /*
         * The block was not found in the cache.
         */
        if (cache_can_grow(cache)) {
            /*
             * We can grow the cache by allocating new blocks.
             * Should the allocation fail, we fail over and try to
             * recycle a block from the cache.
             */
            b = malloc(sizeof(block_t));
            if (!b)
                goto recycle;
            b->data = malloc(cache->lblock_size);
            if (!b->data) {
                free(b);
                b = NULL;
                goto recycle;
            }
            cache->blocks_cached++;
        } else {
            /*
             * Try to recycle a block from the free list.
             */
            unsigned long temp_key;
recycle:
            if (list_empty(&cache->free_list)) {
                fibril_mutex_unlock(&cache->lock);
                rc = ENOMEM;
                goto out;
            }
            l = list_first(&cache->free_list);
            b = list_get_instance(l, block_t, free_link);

            fibril_mutex_lock(&b->lock);
            if (b->dirty) {
                /*
                 * The block needs to be written back to the
                 * device before it changes identity. Do this
                 * while not holding the cache lock so that
                 * concurrency is not impeded. Also move the
                 * block to the end of the free list so that we
                 * do not slow down other instances of
                 * block_get() draining the free list.
                 */
                list_remove(&b->free_link);
                list_append(&b->free_link, &cache->free_list);
                fibril_mutex_unlock(&cache->lock);
                fibril_mutex_lock(&devcon->comm_area_lock);
                memcpy(devcon->comm_area, b->data, b->size);
                rc = write_blocks(devcon, b->pba,
                    cache->blocks_cluster);
                fibril_mutex_unlock(&devcon->comm_area_lock);
                if (rc != EOK) {
                    /*
                     * We did not manage to write the block
                     * to the device. Keep it around for
                     * another try. Hopefully, we will grab
                     * another block next time.
                     */
                    fibril_mutex_unlock(&b->lock);
                    goto retry;
                }
                b->dirty = false;
                if (!fibril_mutex_trylock(&cache->lock)) {
                    /*
                     * Somebody is probably racing with us.
                     * Unlock the block and retry.
                     */
                    fibril_mutex_unlock(&b->lock);
                    goto retry;
                }
                l = hash_table_find(&cache->block_hash, &key);
                if (l) {
                    /*
                     * Someone else must have already
                     * instantiated the block while we were
                     * not holding the cache lock.
                     * Leave the recycled block on the
                     * freelist and continue as if we
                     * found the block of interest during
                     * the first try.
                     */
                    fibril_mutex_unlock(&b->lock);
                    goto found;
                }
            }
            fibril_mutex_unlock(&b->lock);

            /*
             * Unlink the block from the free list and the hash
             * table.
             */
            list_remove(&b->free_link);
            temp_key = b->lba;
            hash_table_remove(&cache->block_hash, &temp_key, 1);
        }

        block_initialize(b);
        b->devmap_handle = devmap_handle;
        b->size = cache->lblock_size;
        b->lba = ba;
        b->pba = ba_ltop(devcon, b->lba);
        hash_table_insert(&cache->block_hash, &key, &b->hash_link);

        /*
         * Lock the block before releasing the cache lock. Thus we don't
         * kill concurrent operations on the cache while doing I/O on
         * the block.
         */
        fibril_mutex_lock(&b->lock);
        fibril_mutex_unlock(&cache->lock);

        if (!(flags & BLOCK_FLAGS_NOREAD)) {
            /*
             * The block contains old or no data. We need to read
             * the new contents from the device.
             */
            fibril_mutex_lock(&devcon->comm_area_lock);
            rc = read_blocks(devcon, b->pba, cache->blocks_cluster);
            memcpy(b->data, devcon->comm_area, cache->lblock_size);
            fibril_mutex_unlock(&devcon->comm_area_lock);
            if (rc != EOK)
                b->toxic = true;
        } else
            rc = EOK;

        fibril_mutex_unlock(&b->lock);
    }
out:
    if ((rc != EOK) && b) {
        assert(b->toxic);
        (void) block_put(b);
        b = NULL;
    }
    *block = b;
    return rc;
}

/** Release a reference to a block.
 *
 * If the last reference is dropped, the block is put on the free list.
 *
 * @param block  Block of which a reference is to be released.
 *
 * @return EOK on success or a negative error code.
 */
int block_put(block_t *block)
{
    devcon_t *devcon = devcon_search(block->devmap_handle);
    cache_t *cache;
    unsigned blocks_cached;
    enum cache_mode mode;
    int rc = EOK;

    assert(devcon);
    assert(devcon->cache);
    assert(block->refcnt >= 1);

    cache = devcon->cache;

retry:
    fibril_mutex_lock(&cache->lock);
    blocks_cached = cache->blocks_cached;
    mode = cache->mode;
    fibril_mutex_unlock(&cache->lock);

    /*
     * Determine whether to sync the block. Syncing the block is best done
     * when not holding the cache lock, as it does not impede concurrency.
     * Since the situation may have changed since we unlocked the cache, the
     * blocks_cached and mode variables are mere hints. We will recheck the
     * conditions later when the cache lock is held again.
     */
    fibril_mutex_lock(&block->lock);
    if (block->toxic)
        block->dirty = false;  /* will not write back toxic block */
    if (block->dirty && (block->refcnt == 1) &&
        (blocks_cached > CACHE_HI_WATERMARK || mode != CACHE_MODE_WB)) {
        fibril_mutex_lock(&devcon->comm_area_lock);
        memcpy(devcon->comm_area, block->data, block->size);
        rc = write_blocks(devcon, block->pba, cache->blocks_cluster);
        fibril_mutex_unlock(&devcon->comm_area_lock);
        block->dirty = false;
    }
    fibril_mutex_unlock(&block->lock);

    fibril_mutex_lock(&cache->lock);
    fibril_mutex_lock(&block->lock);
    if (!--block->refcnt) {
        /*
         * Last reference to the block was dropped. Either free the
         * block or put it on the free list. In case of an I/O error,
         * free the block.
         */
        if ((cache->blocks_cached > CACHE_HI_WATERMARK) ||
            (rc != EOK)) {
            /*
             * Currently there are too many cached blocks or there
             * was an I/O error when writing the block back to the
             * device.
             */
            if (block->dirty) {
                /*
                 * We cannot sync the block while holding the
                 * cache lock. Release everything and retry.
                 */
                block->refcnt++;
                fibril_mutex_unlock(&block->lock);
                fibril_mutex_unlock(&cache->lock);
                goto retry;
            }
            /*
             * Take the block out of the cache and free it.
             */
            unsigned long key = block->lba;
            hash_table_remove(&cache->block_hash, &key, 1);
            fibril_mutex_unlock(&block->lock);
            free(block->data);
            free(block);
            cache->blocks_cached--;
            fibril_mutex_unlock(&cache->lock);
            return rc;
        }
        /*
         * Put the block on the free list.
         */
        if (cache->mode != CACHE_MODE_WB && block->dirty) {
            /*
             * We cannot sync the block while holding the cache
             * lock. Release everything and retry.
             */
            block->refcnt++;
            fibril_mutex_unlock(&block->lock);
            fibril_mutex_unlock(&cache->lock);
            goto retry;
        }
        list_append(&block->free_link, &cache->free_list);
    }
    fibril_mutex_unlock(&block->lock);
    fibril_mutex_unlock(&cache->lock);

    return rc;
}
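
/*
 * Example: read-modify-write of one logical block through the cache
 * (sketch; error handling abbreviated; BLOCK_FLAGS_NONE requests that the
 * block's current contents be read in first). Setting the dirty flag
 * defers the write-back according to the cache mode:
 *
 *    block_t *b;
 *    rc = block_get(&b, handle, ba, BLOCK_FLAGS_NONE);
 *    if (rc == EOK) {
 *        memcpy(b->data, src, b->size);
 *        b->dirty = true;
 *        rc = block_put(b);
 *    }
 */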

/** Read sequential data from a block device.
 *
 * @param devmap_handle  Device handle of the block device.
 * @param bufpos         Pointer to the first unread valid offset within
 *                       the communication buffer.
 * @param buflen         Pointer to the number of unread bytes that are
 *                       ready in the communication buffer.
 * @param pos            Device position to be read.
 * @param dst            Destination buffer.
 * @param size           Size of the destination buffer.
 *
 * @return EOK on success or a negative error code on failure.
 */
int block_seqread(devmap_handle_t devmap_handle, size_t *bufpos, size_t *buflen,
    aoff64_t *pos, void *dst, size_t size)
{
    size_t offset = 0;
    size_t left = size;
    size_t block_size;
    devcon_t *devcon;

    devcon = devcon_search(devmap_handle);
    assert(devcon);
    block_size = devcon->pblock_size;

    fibril_mutex_lock(&devcon->comm_area_lock);
    while (left > 0) {
        size_t rd;

        if (*bufpos + left < *buflen)
            rd = left;
        else
            rd = *buflen - *bufpos;

        if (rd > 0) {
            /*
             * Copy the contents of the communication buffer to the
             * destination buffer.
             */
            memcpy(dst + offset, devcon->comm_area + *bufpos, rd);
            offset += rd;
            *bufpos += rd;
            *pos += rd;
            left -= rd;
        }

        if (*bufpos == *buflen) {
            /* Refill the communication buffer with a new block. */
            int rc;

            rc = read_blocks(devcon, *pos / block_size, 1);
            if (rc != EOK) {
                fibril_mutex_unlock(&devcon->comm_area_lock);
                return rc;
            }

            *bufpos = 0;
            *buflen = block_size;
        }
    }
    fibril_mutex_unlock(&devcon->comm_area_lock);

    return EOK;
}
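
/*
 * Example: consuming a byte stream from the beginning of the device
 * (sketch). Starting with bufpos == buflen == 0 forces the first call to
 * fetch a fresh block; the same three variables must be carried over
 * between consecutive calls:
 *
 *    size_t bufpos = 0;
 *    size_t buflen = 0;
 *    aoff64_t pos = 0;
 *    rc = block_seqread(handle, &bufpos, &buflen, &pos, dst, size);
 */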

/** Read blocks directly from device (bypass cache).
 *
 * @param devmap_handle  Device handle of the block device.
 * @param ba             Address of first block (physical).
 * @param cnt            Number of blocks.
 * @param buf            Buffer for storing the data.
 *
 * @return EOK on success or negative error code on failure.
 */
int block_read_direct(devmap_handle_t devmap_handle, aoff64_t ba, size_t cnt, void *buf)
{
    devcon_t *devcon;
    int rc;

    devcon = devcon_search(devmap_handle);
    assert(devcon);

    fibril_mutex_lock(&devcon->comm_area_lock);

    rc = read_blocks(devcon, ba, cnt);
    if (rc == EOK)
        memcpy(buf, devcon->comm_area, devcon->pblock_size * cnt);

    fibril_mutex_unlock(&devcon->comm_area_lock);

    return rc;
}

/** Write blocks directly to device (bypass cache).
 *
 * @param devmap_handle  Device handle of the block device.
 * @param ba             Address of first block (physical).
 * @param cnt            Number of blocks.
 * @param data           The data to be written.
 *
 * @return EOK on success or negative error code on failure.
 */
int block_write_direct(devmap_handle_t devmap_handle, aoff64_t ba, size_t cnt,
    const void *data)
{
    devcon_t *devcon;
    int rc;

    devcon = devcon_search(devmap_handle);
    assert(devcon);

    fibril_mutex_lock(&devcon->comm_area_lock);

    memcpy(devcon->comm_area, data, devcon->pblock_size * cnt);
    rc = write_blocks(devcon, ba, cnt);

    fibril_mutex_unlock(&devcon->comm_area_lock);

    return rc;
}
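
/*
 * Example: copying a run of raw physical blocks (sketch). Since both
 * functions stage data in the shared communication area, the caller must
 * keep cnt * pblock_size within the comm_size passed to block_init():
 *
 *    rc = block_read_direct(handle, ba, cnt, buf);
 *    if (rc == EOK)
 *        rc = block_write_direct(handle, ba + cnt, cnt, buf);
 */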

/** Get device block size.
 *
 * @param devmap_handle  Device handle of the block device.
 * @param bsize          Output block size.
 *
 * @return EOK on success or negative error code on failure.
 */
int block_get_bsize(devmap_handle_t devmap_handle, size_t *bsize)
{
    devcon_t *devcon;

    devcon = devcon_search(devmap_handle);
    assert(devcon);

    return get_block_size(devcon->sess, bsize);
}

/** Get number of blocks on device.
 *
 * @param devmap_handle  Device handle of the block device.
 * @param nblocks        Output number of blocks.
 *
 * @return EOK on success or negative error code on failure.
 */
int block_get_nblocks(devmap_handle_t devmap_handle, aoff64_t *nblocks)
{
    devcon_t *devcon = devcon_search(devmap_handle);
    assert(devcon);

    return get_num_blocks(devcon->sess, nblocks);
}

/** Read bytes directly from the device (bypass cache).
 *
 * @param devmap_handle  Device handle of the block device.
 * @param abs_offset     Absolute offset in bytes where to start reading.
 * @param bytes          Number of bytes to read.
 * @param data           Buffer that receives the data.
 *
 * @return EOK on success or negative error code on failure.
 */
int block_read_bytes_direct(devmap_handle_t devmap_handle, aoff64_t abs_offset,
    size_t bytes, void *data)
{
    int rc;
    size_t phys_block_size;
    size_t buf_size;
    void *buffer;
    aoff64_t first_block;
    aoff64_t last_block;
    size_t blocks;
    size_t offset;

    rc = block_get_bsize(devmap_handle, &phys_block_size);
    if (rc != EOK) {
        return rc;
    }

    /* Calculate data position and required space. */
    first_block = abs_offset / phys_block_size;
    offset = abs_offset % phys_block_size;
    last_block = (abs_offset + bytes - 1) / phys_block_size;
    blocks = last_block - first_block + 1;
    buf_size = blocks * phys_block_size;

    /* Read the data into memory. */
    buffer = malloc(buf_size);
    if (buffer == NULL) {
        return ENOMEM;
    }

    rc = block_read_direct(devmap_handle, first_block, blocks, buffer);
    if (rc != EOK) {
        free(buffer);
        return rc;
    }

    /* Copy the data from the buffer. */
    memcpy(data, buffer + offset, bytes);
    free(buffer);

    return EOK;
}
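
/*
 * Example: reading two bytes at an arbitrary alignment, such as the MBR
 * boot signature at byte offset 510 (sketch):
 *
 *    uint8_t sig[2];
 *    rc = block_read_bytes_direct(handle, 510, 2, sig);
 */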

/** Read blocks from block device.
 *
 * The data is read into the device's communication area.
 *
 * @param devcon  Device connection.
 * @param ba      Address of first block.
 * @param cnt     Number of blocks.
 *
 * @return EOK on success or negative error code on failure.
 */
static int read_blocks(devcon_t *devcon, aoff64_t ba, size_t cnt)
{
    assert(devcon);

    async_exch_t *exch = async_exchange_begin(devcon->sess);
    int rc = async_req_3_0(exch, BD_READ_BLOCKS, LOWER32(ba),
        UPPER32(ba), cnt);
    async_exchange_end(exch);

    if (rc != EOK) {
        printf("Error %d reading %zu blocks starting at block %" PRIuOFF64
            " from device handle %" PRIun "\n", rc, cnt, ba,
            devcon->devmap_handle);
#ifndef NDEBUG
        stacktrace_print();
#endif
    }

    return rc;
}

/** Write blocks to block device.
 *
 * The data to be written is taken from the device's communication area.
 *
 * @param devcon  Device connection.
 * @param ba      Address of first block.
 * @param cnt     Number of blocks.
 *
 * @return EOK on success or negative error code on failure.
 */
static int write_blocks(devcon_t *devcon, aoff64_t ba, size_t cnt)
{
    assert(devcon);

    async_exch_t *exch = async_exchange_begin(devcon->sess);
    int rc = async_req_3_0(exch, BD_WRITE_BLOCKS, LOWER32(ba),
        UPPER32(ba), cnt);
    async_exchange_end(exch);

    if (rc != EOK) {
        printf("Error %d writing %zu blocks starting at block %" PRIuOFF64
            " to device handle %" PRIun "\n", rc, cnt, ba,
            devcon->devmap_handle);
#ifndef NDEBUG
        stacktrace_print();
#endif
    }

    return rc;
}

/** Get block size used by the device. */
static int get_block_size(async_sess_t *sess, size_t *bsize)
{
    sysarg_t bs;

    async_exch_t *exch = async_exchange_begin(sess);
    int rc = async_req_0_1(exch, BD_GET_BLOCK_SIZE, &bs);
    async_exchange_end(exch);

    if (rc == EOK)
        *bsize = (size_t) bs;

    return rc;
}

/** Get total number of blocks on block device. */
static int get_num_blocks(async_sess_t *sess, aoff64_t *nblocks)
{
    sysarg_t nb_l;
    sysarg_t nb_h;

    async_exch_t *exch = async_exchange_begin(sess);
    int rc = async_req_0_2(exch, BD_GET_NUM_BLOCKS, &nb_l, &nb_h);
    async_exchange_end(exch);

    if (rc == EOK)
        *nblocks = (aoff64_t) MERGE_LOUP32(nb_l, nb_h);

    return rc;
}

/** Convert logical block address to physical block address. */
static aoff64_t ba_ltop(devcon_t *devcon, aoff64_t lba)
{
    assert(devcon->cache != NULL);
    return lba * devcon->cache->blocks_cluster;
}
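
/*
 * For example, with 512-byte physical blocks and a 2048-byte logical block
 * size, blocks_cluster is 4, so logical block 10 maps to physical block 40.
 */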

/** @}
 */