source: mainline/uspace/lib/block/block.c@ 3aac088

Last change on this file since 3aac088 was 4e00f87, checked in by Jakub Jermar <jakub@…>, 13 years ago

Use NULL instead of 0 as a hash_table_ops_t member initializer.

  • Property mode set to 100644
File size: 22.0 KB
[fc840d9]1/*
[ed903174]2 * Copyright (c) 2008 Jakub Jermar
3 * Copyright (c) 2008 Martin Decky
[e272949]4 * Copyright (c) 2011 Martin Sucha
[fc840d9]5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 *
11 * - Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * - Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * - The name of the author may not be used to endorse or promote products
17 * derived from this software without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
20 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
21 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
22 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
23 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
24 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
28 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 */
30
[97c9da8]31/** @addtogroup libblock
[fc840d9]32 * @{
[97c9da8]33 */
[fc840d9]34/**
35 * @file
 36 * @brief Client-side block device access and block caching library.
37 */
38
39#include "../../srv/vfs/vfs.h"
[15f3c3f]40#include <ipc/loc.h>
[7858bc5f]41#include <ipc/services.h>
[fc840d9]42#include <errno.h>
[7858bc5f]43#include <sys/mman.h>
[fc840d9]44#include <async.h>
45#include <as.h>
46#include <assert.h>
[4802dd7]47#include <bd.h>
[1e4cada]48#include <fibril_synch.h>
[d9c8c81]49#include <adt/list.h>
50#include <adt/hash_table.h>
[1ee00b7]51#include <macros.h>
[d00ae4c]52#include <mem.h>
[c7bbf029]53#include <malloc.h>
54#include <stdio.h>
[16fc3c9]55#include <sys/typefmt.h>
56#include <stacktrace.h>
[f73b291]57#include "block.h"
[fc840d9]58
[916bf1a]59/** Lock protecting the device connection list */
[4e1b57d]60static FIBRIL_MUTEX_INITIALIZE(dcl_lock);
[916bf1a]61/** Device connection list head. */
[b72efe8]62static LIST_INITIALIZE(dcl);
[916bf1a]63
[f1ba5d6]64
65typedef struct {
[4e1b57d]66 fibril_mutex_t lock;
[79ae36dd]67 size_t lblock_size; /**< Logical block size. */
68 unsigned blocks_cluster; /**< Physical blocks per block_t */
69 unsigned block_count; /**< Total number of blocks. */
70 unsigned blocks_cached; /**< Number of cached blocks. */
[f1ba5d6]71 hash_table_t block_hash;
[b72efe8]72 list_t free_list;
[1fbe064b]73 enum cache_mode mode;
[f1ba5d6]74} cache_t;
75
[916bf1a]76typedef struct {
77 link_t link;
[15f3c3f]78 service_id_t service_id;
[79ae36dd]79 async_sess_t *sess;
[4802dd7]80 bd_t *bd;
[916bf1a]81 void *bb_buf;
[ed903174]82 aoff64_t bb_addr;
[79ae36dd]83 size_t pblock_size; /**< Physical block size. */
[f1ba5d6]84 cache_t *cache;
[916bf1a]85} devcon_t;
86
[4802dd7]87static int read_blocks(devcon_t *, aoff64_t, size_t, void *, size_t);
88static int write_blocks(devcon_t *, aoff64_t, size_t, void *, size_t);
[79ae36dd]89static aoff64_t ba_ltop(devcon_t *, aoff64_t);
[1fbe064b]90
[15f3c3f]91static devcon_t *devcon_search(service_id_t service_id)
[916bf1a]92{
[4e1b57d]93 fibril_mutex_lock(&dcl_lock);
[79ae36dd]94
[b72efe8]95 list_foreach(dcl, cur) {
[916bf1a]96 devcon_t *devcon = list_get_instance(cur, devcon_t, link);
[15f3c3f]97 if (devcon->service_id == service_id) {
[4e1b57d]98 fibril_mutex_unlock(&dcl_lock);
[916bf1a]99 return devcon;
100 }
101 }
[79ae36dd]102
[4e1b57d]103 fibril_mutex_unlock(&dcl_lock);
[916bf1a]104 return NULL;
105}
106
[15f3c3f]107static int devcon_add(service_id_t service_id, async_sess_t *sess,
[4802dd7]108 size_t bsize, bd_t *bd)
[916bf1a]109{
110 devcon_t *devcon;
[79ae36dd]111
[916bf1a]112 devcon = malloc(sizeof(devcon_t));
113 if (!devcon)
114 return ENOMEM;
115
116 link_initialize(&devcon->link);
[15f3c3f]117 devcon->service_id = service_id;
[79ae36dd]118 devcon->sess = sess;
[4802dd7]119 devcon->bd = bd;
[6284978]120 devcon->bb_buf = NULL;
[1ee00b7]121 devcon->bb_addr = 0;
122 devcon->pblock_size = bsize;
[f1ba5d6]123 devcon->cache = NULL;
[79ae36dd]124
[4e1b57d]125 fibril_mutex_lock(&dcl_lock);
[b72efe8]126 list_foreach(dcl, cur) {
[916bf1a]127 devcon_t *d = list_get_instance(cur, devcon_t, link);
[15f3c3f]128 if (d->service_id == service_id) {
[4e1b57d]129 fibril_mutex_unlock(&dcl_lock);
[916bf1a]130 free(devcon);
131 return EEXIST;
132 }
133 }
[b72efe8]134 list_append(&devcon->link, &dcl);
[4e1b57d]135 fibril_mutex_unlock(&dcl_lock);
[916bf1a]136 return EOK;
137}
138
139static void devcon_remove(devcon_t *devcon)
140{
[4e1b57d]141 fibril_mutex_lock(&dcl_lock);
[916bf1a]142 list_remove(&devcon->link);
[4e1b57d]143 fibril_mutex_unlock(&dcl_lock);
[916bf1a]144}
[7858bc5f]145
[15f3c3f]146int block_init(exch_mgmt_t mgmt, service_id_t service_id,
[79ae36dd]147 size_t comm_size)
[7858bc5f]148{
[4802dd7]149 bd_t *bd;
150
[15f3c3f]151 async_sess_t *sess = loc_service_connect(mgmt, service_id,
[79ae36dd]152 IPC_FLAG_BLOCKING);
153 if (!sess) {
154 return ENOENT;
[7858bc5f]155 }
[79ae36dd]156
[4802dd7]157 int rc = bd_open(sess, &bd);
[7858bc5f]158 if (rc != EOK) {
[79ae36dd]159 async_hangup(sess);
[7858bc5f]160 return rc;
161 }
[79ae36dd]162
163 size_t bsize;
[4802dd7]164 rc = bd_get_block_size(bd, &bsize);
[79ae36dd]165 if (rc != EOK) {
[4802dd7]166 bd_close(bd);
[79ae36dd]167 async_hangup(sess);
[1ee00b7]168 return rc;
169 }
[916bf1a]170
[4802dd7]171 rc = devcon_add(service_id, sess, bsize, bd);
[916bf1a]172 if (rc != EOK) {
[4802dd7]173 bd_close(bd);
[79ae36dd]174 async_hangup(sess);
[916bf1a]175 return rc;
176 }
[79ae36dd]177
[7858bc5f]178 return EOK;
179}
180
[15f3c3f]181void block_fini(service_id_t service_id)
[7858bc5f]182{
[15f3c3f]183 devcon_t *devcon = devcon_search(service_id);
[916bf1a]184 assert(devcon);
185
[64bc4b6]186 if (devcon->cache)
[15f3c3f]187 (void) block_cache_fini(service_id);
[79ae36dd]188
[916bf1a]189 devcon_remove(devcon);
[79ae36dd]190
[6284978]191 if (devcon->bb_buf)
192 free(devcon->bb_buf);
[79ae36dd]193
[4802dd7]194 bd_close(devcon->bd);
[79ae36dd]195 async_hangup(devcon->sess);
196
197 free(devcon);
[7858bc5f]198}
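
/*
 * Illustrative usage sketch (not part of the original file): how a client,
 * e.g. a file system driver, might attach to and later detach from a block
 * device with block_init()/block_fini(). The exchange management constant
 * EXCHANGE_SERIALIZE and the 2048-byte communication size are assumptions
 * made for this example only.
 */
static int example_attach_detach(service_id_t service_id)
{
	/* Connect to the device service and open a bd session on it. */
	int rc = block_init(EXCHANGE_SERIALIZE, service_id, 2048);
	if (rc != EOK)
		return rc;

	/* ... use block_get_bsize(), block_cache_init(), block_get(), ... */

	/* Tear the connection down again (finalizes the cache too, if any). */
	block_fini(service_id);
	return EOK;
}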
199
[15f3c3f]200int block_bb_read(service_id_t service_id, aoff64_t ba)
[6284978]201{
202 void *bb_buf;
[0c243b4]203 int rc;
[6284978]204
[15f3c3f]205 devcon_t *devcon = devcon_search(service_id);
[6284978]206 if (!devcon)
207 return ENOENT;
208 if (devcon->bb_buf)
209 return EEXIST;
[1ee00b7]210 bb_buf = malloc(devcon->pblock_size);
[6284978]211 if (!bb_buf)
212 return ENOMEM;
[1ee00b7]213
[4802dd7]214 rc = read_blocks(devcon, 0, 1, bb_buf, devcon->pblock_size);
[0c243b4]215 if (rc != EOK) {
[6284978]216 free(bb_buf);
[0c243b4]217 return rc;
[6284978]218 }
[6408be3]219
[6284978]220 devcon->bb_buf = bb_buf;
[1ee00b7]221 devcon->bb_addr = ba;
[6284978]222
223 return EOK;
224}
225
[15f3c3f]226void *block_bb_get(service_id_t service_id)
[7858bc5f]227{
[15f3c3f]228 devcon_t *devcon = devcon_search(service_id);
[916bf1a]229 assert(devcon);
230 return devcon->bb_buf;
[7858bc5f]231}
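
/*
 * Illustrative usage sketch (not part of the original file): loading the
 * boot block and obtaining a pointer to it. Boot block address 0 is an
 * example value; block_bb_read() always reads physical block 0 and merely
 * records the address passed in.
 */
static int example_boot_block(service_id_t service_id)
{
	int rc = block_bb_read(service_id, 0);
	if (rc != EOK)
		return rc;

	void *bb = block_bb_get(service_id);
	/* ... parse the boot block via bb ... */
	(void) bb;
	return EOK;
}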
232
[062d900]233static size_t cache_key_hash(void *key)
[f1ba5d6]234{
[062d900]235 aoff64_t *lba = (aoff64_t*)key;
236 return *lba;
[f1ba5d6]237}
238
[062d900]239static size_t cache_hash(const ht_link_t *item)
[f1ba5d6]240{
[062d900]241 block_t *b = hash_table_get_inst(item, block_t, hash_link);
242 return b->lba;
[f1ba5d6]243}
244
[062d900]245static bool cache_key_equal(void *key, const ht_link_t *item)
[f1ba5d6]246{
[062d900]247 aoff64_t *lba = (aoff64_t*)key;
248 block_t *b = hash_table_get_inst(item, block_t, hash_link);
249 return b->lba == *lba;
[f1ba5d6]250}
251
[062d900]252
253static hash_table_ops_t cache_ops = {
[f1ba5d6]254 .hash = cache_hash,
[062d900]255 .key_hash = cache_key_hash,
256 .key_equal = cache_key_equal,
[4e00f87]257 .equal = NULL,
258 .remove_callback = NULL
[f1ba5d6]259};
260
[15f3c3f]261int block_cache_init(service_id_t service_id, size_t size, unsigned blocks,
[1fbe064b]262 enum cache_mode mode)
[f1ba5d6]263{
[15f3c3f]264 devcon_t *devcon = devcon_search(service_id);
[f1ba5d6]265 cache_t *cache;
266 if (!devcon)
267 return ENOENT;
268 if (devcon->cache)
269 return EEXIST;
270 cache = malloc(sizeof(cache_t));
271 if (!cache)
272 return ENOMEM;
273
[4e1b57d]274 fibril_mutex_initialize(&cache->lock);
[b72efe8]275 list_initialize(&cache->free_list);
[1ee00b7]276 cache->lblock_size = size;
[f1ba5d6]277 cache->block_count = blocks;
[d68e4d5]278 cache->blocks_cached = 0;
[1fbe064b]279 cache->mode = mode;
[f1ba5d6]280
[f092718]281 /* Allow 1:1 or small-to-large block size translation */
[37cf3792]282 if (cache->lblock_size % devcon->pblock_size != 0) {
283 free(cache);
[f092718]284 return ENOTSUP;
[37cf3792]285 }
[f092718]286
287 cache->blocks_cluster = cache->lblock_size / devcon->pblock_size;
[1ee00b7]288
[062d900]289 if (!hash_table_create(&cache->block_hash, 0, 0, &cache_ops)) {
[f1ba5d6]290 free(cache);
291 return ENOMEM;
292 }
293
294 devcon->cache = cache;
295 return EOK;
296}
297
[15f3c3f]298int block_cache_fini(service_id_t service_id)
[64bc4b6]299{
[15f3c3f]300 devcon_t *devcon = devcon_search(service_id);
[64bc4b6]301 cache_t *cache;
302 int rc;
303
304 if (!devcon)
305 return ENOENT;
306 if (!devcon->cache)
307 return EOK;
308 cache = devcon->cache;
309
310 /*
311 * We are expecting to find all blocks for this device handle on the
312 * free list, i.e. the block reference count should be zero. Do not
313 * bother with the cache and block locks because we are single-threaded.
314 */
[b72efe8]315 while (!list_empty(&cache->free_list)) {
316 block_t *b = list_get_instance(list_first(&cache->free_list),
[64bc4b6]317 block_t, free_link);
318
319 list_remove(&b->free_link);
320 if (b->dirty) {
[4802dd7]321 rc = write_blocks(devcon, b->pba, cache->blocks_cluster,
322 b->data, b->size);
[64bc4b6]323 if (rc != EOK)
324 return rc;
325 }
326
[062d900]327 hash_table_remove_item(&cache->block_hash, &b->hash_link);
[64bc4b6]328
329 free(b->data);
330 free(b);
331 }
332
333 hash_table_destroy(&cache->block_hash);
334 devcon->cache = NULL;
335 free(cache);
336
337 return EOK;
338}
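
/*
 * Illustrative usage sketch (not part of the original file): enabling the
 * block cache on a device. The 1024-byte logical block size, the block
 * count of 100 and write-back mode are example values; the logical block
 * size must be a multiple of the device's physical block size, otherwise
 * block_cache_init() returns ENOTSUP.
 */
static int example_enable_cache(service_id_t service_id)
{
	int rc = block_cache_init(service_id, 1024, 100, CACHE_MODE_WB);
	if (rc != EOK)
		return rc;

	/* ... block_get()/block_put() may now be used on this device ... */

	return block_cache_fini(service_id);
}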
339
[d68e4d5]340#define CACHE_LO_WATERMARK 10
341#define CACHE_HI_WATERMARK 20
[e1c88d5]342static bool cache_can_grow(cache_t *cache)
[fc840d9]343{
[d68e4d5]344 if (cache->blocks_cached < CACHE_LO_WATERMARK)
345 return true;
[b72efe8]346 if (!list_empty(&cache->free_list))
[d68e4d5]347 return false;
[e1c88d5]348 return true;
349}
350
351static void block_initialize(block_t *b)
352{
[4e1b57d]353 fibril_mutex_initialize(&b->lock);
[e1c88d5]354 b->refcnt = 1;
355 b->dirty = false;
[cd688d9]356 b->toxic = false;
[4e1b57d]357 fibril_rwlock_initialize(&b->contents_lock);
[e1c88d5]358 link_initialize(&b->free_link);
359}
360
361/** Instantiate a block in memory and get a reference to it.
362 *
[c91f2d1b]363 * @param block Pointer to where the function will store the
364 * block pointer on success.
[15f3c3f]365 * @param service_id Service ID of the block device.
[a6ba0c9]366 * @param ba Block address (logical).
[1d8cdb1]367 * @param flags If BLOCK_FLAGS_NOREAD is specified, block_get()
368 * will not read the contents of the block from the
369 * device.
[e1c88d5]370 *
[c91f2d1b]371 * @return EOK on success or a negative error code.
[e1c88d5]372 */
[15f3c3f]373int block_get(block_t **block, service_id_t service_id, aoff64_t ba, int flags)
[e1c88d5]374{
375 devcon_t *devcon;
376 cache_t *cache;
[fc840d9]377 block_t *b;
[062d900]378 link_t *link;
[867e2555]379
[b7b3fda]380 int rc;
[e1c88d5]381
[15f3c3f]382 devcon = devcon_search(service_id);
[fc840d9]383
[e1c88d5]384 assert(devcon);
385 assert(devcon->cache);
[fc840d9]386
[e1c88d5]387 cache = devcon->cache;
[02ee6bf5]388
389retry:
[b7b3fda]390 rc = EOK;
[4f690cd]391 b = NULL;
[b7b3fda]392
[4e1b57d]393 fibril_mutex_lock(&cache->lock);
[062d900]394 ht_link_t *hlink = hash_table_find(&cache->block_hash, &ba);
395 if (hlink) {
[5716e9a]396found:
[e1c88d5]397 /*
398 * We found the block in the cache.
399 */
[062d900]400 b = hash_table_get_inst(hlink, block_t, hash_link);
[4e1b57d]401 fibril_mutex_lock(&b->lock);
[e1c88d5]402 if (b->refcnt++ == 0)
403 list_remove(&b->free_link);
[402a18f]404 if (b->toxic)
405 rc = EIO;
[4e1b57d]406 fibril_mutex_unlock(&b->lock);
407 fibril_mutex_unlock(&cache->lock);
[e1c88d5]408 } else {
409 /*
410 * The block was not found in the cache.
411 */
412 if (cache_can_grow(cache)) {
413 /*
414 * We can grow the cache by allocating new blocks.
415 * Should the allocation fail, we fail over and try to
416 * recycle a block from the cache.
417 */
418 b = malloc(sizeof(block_t));
419 if (!b)
420 goto recycle;
[1ee00b7]421 b->data = malloc(cache->lblock_size);
[e1c88d5]422 if (!b->data) {
423 free(b);
[0dfaa099]424 b = NULL;
[e1c88d5]425 goto recycle;
426 }
[d68e4d5]427 cache->blocks_cached++;
[e1c88d5]428 } else {
429 /*
430 * Try to recycle a block from the free list.
431 */
432recycle:
[b72efe8]433 if (list_empty(&cache->free_list)) {
[7a56b1ed]434 fibril_mutex_unlock(&cache->lock);
435 rc = ENOMEM;
436 goto out;
437 }
[062d900]438 link = list_first(&cache->free_list);
439 b = list_get_instance(link, block_t, free_link);
[02ee6bf5]440
441 fibril_mutex_lock(&b->lock);
442 if (b->dirty) {
443 /*
444 * The block needs to be written back to the
445 * device before it changes identity. Do this
446 * while not holding the cache lock so that
447 * concurrency is not impeded. Also move the
448 * block to the end of the free list so that we
449 * do not slow down other instances of
450 * block_get() draining the free list.
451 */
452 list_remove(&b->free_link);
[b72efe8]453 list_append(&b->free_link, &cache->free_list);
[02ee6bf5]454 fibril_mutex_unlock(&cache->lock);
[f092718]455 rc = write_blocks(devcon, b->pba,
[4802dd7]456 cache->blocks_cluster, b->data, b->size);
[402a18f]457 if (rc != EOK) {
458 /*
459 * We did not manage to write the block
460 * to the device. Keep it around for
461 * another try. Hopefully, we will grab
462 * another block next time.
463 */
464 fibril_mutex_unlock(&b->lock);
465 goto retry;
466 }
[02ee6bf5]467 b->dirty = false;
468 if (!fibril_mutex_trylock(&cache->lock)) {
469 /*
470 * Somebody is probably racing with us.
471 * Unlock the block and retry.
472 */
473 fibril_mutex_unlock(&b->lock);
474 goto retry;
475 }
[062d900]476 hlink = hash_table_find(&cache->block_hash, &ba);
477 if (hlink) {
[5716e9a]478 /*
479 * Someone else must have already
480 * instantiated the block while we were
481 * not holding the cache lock.
482 * Leave the recycled block on the
483 * freelist and continue as if we
484 * found the block of interest during
485 * the first try.
486 */
487 fibril_mutex_unlock(&b->lock);
488 goto found;
489 }
[02ee6bf5]490
491 }
492 fibril_mutex_unlock(&b->lock);
493
494 /*
495 * Unlink the block from the free list and the hash
496 * table.
497 */
498 list_remove(&b->free_link);
[062d900]499 hash_table_remove_item(&cache->block_hash, &b->hash_link);
[e1c88d5]500 }
[fc840d9]501
[e1c88d5]502 block_initialize(b);
[15f3c3f]503 b->service_id = service_id;
[1ee00b7]504 b->size = cache->lblock_size;
[a6ba0c9]505 b->lba = ba;
506 b->pba = ba_ltop(devcon, b->lba);
[062d900]507 hash_table_insert(&cache->block_hash, &b->hash_link);
[a6d97fb9]508
509 /*
510 * Lock the block before releasing the cache lock. Thus we don't
[5ac8918]511 * kill concurrent operations on the cache while doing I/O on
512 * the block.
[a6d97fb9]513 */
[4e1b57d]514 fibril_mutex_lock(&b->lock);
515 fibril_mutex_unlock(&cache->lock);
[a6d97fb9]516
[1d8cdb1]517 if (!(flags & BLOCK_FLAGS_NOREAD)) {
518 /*
519 * The block contains old or no data. We need to read
520 * the new contents from the device.
521 */
[4802dd7]522 rc = read_blocks(devcon, b->pba, cache->blocks_cluster,
523 b->data, cache->lblock_size);
[402a18f]524 if (rc != EOK)
525 b->toxic = true;
526 } else
527 rc = EOK;
[fc840d9]528
[4e1b57d]529 fibril_mutex_unlock(&b->lock);
[a6d97fb9]530 }
[7a56b1ed]531out:
[4f690cd]532 if ((rc != EOK) && b) {
533 assert(b->toxic);
534 (void) block_put(b);
535 b = NULL;
536 }
[c91f2d1b]537 *block = b;
[402a18f]538 return rc;
[fc840d9]539}
540
[d5a720cf]541/** Release a reference to a block.
542 *
[a6d97fb9]543 * If the last reference is dropped, the block is put on the free list.
[d5a720cf]544 *
545 * @param block Block of which a reference is to be released.
[c91f2d1b]546 *
547 * @return EOK on success or a negative error code.
[d5a720cf]548 */
[c91f2d1b]549int block_put(block_t *block)
[fc840d9]550{
[15f3c3f]551 devcon_t *devcon = devcon_search(block->service_id);
[d5a720cf]552 cache_t *cache;
[ddfc39a3]553 unsigned blocks_cached;
554 enum cache_mode mode;
[402a18f]555 int rc = EOK;
[d5a720cf]556
557 assert(devcon);
558 assert(devcon->cache);
[0f1cf7a]559 assert(block->refcnt >= 1);
[d5a720cf]560
561 cache = devcon->cache;
[ddfc39a3]562
563retry:
564 fibril_mutex_lock(&cache->lock);
565 blocks_cached = cache->blocks_cached;
566 mode = cache->mode;
567 fibril_mutex_unlock(&cache->lock);
568
569 /*
570 * Determine whether to sync the block. Syncing the block is best done
571 * when not holding the cache lock as it does not impede concurrency.
572 * Since the situation may have changed when we unlocked the cache, the
573 * blocks_cached and mode variables are mere hints. We will recheck the
574 * conditions later when the cache lock is held again.
575 */
576 fibril_mutex_lock(&block->lock);
[402a18f]577 if (block->toxic)
578 block->dirty = false; /* will not write back toxic block */
[ddfc39a3]579 if (block->dirty && (block->refcnt == 1) &&
580 (blocks_cached > CACHE_HI_WATERMARK || mode != CACHE_MODE_WB)) {
[4802dd7]581 rc = write_blocks(devcon, block->pba, cache->blocks_cluster,
582 block->data, block->size);
[ddfc39a3]583 block->dirty = false;
584 }
585 fibril_mutex_unlock(&block->lock);
586
[4e1b57d]587 fibril_mutex_lock(&cache->lock);
588 fibril_mutex_lock(&block->lock);
[d5a720cf]589 if (!--block->refcnt) {
590 /*
[d68e4d5]591 * Last reference to the block was dropped. Either free the
[402a18f]592 * block or put it on the free list. In case of an I/O error,
593 * free the block.
[d68e4d5]594 */
[402a18f]595 if ((cache->blocks_cached > CACHE_HI_WATERMARK) ||
596 (rc != EOK)) {
[d68e4d5]597 /*
[402a18f]598 * Currently there are too many cached blocks or there
599 * was an I/O error when writing the block back to the
600 * device.
[d68e4d5]601 */
602 if (block->dirty) {
[ddfc39a3]603 /*
604 * We cannot sync the block while holding the
605 * cache lock. Release everything and retry.
606 */
607 block->refcnt++;
608 fibril_mutex_unlock(&block->lock);
609 fibril_mutex_unlock(&cache->lock);
610 goto retry;
[d68e4d5]611 }
612 /*
613 * Take the block out of the cache and free it.
614 */
[062d900]615 hash_table_remove_item(&cache->block_hash, &block->hash_link);
[956d4df8]616 fibril_mutex_unlock(&block->lock);
[d68e4d5]617 free(block->data);
[b9e6205]618 free(block);
[d68e4d5]619 cache->blocks_cached--;
620 fibril_mutex_unlock(&cache->lock);
[402a18f]621 return rc;
[d68e4d5]622 }
623 /*
624 * Put the block on the free list.
[d5a720cf]625 */
[1fbe064b]626 if (cache->mode != CACHE_MODE_WB && block->dirty) {
[ddfc39a3]627 /*
628 * We cannot sync the block while holding the cache
629 * lock. Release everything and retry.
630 */
631 block->refcnt++;
632 fibril_mutex_unlock(&block->lock);
633 fibril_mutex_unlock(&cache->lock);
634 goto retry;
[1fbe064b]635 }
[b72efe8]636 list_append(&block->free_link, &cache->free_list);
[d5a720cf]637 }
[4e1b57d]638 fibril_mutex_unlock(&block->lock);
639 fibril_mutex_unlock(&cache->lock);
[c91f2d1b]640
[402a18f]641 return rc;
[d5a720cf]642}
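
/*
 * Illustrative usage sketch (not part of the original file): a typical
 * read-modify-write cycle through the cache. BLOCK_FLAGS_NONE is assumed
 * to be the "no flags" value declared in block.h; writing 0xff to the
 * first byte is an arbitrary example modification.
 */
static int example_modify_block(service_id_t service_id, aoff64_t lba)
{
	block_t *b;

	/* Instantiate the logical block; its contents are read in because
	   BLOCK_FLAGS_NOREAD is not set. */
	int rc = block_get(&b, service_id, lba, BLOCK_FLAGS_NONE);
	if (rc != EOK)
		return rc;

	/* Modify the data and mark the block dirty so it gets written back. */
	((uint8_t *) b->data)[0] = 0xff;
	b->dirty = true;

	/* Drop the reference; write-back timing depends on the cache mode. */
	return block_put(b);
}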
643
[6408be3]644/** Read sequential data from a block device.
[d5a720cf]645 *
[15f3c3f]646 * @param service_id Service ID of the block device.
[4802dd7]647 * @param buf Communication buffer for holding one physical block.
[d5a720cf]648 * @param bufpos Pointer to the first unread valid offset within the
649 * communication buffer.
650 * @param buflen Pointer to the number of unread bytes that are ready in
651 * the communication buffer.
652 * @param pos Device position to be read.
653 * @param dst Destination buffer.
654 * @param size Size of the destination buffer.
656 *
657 * @return EOK on success or a negative return code on failure.
658 */
[4802dd7]659int block_seqread(service_id_t service_id, void *buf, size_t *bufpos,
660 size_t *buflen, aoff64_t *pos, void *dst, size_t size)
[d5a720cf]661{
[ed903174]662 size_t offset = 0;
[d5a720cf]663 size_t left = size;
[1ee00b7]664 size_t block_size;
665 devcon_t *devcon;
666
[15f3c3f]667 devcon = devcon_search(service_id);
[d5a720cf]668 assert(devcon);
[1ee00b7]669 block_size = devcon->pblock_size;
[e1c88d5]670
[d5a720cf]671 while (left > 0) {
672 size_t rd;
673
674 if (*bufpos + left < *buflen)
675 rd = left;
676 else
677 rd = *buflen - *bufpos;
678
679 if (rd > 0) {
680 /*
681 * Copy the contents of the communication buffer to the
682 * destination buffer.
683 */
[4802dd7]684 memcpy(dst + offset, buf + *bufpos, rd);
[d5a720cf]685 offset += rd;
686 *bufpos += rd;
687 *pos += rd;
688 left -= rd;
689 }
690
[ed903174]691 if (*bufpos == *buflen) {
[d5a720cf]692 /* Refill the communication buffer with a new block. */
[6408be3]693 int rc;
694
[4802dd7]695 rc = read_blocks(devcon, *pos / block_size, 1, buf,
696 devcon->pblock_size);
[d68e4d5]697 if (rc != EOK) {
[6408be3]698 return rc;
[d68e4d5]699 }
[d5a720cf]700
701 *bufpos = 0;
702 *buflen = block_size;
703 }
704 }
705
706 return EOK;
[fc840d9]707}
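
/*
 * Illustrative usage sketch (not part of the original file): reading bytes
 * from the start of a device sequentially with block_seqread(). The
 * communication buffer must be one physical block in size and bufpos,
 * buflen and pos all start at zero in this example.
 */
static int example_seqread(service_id_t service_id, void *dst, size_t size)
{
	size_t bsize;
	int rc = block_get_bsize(service_id, &bsize);
	if (rc != EOK)
		return rc;

	void *buf = malloc(bsize);
	if (!buf)
		return ENOMEM;

	size_t bufpos = 0;
	size_t buflen = 0;
	aoff64_t pos = 0;

	rc = block_seqread(service_id, buf, &bufpos, &buflen, &pos, dst, size);
	free(buf);
	return rc;
}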
708
[00b1d20e]709/** Read blocks directly from device (bypass cache).
710 *
[15f3c3f]711 * @param service_id Service ID of the block device.
[a6ba0c9]712 * @param ba Address of first block (physical).
[00b1d20e]713 * @param cnt Number of blocks.
 714 * @param buf Buffer for storing the data.
715 *
716 * @return EOK on success or negative error code on failure.
717 */
[15f3c3f]718int block_read_direct(service_id_t service_id, aoff64_t ba, size_t cnt, void *buf)
[00b1d20e]719{
720 devcon_t *devcon;
721
[15f3c3f]722 devcon = devcon_search(service_id);
[00b1d20e]723 assert(devcon);
724
[4802dd7]725 return read_blocks(devcon, ba, cnt, buf, devcon->pblock_size * cnt);
[00b1d20e]726}
727
728/** Write blocks directly to device (bypass cache).
729 *
[15f3c3f]730 * @param service_id Service ID of the block device.
[a6ba0c9]731 * @param ba Address of first block (physical).
[00b1d20e]732 * @param cnt Number of blocks.
 733 * @param data The data to be written.
734 *
735 * @return EOK on success or negative error code on failure.
736 */
[15f3c3f]737int block_write_direct(service_id_t service_id, aoff64_t ba, size_t cnt,
[00b1d20e]738 const void *data)
739{
740 devcon_t *devcon;
741
[15f3c3f]742 devcon = devcon_search(service_id);
[00b1d20e]743 assert(devcon);
744
[4802dd7]745 return write_blocks(devcon, ba, cnt, (void *)data, devcon->pblock_size * cnt);
[00b1d20e]746}
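
/*
 * Illustrative usage sketch (not part of the original file): a cache-
 * bypassing round trip that reads physical block 0 and writes it back
 * unchanged. The buffer must be cnt times the physical block size; block
 * address 0 and count 1 are example values.
 */
static int example_direct_roundtrip(service_id_t service_id)
{
	size_t bsize;
	int rc = block_get_bsize(service_id, &bsize);
	if (rc != EOK)
		return rc;

	void *buf = malloc(bsize);
	if (!buf)
		return ENOMEM;

	rc = block_read_direct(service_id, 0, 1, buf);
	if (rc == EOK)
		rc = block_write_direct(service_id, 0, 1, buf);

	free(buf);
	return rc;
}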
747
748/** Get device block size.
749 *
[15f3c3f]750 * @param service_id Service ID of the block device.
[00b1d20e]751 * @param bsize Output block size.
752 *
753 * @return EOK on success or negative error code on failure.
754 */
[15f3c3f]755int block_get_bsize(service_id_t service_id, size_t *bsize)
[00b1d20e]756{
757 devcon_t *devcon;
758
[15f3c3f]759 devcon = devcon_search(service_id);
[00b1d20e]760 assert(devcon);
[4802dd7]761
762 return bd_get_block_size(devcon->bd, bsize);
[00b1d20e]763}
764
[08232ee]765/** Get number of blocks on device.
766 *
[15f3c3f]767 * @param service_id Service ID of the block device.
[08232ee]768 * @param nblocks Output number of blocks.
769 *
770 * @return EOK on success or negative error code on failure.
771 */
[15f3c3f]772int block_get_nblocks(service_id_t service_id, aoff64_t *nblocks)
[08232ee]773{
[15f3c3f]774 devcon_t *devcon = devcon_search(service_id);
[08232ee]775 assert(devcon);
776
[4802dd7]777 return bd_get_num_blocks(devcon->bd, nblocks);
[08232ee]778}
779
[e272949]780/** Read bytes directly from the device (bypass cache).
781 *
[15f3c3f]782 * @param service_id Service ID of the block device.
[e272949]783 * @param abs_offset Absolute offset in bytes where to start reading
784 * @param bytes Number of bytes to read
785 * @param data Buffer that receives the data
786 *
787 * @return EOK on success or negative error code on failure.
788 */
[15f3c3f]789int block_read_bytes_direct(service_id_t service_id, aoff64_t abs_offset,
[e272949]790 size_t bytes, void *data)
791{
792 int rc;
793 size_t phys_block_size;
794 size_t buf_size;
795 void *buffer;
796 aoff64_t first_block;
797 aoff64_t last_block;
798 size_t blocks;
799 size_t offset;
800
[15f3c3f]801 rc = block_get_bsize(service_id, &phys_block_size);
[e272949]802 if (rc != EOK) {
803 return rc;
804 }
805
[c4aa9cf]806 /* calculate data position and required space */
[e272949]807 first_block = abs_offset / phys_block_size;
808 offset = abs_offset % phys_block_size;
809 last_block = (abs_offset + bytes - 1) / phys_block_size;
810 blocks = last_block - first_block + 1;
811 buf_size = blocks * phys_block_size;
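	/*
	 * Worked example (illustrative): with a 512-byte physical block size,
	 * abs_offset = 1000 and bytes = 100 yield first_block = 1, offset = 488,
	 * last_block = 2, blocks = 2 and buf_size = 1024, i.e. the request
	 * spans two physical blocks.
	 */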
812
[c4aa9cf]813 /* read the data into memory */
[e272949]814 buffer = malloc(buf_size);
815 if (buffer == NULL) {
816 return ENOMEM;
817 }
818
[15f3c3f]819 rc = block_read_direct(service_id, first_block, blocks, buffer);
[e272949]820 if (rc != EOK) {
821 free(buffer);
822 return rc;
823 }
824
[c4aa9cf]825 /* copy the data from the buffer */
[e272949]826 memcpy(data, buffer + offset, bytes);
827 free(buffer);
[f73b291]828
[e272949]829 return EOK;
830}
831
[4046b2f4]832/** Get TOC from device.
833 *
834 * @param service_id Service ID of the block device.
835 * @param session Starting session.
836 *
[08cba4b]837 * @return Allocated TOC structure.
838 * @return NULL on failure.
[4046b2f4]839 *
840 */
[08cba4b]841toc_block_t *block_get_toc(service_id_t service_id, uint8_t session)
[4046b2f4]842{
843 devcon_t *devcon = devcon_search(service_id);
[08cba4b]844 toc_block_t *toc = NULL;
[4802dd7]845 int rc;
[08cba4b]846
[4802dd7]847 assert(devcon);
[4046b2f4]848
[4802dd7]849 toc = (toc_block_t *) malloc(sizeof(toc_block_t));
850 if (toc == NULL)
851 return NULL;
[08cba4b]852
[4802dd7]853 rc = bd_read_toc(devcon->bd, session, toc, sizeof(toc_block_t));
854 if (rc != EOK) {
855 free(toc);
856 return NULL;
[08cba4b]857 }
858
859 return toc;
[4046b2f4]860}
861
[1ee00b7]862/** Read blocks from block device.
[6408be3]863 *
864 * @param devcon Device connection.
[1ee00b7]865 * @param ba Address of first block.
866 * @param cnt Number of blocks.
[6408be3]867 * @param buf Buffer for storing the data.
868 *
869 * @return EOK on success or negative error code on failure.
870 */
[4802dd7]871static int read_blocks(devcon_t *devcon, aoff64_t ba, size_t cnt, void *buf,
872 size_t size)
[6408be3]873{
874 assert(devcon);
[79ae36dd]875
[4802dd7]876 int rc = bd_read_blocks(devcon->bd, ba, cnt, buf, size);
[16fc3c9]877 if (rc != EOK) {
[7e752b2]878 printf("Error %d reading %zu blocks starting at block %" PRIuOFF64
879 " from device handle %" PRIun "\n", rc, cnt, ba,
[15f3c3f]880 devcon->service_id);
[16fc3c9]881#ifndef NDEBUG
882 stacktrace_print();
883#endif
884 }
[79ae36dd]885
[1ee00b7]886 return rc;
[6408be3]887}
888
[1fbe064b]889/** Write blocks to block device.
890 *
891 * @param devcon Device connection.
[1ee00b7]892 * @param ba Address of first block.
893 * @param cnt Number of blocks.
[1fbe064b]894 * @param src Buffer containing the data to write.
895 *
896 * @return EOK on success or negative error code on failure.
897 */
[4802dd7]898static int write_blocks(devcon_t *devcon, aoff64_t ba, size_t cnt, void *data,
899 size_t size)
[1fbe064b]900{
901 assert(devcon);
[79ae36dd]902
[4802dd7]903 int rc = bd_write_blocks(devcon->bd, ba, cnt, data, size);
[16fc3c9]904 if (rc != EOK) {
[7e752b2]905 printf("Error %d writing %zu blocks starting at block %" PRIuOFF64
[15f3c3f]906 " to device handle %" PRIun "\n", rc, cnt, ba, devcon->service_id);
[16fc3c9]907#ifndef NDEBUG
908 stacktrace_print();
909#endif
910 }
[79ae36dd]911
[1ee00b7]912 return rc;
913}
[1fbe064b]914
[f092718]915/** Convert logical block address to physical block address. */
916static aoff64_t ba_ltop(devcon_t *devcon, aoff64_t lba)
917{
918 assert(devcon->cache != NULL);
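	/*
	 * Worked example (illustrative): with 512-byte physical blocks and a
	 * 4096-byte logical block size, blocks_cluster is 8, so logical block
	 * address 3 maps to physical block address 24.
	 */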
919 return lba * devcon->cache->blocks_cluster;
920}
921
[fc840d9]922/** @}
923 */