source: mainline/uspace/lib/block/block.c @ eeb5cc2

Last change on this file since eeb5cc2 was feeac0d, checked in by Jiri Svoboda <jiri@…>, 12 years ago

Simplify use of list_foreach.

/*
 * Copyright (c) 2008 Jakub Jermar
 * Copyright (c) 2008 Martin Decky
 * Copyright (c) 2011 Martin Sucha
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup libblock
 * @{
 */
/**
 * @file
 * @brief
 */

#include "../../srv/vfs/vfs.h"
#include <ipc/loc.h>
#include <ipc/services.h>
#include <errno.h>
#include <sys/mman.h>
#include <async.h>
#include <as.h>
#include <assert.h>
#include <bd.h>
#include <fibril_synch.h>
#include <adt/list.h>
#include <adt/hash_table.h>
#include <macros.h>
#include <mem.h>
#include <malloc.h>
#include <stdio.h>
#include <sys/typefmt.h>
#include <stacktrace.h>
#include "block.h"

/** Lock protecting the device connection list */
static FIBRIL_MUTEX_INITIALIZE(dcl_lock);
/** Device connection list head. */
static LIST_INITIALIZE(dcl);


typedef struct {
	fibril_mutex_t lock;
	size_t lblock_size;		/**< Logical block size. */
	unsigned blocks_cluster;	/**< Physical blocks per block_t */
	unsigned block_count;		/**< Total number of blocks. */
	unsigned blocks_cached;		/**< Number of cached blocks. */
	hash_table_t block_hash;
	list_t free_list;
	enum cache_mode mode;
} cache_t;

typedef struct {
	link_t link;
	service_id_t service_id;
	async_sess_t *sess;
	bd_t *bd;
	void *bb_buf;
	aoff64_t bb_addr;
	size_t pblock_size;		/**< Physical block size. */
	cache_t *cache;
} devcon_t;

static int read_blocks(devcon_t *, aoff64_t, size_t, void *, size_t);
static int write_blocks(devcon_t *, aoff64_t, size_t, void *, size_t);
static aoff64_t ba_ltop(devcon_t *, aoff64_t);

static devcon_t *devcon_search(service_id_t service_id)
{
	fibril_mutex_lock(&dcl_lock);

	list_foreach(dcl, link, devcon_t, devcon) {
		if (devcon->service_id == service_id) {
			fibril_mutex_unlock(&dcl_lock);
			return devcon;
		}
	}

	fibril_mutex_unlock(&dcl_lock);
	return NULL;
}

static int devcon_add(service_id_t service_id, async_sess_t *sess,
    size_t bsize, bd_t *bd)
{
	devcon_t *devcon;

	devcon = malloc(sizeof(devcon_t));
	if (!devcon)
		return ENOMEM;

	link_initialize(&devcon->link);
	devcon->service_id = service_id;
	devcon->sess = sess;
	devcon->bd = bd;
	devcon->bb_buf = NULL;
	devcon->bb_addr = 0;
	devcon->pblock_size = bsize;
	devcon->cache = NULL;

	fibril_mutex_lock(&dcl_lock);
	list_foreach(dcl, link, devcon_t, d) {
		if (d->service_id == service_id) {
			fibril_mutex_unlock(&dcl_lock);
			free(devcon);
			return EEXIST;
		}
	}
	list_append(&devcon->link, &dcl);
	fibril_mutex_unlock(&dcl_lock);
	return EOK;
}

static void devcon_remove(devcon_t *devcon)
{
	fibril_mutex_lock(&dcl_lock);
	list_remove(&devcon->link);
	fibril_mutex_unlock(&dcl_lock);
}

int block_init(exch_mgmt_t mgmt, service_id_t service_id,
    size_t comm_size)
{
	bd_t *bd;

	async_sess_t *sess = loc_service_connect(mgmt, service_id,
	    IPC_FLAG_BLOCKING);
	if (!sess) {
		return ENOENT;
	}

	int rc = bd_open(sess, &bd);
	if (rc != EOK) {
		async_hangup(sess);
		return rc;
	}

	size_t bsize;
	rc = bd_get_block_size(bd, &bsize);
	if (rc != EOK) {
		bd_close(bd);
		async_hangup(sess);
		return rc;
	}

	rc = devcon_add(service_id, sess, bsize, bd);
	if (rc != EOK) {
		bd_close(bd);
		async_hangup(sess);
		return rc;
	}

	return EOK;
}

void block_fini(service_id_t service_id)
{
	devcon_t *devcon = devcon_search(service_id);
	assert(devcon);

	if (devcon->cache)
		(void) block_cache_fini(service_id);

	devcon_remove(devcon);

	if (devcon->bb_buf)
		free(devcon->bb_buf);

	bd_close(devcon->bd);
	async_hangup(devcon->sess);

	free(devcon);
}
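
/** Example: opening and closing a libblock session (illustrative sketch).
 *
 * The snippet below is not part of the library; it only shows the intended
 * call order for a client such as a file system driver. The service ID is
 * assumed to have been obtained from the location service beforehand, and
 * EXCHANGE_SERIALIZE plus the 2048-byte communication area are illustrative
 * choices, not requirements of this library.
 *
 * @code
 *	int example_session(service_id_t sid)
 *	{
 *		int rc = block_init(EXCHANGE_SERIALIZE, sid, 2048);
 *		if (rc != EOK)
 *			return rc;	// session and bd handle already cleaned up
 *
 *		// ... block_cache_init(), block_get()/block_put(),
 *		//     or the *_direct() helpers go here ...
 *
 *		block_fini(sid);	// closes bd, hangs up the session, frees devcon
 *		return EOK;
 *	}
 * @endcode
 */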

int block_bb_read(service_id_t service_id, aoff64_t ba)
{
	void *bb_buf;
	int rc;

	devcon_t *devcon = devcon_search(service_id);
	if (!devcon)
		return ENOENT;
	if (devcon->bb_buf)
		return EEXIST;
	bb_buf = malloc(devcon->pblock_size);
	if (!bb_buf)
		return ENOMEM;

	rc = read_blocks(devcon, 0, 1, bb_buf, devcon->pblock_size);
	if (rc != EOK) {
		free(bb_buf);
		return rc;
	}

	devcon->bb_buf = bb_buf;
	devcon->bb_addr = ba;

	return EOK;
}

void *block_bb_get(service_id_t service_id)
{
	devcon_t *devcon = devcon_search(service_id);
	assert(devcon);
	return devcon->bb_buf;
}

static size_t cache_key_hash(void *key)
{
	aoff64_t *lba = (aoff64_t *) key;
	return *lba;
}

static size_t cache_hash(const ht_link_t *item)
{
	block_t *b = hash_table_get_inst(item, block_t, hash_link);
	return b->lba;
}

static bool cache_key_equal(void *key, const ht_link_t *item)
{
	aoff64_t *lba = (aoff64_t *) key;
	block_t *b = hash_table_get_inst(item, block_t, hash_link);
	return b->lba == *lba;
}


static hash_table_ops_t cache_ops = {
	.hash = cache_hash,
	.key_hash = cache_key_hash,
	.key_equal = cache_key_equal,
	.equal = NULL,
	.remove_callback = NULL
};

int block_cache_init(service_id_t service_id, size_t size, unsigned blocks,
    enum cache_mode mode)
{
	devcon_t *devcon = devcon_search(service_id);
	cache_t *cache;
	if (!devcon)
		return ENOENT;
	if (devcon->cache)
		return EEXIST;
	cache = malloc(sizeof(cache_t));
	if (!cache)
		return ENOMEM;

	fibril_mutex_initialize(&cache->lock);
	list_initialize(&cache->free_list);
	cache->lblock_size = size;
	cache->block_count = blocks;
	cache->blocks_cached = 0;
	cache->mode = mode;

	/* Allow 1:1 or small-to-large block size translation */
	if (cache->lblock_size % devcon->pblock_size != 0) {
		free(cache);
		return ENOTSUP;
	}

	cache->blocks_cluster = cache->lblock_size / devcon->pblock_size;

	if (!hash_table_create(&cache->block_hash, 0, 0, &cache_ops)) {
		free(cache);
		return ENOMEM;
	}

	devcon->cache = cache;
	return EOK;
}
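
/** Example: enabling the block cache (illustrative sketch).
 *
 * The logical block size must be a multiple of the device's physical block
 * size; e.g. 1024-byte logical blocks on a 512-byte device give
 * blocks_cluster == 2. The block count passed below is taken to be the
 * device capacity in logical blocks, which is an assumption of this sketch.
 *
 * @code
 *	int example_enable_cache(service_id_t sid)
 *	{
 *		size_t pbsize;
 *		aoff64_t pblocks;
 *
 *		int rc = block_get_bsize(sid, &pbsize);
 *		if (rc != EOK)
 *			return rc;
 *
 *		rc = block_get_nblocks(sid, &pblocks);
 *		if (rc != EOK)
 *			return rc;
 *
 *		// Two physical blocks per logical block (small-to-large).
 *		return block_cache_init(sid, 2 * pbsize,
 *		    (unsigned) (pblocks / 2), CACHE_MODE_WB);
 *	}
 * @endcode
 */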

int block_cache_fini(service_id_t service_id)
{
	devcon_t *devcon = devcon_search(service_id);
	cache_t *cache;
	int rc;

	if (!devcon)
		return ENOENT;
	if (!devcon->cache)
		return EOK;
	cache = devcon->cache;

	/*
	 * We are expecting to find all blocks for this device handle on the
	 * free list, i.e. the block reference count should be zero. Do not
	 * bother with the cache and block locks because we are single-threaded.
	 */
	while (!list_empty(&cache->free_list)) {
		block_t *b = list_get_instance(list_first(&cache->free_list),
		    block_t, free_link);

		list_remove(&b->free_link);
		if (b->dirty) {
			rc = write_blocks(devcon, b->pba, cache->blocks_cluster,
			    b->data, b->size);
			if (rc != EOK)
				return rc;
		}

		hash_table_remove_item(&cache->block_hash, &b->hash_link);

		free(b->data);
		free(b);
	}

	hash_table_destroy(&cache->block_hash);
	devcon->cache = NULL;
	free(cache);

	return EOK;
}

#define CACHE_LO_WATERMARK	10
#define CACHE_HI_WATERMARK	20
static bool cache_can_grow(cache_t *cache)
{
	if (cache->blocks_cached < CACHE_LO_WATERMARK)
		return true;
	if (!list_empty(&cache->free_list))
		return false;
	return true;
}

static void block_initialize(block_t *b)
{
	fibril_mutex_initialize(&b->lock);
	b->refcnt = 1;
	b->dirty = false;
	b->toxic = false;
	fibril_rwlock_initialize(&b->contents_lock);
	link_initialize(&b->free_link);
}

/** Instantiate a block in memory and get a reference to it.
 *
 * @param block		Pointer to where the function will store the
 *			block pointer on success.
 * @param service_id	Service ID of the block device.
 * @param ba		Block address (logical).
 * @param flags		If BLOCK_FLAGS_NOREAD is specified, block_get()
 *			will not read the contents of the block from the
 *			device.
 *
 * @return		EOK on success or a negative error code.
 */
int block_get(block_t **block, service_id_t service_id, aoff64_t ba, int flags)
{
	devcon_t *devcon;
	cache_t *cache;
	block_t *b;
	link_t *link;

	int rc;

	devcon = devcon_search(service_id);

	assert(devcon);
	assert(devcon->cache);

	cache = devcon->cache;

retry:
	rc = EOK;
	b = NULL;

	fibril_mutex_lock(&cache->lock);
	ht_link_t *hlink = hash_table_find(&cache->block_hash, &ba);
	if (hlink) {
found:
		/*
		 * We found the block in the cache.
		 */
		b = hash_table_get_inst(hlink, block_t, hash_link);
		fibril_mutex_lock(&b->lock);
		if (b->refcnt++ == 0)
			list_remove(&b->free_link);
		if (b->toxic)
			rc = EIO;
		fibril_mutex_unlock(&b->lock);
		fibril_mutex_unlock(&cache->lock);
	} else {
		/*
		 * The block was not found in the cache.
		 */
		if (cache_can_grow(cache)) {
			/*
			 * We can grow the cache by allocating new blocks.
			 * Should the allocation fail, we fail over and try to
			 * recycle a block from the cache.
			 */
			b = malloc(sizeof(block_t));
			if (!b)
				goto recycle;
			b->data = malloc(cache->lblock_size);
			if (!b->data) {
				free(b);
				b = NULL;
				goto recycle;
			}
			cache->blocks_cached++;
		} else {
			/*
			 * Try to recycle a block from the free list.
			 */
recycle:
			if (list_empty(&cache->free_list)) {
				fibril_mutex_unlock(&cache->lock);
				rc = ENOMEM;
				goto out;
			}
			link = list_first(&cache->free_list);
			b = list_get_instance(link, block_t, free_link);

			fibril_mutex_lock(&b->lock);
			if (b->dirty) {
				/*
				 * The block needs to be written back to the
				 * device before it changes identity. Do this
				 * while not holding the cache lock so that
				 * concurrency is not impeded. Also move the
				 * block to the end of the free list so that we
				 * do not slow down other instances of
				 * block_get() draining the free list.
				 */
				list_remove(&b->free_link);
				list_append(&b->free_link, &cache->free_list);
				fibril_mutex_unlock(&cache->lock);
				rc = write_blocks(devcon, b->pba,
				    cache->blocks_cluster, b->data, b->size);
				if (rc != EOK) {
					/*
					 * We did not manage to write the block
					 * to the device. Keep it around for
					 * another try. Hopefully, we will grab
					 * another block next time.
					 */
					fibril_mutex_unlock(&b->lock);
					goto retry;
				}
				b->dirty = false;
				if (!fibril_mutex_trylock(&cache->lock)) {
					/*
					 * Somebody is probably racing with us.
					 * Unlock the block and retry.
					 */
					fibril_mutex_unlock(&b->lock);
					goto retry;
				}
				hlink = hash_table_find(&cache->block_hash, &ba);
				if (hlink) {
					/*
					 * Someone else must have already
					 * instantiated the block while we were
					 * not holding the cache lock.
					 * Leave the recycled block on the
					 * freelist and continue as if we
					 * found the block of interest during
					 * the first try.
					 */
					fibril_mutex_unlock(&b->lock);
					goto found;
				}

			}
			fibril_mutex_unlock(&b->lock);

			/*
			 * Unlink the block from the free list and the hash
			 * table.
			 */
			list_remove(&b->free_link);
			hash_table_remove_item(&cache->block_hash, &b->hash_link);
		}

		block_initialize(b);
		b->service_id = service_id;
		b->size = cache->lblock_size;
		b->lba = ba;
		b->pba = ba_ltop(devcon, b->lba);
		hash_table_insert(&cache->block_hash, &b->hash_link);

		/*
		 * Lock the block before releasing the cache lock. Thus we don't
		 * kill concurrent operations on the cache while doing I/O on
		 * the block.
		 */
		fibril_mutex_lock(&b->lock);
		fibril_mutex_unlock(&cache->lock);

		if (!(flags & BLOCK_FLAGS_NOREAD)) {
			/*
			 * The block contains old or no data. We need to read
			 * the new contents from the device.
			 */
			rc = read_blocks(devcon, b->pba, cache->blocks_cluster,
			    b->data, cache->lblock_size);
			if (rc != EOK)
				b->toxic = true;
		} else
			rc = EOK;

		fibril_mutex_unlock(&b->lock);
	}
out:
	if ((rc != EOK) && b) {
		assert(b->toxic);
		(void) block_put(b);
		b = NULL;
	}
	*block = b;
	return rc;
}
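
/** Example: read-modify-write through the cache (illustrative sketch).
 *
 * A hypothetical helper, not part of the library, that patches one byte
 * inside a logical block. The flags value 0 requests that the current
 * contents be read in; passing BLOCK_FLAGS_NOREAD instead would skip the
 * read for blocks that are about to be completely overwritten.
 *
 * @code
 *	int example_patch_byte(service_id_t sid, aoff64_t lba, size_t off,
 *	    uint8_t value)
 *	{
 *		block_t *b;
 *
 *		int rc = block_get(&b, sid, lba, 0);
 *		if (rc != EOK)
 *			return rc;
 *
 *		((uint8_t *) b->data)[off] = value;
 *		b->dirty = true;	// schedule write-back
 *
 *		return block_put(b);	// may write immediately outside WB mode
 *	}
 * @endcode
 */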

/** Release a reference to a block.
 *
 * If the last reference is dropped, the block is put on the free list.
 *
 * @param block		Block of which a reference is to be released.
 *
 * @return		EOK on success or a negative error code.
 */
int block_put(block_t *block)
{
	devcon_t *devcon = devcon_search(block->service_id);
	cache_t *cache;
	unsigned blocks_cached;
	enum cache_mode mode;
	int rc = EOK;

	assert(devcon);
	assert(devcon->cache);
	assert(block->refcnt >= 1);

	cache = devcon->cache;

retry:
	fibril_mutex_lock(&cache->lock);
	blocks_cached = cache->blocks_cached;
	mode = cache->mode;
	fibril_mutex_unlock(&cache->lock);

	/*
	 * Determine whether to sync the block. Syncing the block is best done
	 * when not holding the cache lock as it does not impede concurrency.
	 * Since the situation may have changed when we unlocked the cache, the
	 * blocks_cached and mode variables are mere hints. We will recheck the
	 * conditions later when the cache lock is held again.
	 */
	fibril_mutex_lock(&block->lock);
	if (block->toxic)
		block->dirty = false;	/* will not write back toxic block */
	if (block->dirty && (block->refcnt == 1) &&
	    (blocks_cached > CACHE_HI_WATERMARK || mode != CACHE_MODE_WB)) {
		rc = write_blocks(devcon, block->pba, cache->blocks_cluster,
		    block->data, block->size);
		block->dirty = false;
	}
	fibril_mutex_unlock(&block->lock);

	fibril_mutex_lock(&cache->lock);
	fibril_mutex_lock(&block->lock);
	if (!--block->refcnt) {
		/*
		 * Last reference to the block was dropped. Either free the
		 * block or put it on the free list. In case of an I/O error,
		 * free the block.
		 */
		if ((cache->blocks_cached > CACHE_HI_WATERMARK) ||
		    (rc != EOK)) {
			/*
			 * Currently there are too many cached blocks or there
			 * was an I/O error when writing the block back to the
			 * device.
			 */
			if (block->dirty) {
				/*
				 * We cannot sync the block while holding the
				 * cache lock. Release everything and retry.
				 */
				block->refcnt++;
				fibril_mutex_unlock(&block->lock);
				fibril_mutex_unlock(&cache->lock);
				goto retry;
			}
			/*
			 * Take the block out of the cache and free it.
			 */
			hash_table_remove_item(&cache->block_hash, &block->hash_link);
			fibril_mutex_unlock(&block->lock);
			free(block->data);
			free(block);
			cache->blocks_cached--;
			fibril_mutex_unlock(&cache->lock);
			return rc;
		}
		/*
		 * Put the block on the free list.
		 */
		if (cache->mode != CACHE_MODE_WB && block->dirty) {
			/*
			 * We cannot sync the block while holding the cache
			 * lock. Release everything and retry.
			 */
			block->refcnt++;
			fibril_mutex_unlock(&block->lock);
			fibril_mutex_unlock(&cache->lock);
			goto retry;
		}
		list_append(&block->free_link, &cache->free_list);
	}
	fibril_mutex_unlock(&block->lock);
	fibril_mutex_unlock(&cache->lock);

	return rc;
}

/** Read sequential data from a block device.
 *
 * @param service_id	Service ID of the block device.
 * @param buf		Buffer for holding one block.
 * @param bufpos	Pointer to the first unread valid offset within the
 *			communication buffer.
 * @param buflen	Pointer to the number of unread bytes that are ready in
 *			the communication buffer.
 * @param pos		Device position to be read.
 * @param dst		Destination buffer.
 * @param size		Size of the destination buffer.
 *
 * @return		EOK on success or a negative return code on failure.
 */
int block_seqread(service_id_t service_id, void *buf, size_t *bufpos,
    size_t *buflen, aoff64_t *pos, void *dst, size_t size)
{
	size_t offset = 0;
	size_t left = size;
	size_t block_size;
	devcon_t *devcon;

	devcon = devcon_search(service_id);
	assert(devcon);
	block_size = devcon->pblock_size;

	while (left > 0) {
		size_t rd;

		if (*bufpos + left < *buflen)
			rd = left;
		else
			rd = *buflen - *bufpos;

		if (rd > 0) {
			/*
			 * Copy the contents of the communication buffer to the
			 * destination buffer.
			 */
			memcpy(dst + offset, buf + *bufpos, rd);
			offset += rd;
			*bufpos += rd;
			*pos += rd;
			left -= rd;
		}

		if (*bufpos == *buflen) {
			/* Refill the communication buffer with a new block. */
			int rc;

			rc = read_blocks(devcon, *pos / block_size, 1, buf,
			    devcon->pblock_size);
			if (rc != EOK) {
				return rc;
			}

			*bufpos = 0;
			*buflen = block_size;
		}
	}

	return EOK;
}
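
/** Example: sequential reading with block_seqread() (illustrative sketch).
 *
 * The caller owns the communication buffer (one physical block in size) and
 * the bufpos/buflen/pos cursor variables, which persist across calls so that
 * consecutive reads continue where the previous one stopped. The helper name
 * is hypothetical.
 *
 * @code
 *	int example_read_prefix(service_id_t sid, void *dst, size_t size)
 *	{
 *		size_t bsize;
 *		int rc = block_get_bsize(sid, &bsize);
 *		if (rc != EOK)
 *			return rc;
 *
 *		void *buf = malloc(bsize);
 *		if (!buf)
 *			return ENOMEM;
 *
 *		size_t bufpos = 0;
 *		size_t buflen = 0;	// empty buffer forces the first refill
 *		aoff64_t pos = 0;	// start at byte offset 0
 *
 *		rc = block_seqread(sid, buf, &bufpos, &buflen, &pos, dst, size);
 *		free(buf);
 *		return rc;
 *	}
 * @endcode
 */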

/** Read blocks directly from device (bypass cache).
 *
 * @param service_id	Service ID of the block device.
 * @param ba		Address of first block (physical).
 * @param cnt		Number of blocks.
 * @param buf		Buffer for storing the data.
 *
 * @return		EOK on success or negative error code on failure.
 */
int block_read_direct(service_id_t service_id, aoff64_t ba, size_t cnt, void *buf)
{
	devcon_t *devcon;

	devcon = devcon_search(service_id);
	assert(devcon);

	return read_blocks(devcon, ba, cnt, buf, devcon->pblock_size * cnt);
}

/** Write blocks directly to device (bypass cache).
 *
 * @param service_id	Service ID of the block device.
 * @param ba		Address of first block (physical).
 * @param cnt		Number of blocks.
 * @param data		The data to be written.
 *
 * @return		EOK on success or negative error code on failure.
 */
int block_write_direct(service_id_t service_id, aoff64_t ba, size_t cnt,
    const void *data)
{
	devcon_t *devcon;

	devcon = devcon_search(service_id);
	assert(devcon);

	return write_blocks(devcon, ba, cnt, (void *) data, devcon->pblock_size * cnt);
}

/** Get device block size.
 *
 * @param service_id	Service ID of the block device.
 * @param bsize		Output block size.
 *
 * @return		EOK on success or negative error code on failure.
 */
int block_get_bsize(service_id_t service_id, size_t *bsize)
{
	devcon_t *devcon;

	devcon = devcon_search(service_id);
	assert(devcon);

	return bd_get_block_size(devcon->bd, bsize);
}

/** Get number of blocks on device.
 *
 * @param service_id	Service ID of the block device.
 * @param nblocks	Output number of blocks.
 *
 * @return		EOK on success or negative error code on failure.
 */
int block_get_nblocks(service_id_t service_id, aoff64_t *nblocks)
{
	devcon_t *devcon = devcon_search(service_id);
	assert(devcon);

	return bd_get_num_blocks(devcon->bd, nblocks);
}
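
/** Example: direct (uncached) access using the device geometry (sketch).
 *
 * Reads the last physical block of the device into a caller-provided buffer
 * that must be at least one physical block large. Purely illustrative; the
 * cache does not need to be initialized for the *_direct() calls.
 *
 * @code
 *	int example_read_last_block(service_id_t sid, void *buf, size_t buf_size)
 *	{
 *		size_t bsize;
 *		aoff64_t nblocks;
 *
 *		int rc = block_get_bsize(sid, &bsize);
 *		if (rc != EOK)
 *			return rc;
 *
 *		rc = block_get_nblocks(sid, &nblocks);
 *		if (rc != EOK)
 *			return rc;
 *
 *		if (buf_size < bsize || nblocks == 0)
 *			return EINVAL;
 *
 *		return block_read_direct(sid, nblocks - 1, 1, buf);
 *	}
 * @endcode
 */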

/** Read bytes directly from the device (bypass cache)
 *
 * @param service_id	Service ID of the block device.
 * @param abs_offset	Absolute offset in bytes where to start reading
 * @param bytes		Number of bytes to read
 * @param data		Buffer that receives the data
 *
 * @return		EOK on success or negative error code on failure.
 */
int block_read_bytes_direct(service_id_t service_id, aoff64_t abs_offset,
    size_t bytes, void *data)
{
	int rc;
	size_t phys_block_size;
	size_t buf_size;
	void *buffer;
	aoff64_t first_block;
	aoff64_t last_block;
	size_t blocks;
	size_t offset;

	rc = block_get_bsize(service_id, &phys_block_size);
	if (rc != EOK) {
		return rc;
	}

	/* calculate data position and required space */
	first_block = abs_offset / phys_block_size;
	offset = abs_offset % phys_block_size;
	last_block = (abs_offset + bytes - 1) / phys_block_size;
	blocks = last_block - first_block + 1;
	buf_size = blocks * phys_block_size;

	/* read the data into memory */
	buffer = malloc(buf_size);
	if (buffer == NULL) {
		return ENOMEM;
	}

	rc = block_read_direct(service_id, first_block, blocks, buffer);
	if (rc != EOK) {
		free(buffer);
		return rc;
	}

	/* copy the data from the buffer */
	memcpy(data, buffer + offset, bytes);
	free(buffer);

	return EOK;
}
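
/** Example: byte-granular read at an unaligned offset (illustrative sketch).
 *
 * With a 512-byte physical block size, reading 100 bytes starting at byte
 * offset 1000 spans physical blocks 1 and 2; block_read_bytes_direct()
 * allocates the two-block bounce buffer internally and copies out just the
 * requested range. The helper name is hypothetical.
 *
 * @code
 *	int example_read_label(service_id_t sid, uint8_t *label)
 *	{
 *		// label must provide room for 100 bytes
 *		return block_read_bytes_direct(sid, 1000, 100, label);
 *	}
 * @endcode
 */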

/** Get TOC from device.
 *
 * @param service_id	Service ID of the block device.
 * @param session	Starting session.
 *
 * @return		Allocated TOC structure.
 * @return		NULL on failure.
 *
 */
toc_block_t *block_get_toc(service_id_t service_id, uint8_t session)
{
	devcon_t *devcon = devcon_search(service_id);
	toc_block_t *toc = NULL;
	int rc;

	assert(devcon);

	toc = (toc_block_t *) malloc(sizeof(toc_block_t));
	if (toc == NULL)
		return NULL;

	rc = bd_read_toc(devcon->bd, session, toc, sizeof(toc_block_t));
	if (rc != EOK) {
		free(toc);
		return NULL;
	}

	return toc;
}

/** Read blocks from block device.
 *
 * @param devcon	Device connection.
 * @param ba		Address of first block.
 * @param cnt		Number of blocks.
 * @param buf		Buffer for storing the data.
 *
 * @return		EOK on success or negative error code on failure.
 */
static int read_blocks(devcon_t *devcon, aoff64_t ba, size_t cnt, void *buf,
    size_t size)
{
	assert(devcon);

	int rc = bd_read_blocks(devcon->bd, ba, cnt, buf, size);
	if (rc != EOK) {
		printf("Error %d reading %zu blocks starting at block %" PRIuOFF64
		    " from device handle %" PRIun "\n", rc, cnt, ba,
		    devcon->service_id);
#ifndef NDEBUG
		stacktrace_print();
#endif
	}

	return rc;
}

/** Write blocks to block device.
 *
 * @param devcon	Device connection.
 * @param ba		Address of first block.
 * @param cnt		Number of blocks.
 * @param data		Buffer containing the data to write.
 *
 * @return		EOK on success or negative error code on failure.
 */
static int write_blocks(devcon_t *devcon, aoff64_t ba, size_t cnt, void *data,
    size_t size)
{
	assert(devcon);

	int rc = bd_write_blocks(devcon->bd, ba, cnt, data, size);
	if (rc != EOK) {
		printf("Error %d writing %zu blocks starting at block %" PRIuOFF64
		    " to device handle %" PRIun "\n", rc, cnt, ba, devcon->service_id);
#ifndef NDEBUG
		stacktrace_print();
#endif
	}

	return rc;
}

/** Convert logical block address to physical block address. */
static aoff64_t ba_ltop(devcon_t *devcon, aoff64_t lba)
{
	assert(devcon->cache != NULL);
	return lba * devcon->cache->blocks_cluster;
}

/** @}
 */