Changeset bc3d695 in mainline
- Timestamp: 2025-01-05T18:22:28Z (4 months ago)
- Children: 7bf29e5
- Parents: 40be7eb
- Location: uspace/srv/bd/hr
- Files: 2 added, 5 edited
uspace/srv/bd/hr/hr.c
--- uspace/srv/bd/hr/hr.c (r40be7eb)
+++ uspace/srv/bd/hr/hr.c (rbc3d695)
@@ -1,4 +1,4 @@
 /*
- * Copyright (c) 2024 Miroslav Cimerman
+ * Copyright (c) 2025 Miroslav Cimerman
  * All rights reserved.
  *
@@ -49,4 +49,6 @@
 #include <str_error.h>
 
+#include "fge.h"
+#include "io.h"
 #include "superblock.h"
 #include "util.h"
@@ -83,4 +85,5 @@
 	list_foreach(hr_volumes, lvolumes, hr_volume_t, vol) {
 		if (vol->svc_id == svc_id) {
+			hr_fpool_destroy(vol->fge);
 			hr_fini_devs(vol);
 			list_remove(&vol->lvolumes);
@@ -154,4 +157,13 @@
 		return;
 	}
+
+	hr_fpool_t *fge = hr_fpool_create(16, 32, sizeof(hr_io_t));
+	if (fge == NULL) {
+		free(new_volume);
+		free(cfg);
+		async_answer_0(icall, ENOMEM);
+		return;
+	}
+	new_volume->fge = fge;
 
 	str_cpy(new_volume->devname, HR_DEVNAME_LEN, cfg->devname);
@@ -234,5 +246,11 @@
 	}
 
-	fibril_mutex_initialize(&new_volume->lock);
+	fibril_mutex_initialize(&new_volume->lock); /* XXX: will remove this */
+
+	fibril_mutex_initialize(&new_volume->halt_lock);
+	new_volume->halt_please = false;
+
+	fibril_rwlock_initialize(&new_volume->extents_lock);
+	fibril_rwlock_initialize(&new_volume->states_lock);
 
 	list_initialize(&new_volume->range_lock_list);
@@ -260,4 +278,5 @@
 error:
 	free(cfg);
+	free(fge);
 	hr_fini_devs(new_volume);
 	free(new_volume);
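The pattern introduced here gives every volume a preallocated pool of worker fibrils (hr_fpool_t) at assembly time and tears it down with the volume. Below is a minimal sketch of the pool/group lifecycle, written as if it lived in raid0.c next to the code it imitates; the semantics are inferred only from the calls visible in this changeset (the fge.h declarations themselves are not shown), and hr_raid0_sync_sketch() is an invented name for illustration.

	#include <errno.h>
	#include <stddef.h>

	#include "fge.h"
	#include "io.h"
	#include "var.h"

	/*
	 * Sketch: fan one logical request out to every extent, then join.
	 * Mirrors the HR_BD_SYNC path added to raid0.c in this changeset.
	 */
	static errno_t hr_raid0_sync_sketch(hr_volume_t *vol)
	{
		/* one group per logical request, backed by the volume's pool */
		hr_fgroup_t *group = hr_fgroup_create(vol->fge, vol->extent_no);

		for (size_t i = 0; i < vol->extent_no; i++) {
			hr_io_t *io = hr_fgroup_alloc(group); /* pool-backed hr_io_t */
			io->extent = i;
			io->ba = 0;
			io->cnt = 0;
			io->type = HR_BD_SYNC;
			io->vol = vol;
			io->state_callback = raid0_state_callback; /* as in raid0.c */

			/* hr_io_worker runs in its own fibril from the pool */
			hr_fgroup_submit(group, hr_io_worker, io);
		}

		size_t bad;
		(void)hr_fgroup_wait(group, NULL, &bad); /* join all workers */
		return (bad > 0) ? EIO : EOK;
	}

The hr_fpool_create(16, 32, sizeof(hr_io_t)) call presumably sizes the fibril pool and its per-work-item allocation; the exact meaning of the two numeric parameters is not visible in this diff.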
uspace/srv/bd/hr/meson.build
--- uspace/srv/bd/hr/meson.build (r40be7eb)
+++ uspace/srv/bd/hr/meson.build (rbc3d695)
@@ -1,4 +1,4 @@
 #
-# Copyright (c) 2024 Miroslav Cimerman
+# Copyright (c) 2025 Miroslav Cimerman
 # All rights reserved.
 #
@@ -31,4 +31,5 @@
 	'fge.c',
 	'hr.c',
+	'io.c',
 	'raid0.c',
 	'raid1.c',
uspace/srv/bd/hr/raid0.c
--- uspace/srv/bd/hr/raid0.c (r40be7eb)
+++ uspace/srv/bd/hr/raid0.c (rbc3d695)
@@ -1,4 +1,4 @@
 /*
- * Copyright (c) 2024 Miroslav Cimerman
+ * Copyright (c) 2025 Miroslav Cimerman
  * All rights reserved.
  *
@@ -48,4 +48,5 @@
 #include <str_error.h>
 
+#include "io.h"
 #include "superblock.h"
 #include "util.h"
@@ -54,5 +55,4 @@
 extern loc_srv_t *hr_srv;
 
-static errno_t hr_raid0_check_vol_status(hr_volume_t *);
 static errno_t hr_raid0_update_vol_status(hr_volume_t *);
 static errno_t hr_raid0_bd_op(hr_bd_op_type_t, bd_srv_t *, aoff64_t, size_t,
@@ -95,4 +95,6 @@
 		return rc;
 
+	hr_update_vol_status(new_volume, HR_VOL_ONLINE);
+
 	bd_srvs_init(&new_volume->hr_bds);
 	new_volume->hr_bds.ops = &hr_raid0_bd_ops;
@@ -127,6 +129,4 @@
 void hr_raid0_status_event(hr_volume_t *vol)
 {
-	fibril_mutex_lock(&vol->lock);
 	(void)hr_raid0_update_vol_status(vol);
-	fibril_mutex_unlock(&vol->lock);
 }
@@ -175,11 +175,4 @@
 	*rnb = vol->data_blkno;
 	return EOK;
-}
-
-static errno_t hr_raid0_check_vol_status(hr_volume_t *vol)
-{
-	if (vol->status == HR_VOL_ONLINE)
-		return EOK;
-	return EIO;
 }
 
@@ -190,17 +183,39 @@
 static errno_t hr_raid0_update_vol_status(hr_volume_t *vol)
 {
+	fibril_rwlock_read_lock(&vol->states_lock);
 	hr_vol_status_t old_state = vol->status;
 
 	for (size_t i = 0; i < vol->extent_no; i++) {
 		if (vol->extents[i].status != HR_EXT_ONLINE) {
+			fibril_rwlock_read_unlock(&vol->states_lock);
+			fibril_rwlock_write_lock(&vol->states_lock);
 			if (old_state != HR_VOL_FAULTY)
 				hr_update_vol_status(vol, HR_VOL_FAULTY);
+			fibril_rwlock_write_unlock(&vol->states_lock);
 			return EIO;
 		}
 	}
-
-	if (old_state != HR_VOL_ONLINE)
-		hr_update_vol_status(vol, HR_VOL_ONLINE);
-
-	return EOK;
+	fibril_rwlock_read_unlock(&vol->states_lock);
+
+	return EOK;
+}
+
+static void raid0_state_callback(hr_volume_t *vol, size_t extent, errno_t rc)
+{
+	if (rc == EOK)
+		return;
+
+	fibril_rwlock_write_lock(&vol->states_lock);
+
+	switch (rc) {
+	case ENOENT:
+		hr_update_ext_status(vol, extent, HR_EXT_MISSING);
+		break;
+	default:
+		hr_update_ext_status(vol, extent, HR_EXT_FAILED);
+	}
+
+	hr_update_vol_status(vol, HR_VOL_FAULTY);
+
+	fibril_rwlock_write_unlock(&vol->states_lock);
 }
@@ -216,8 +231,31 @@
 	uint8_t *data_read = dst;
 
+	fibril_rwlock_read_lock(&vol->states_lock);
+	if (vol->status != HR_VOL_ONLINE) {
+		fibril_rwlock_read_unlock(&vol->states_lock);
+		return EIO;
+	}
+	fibril_rwlock_read_unlock(&vol->states_lock);
+
 	/* propagate sync */
 	if (type == HR_BD_SYNC && ba == 0 && cnt == 0) {
-		hr_sync_all_extents(vol);
-		rc = hr_raid0_update_vol_status(vol);
-		return rc;
+		hr_fgroup_t *group = hr_fgroup_create(vol->fge, vol->extent_no);
+
+		for (size_t i = 0; i < vol->extent_no; i++) {
+			hr_io_t *io = hr_fgroup_alloc(group);
+			io->extent = i;
+			io->ba = ba;
+			io->cnt = cnt;
+			io->type = type;
+			io->vol = vol;
+			io->state_callback = raid0_state_callback;
+
+			hr_fgroup_submit(group, hr_io_worker, io);
+		}
+
+		size_t bad;
+		(void)hr_fgroup_wait(group, NULL, &bad);
+		if (bad > 0)
+			return EIO;
+		return EOK;
 	}
@@ -232,55 +270,39 @@
 
 	uint64_t strip_size = vol->strip_size / vol->bsize; /* in blocks */
-	uint64_t stripe = ba / strip_size; /* stripe number */
-	uint64_t extent = stripe % vol->extent_no;
-	uint64_t ext_stripe = stripe / vol->extent_no; /* stripe level */
-	uint64_t strip_off = ba % strip_size; /* strip offset */
-
-	fibril_mutex_lock(&vol->lock);
-
-	rc = hr_raid0_check_vol_status(vol);
-	if (rc != EOK) {
-		fibril_mutex_unlock(&vol->lock);
-		return EIO;
-	}
+	uint64_t strip_no = ba / strip_size;
+	uint64_t extent = strip_no % vol->extent_no;
+	uint64_t stripe = strip_no / vol->extent_no;
+	uint64_t strip_off = ba % strip_size;
 
 	left = cnt;
 
+	/* calculate how many strips does the IO span */
+	size_t end_strip_no = (ba + cnt - 1) / strip_size;
+	size_t span = end_strip_no - strip_no + 1;
+
+	hr_fgroup_t *group = hr_fgroup_create(vol->fge, span);
+
 	while (left != 0) {
-		phys_block = ext_stripe * strip_size + strip_off;
+		phys_block = stripe * strip_size + strip_off;
 		cnt = min(left, strip_size - strip_off);
 		len = vol->bsize * cnt;
 		hr_add_ba_offset(vol, &phys_block);
-		switch (type) {
-		case HR_BD_SYNC:
-			rc = block_sync_cache(vol->extents[extent].svc_id,
-			    phys_block, cnt);
-			/* allow unsupported sync */
-			if (rc == ENOTSUP)
-				rc = EOK;
-			break;
-		case HR_BD_READ:
-			rc = block_read_direct(vol->extents[extent].svc_id,
-			    phys_block, cnt, data_read);
-			data_read += len;
-			break;
-		case HR_BD_WRITE:
-			rc = block_write_direct(vol->extents[extent].svc_id,
-			    phys_block, cnt, data_write);
-			data_write += len;
-			break;
-		default:
-			rc = EINVAL;
-		}
-
-		if (rc == ENOENT) {
-			hr_update_ext_status(vol, extent, HR_EXT_MISSING);
-			rc = EIO;
-			goto error;
-		} else if (rc != EOK) {
-			hr_update_ext_status(vol, extent, HR_EXT_FAILED);
-			rc = EIO;
-			goto error;
-		}
+
+		hr_io_t *io = hr_fgroup_alloc(group);
+		io->extent = extent;
+		io->data_write = data_write;
+		io->data_read = data_read;
+		io->ba = ba;
+		io->cnt = cnt;
+		io->type = type;
+		io->vol = vol;
+		io->state_callback = raid0_state_callback;
+
+		hr_fgroup_submit(group, hr_io_worker, io);
+
+		if (type == HR_BD_READ)
			data_read += len;
+		else if (type == HR_BD_WRITE)
			data_write += len;
 
 		left -= cnt;
@@ -288,12 +310,14 @@
 		extent++;
 		if (extent >= vol->extent_no) {
-			ext_stripe++;
+			stripe++;
 			extent = 0;
 		}
 	}
 
-error:
-	(void)hr_raid0_update_vol_status(vol);
-	fibril_mutex_unlock(&vol->lock);
-	return rc;
+	size_t bad;
+	(void)hr_fgroup_wait(group, NULL, &bad);
+	if (bad > 0)
+		return EIO;
+
+	return EOK;
 }
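The rewritten addressing renames the old stripe / ext_stripe pair to strip_no / stripe and sizes the fibril group up front from the number of strips the request touches. To make the arithmetic concrete, here is a small standalone program using assumed example values (strip_size = 16 blocks, extent_no = 4, ba = 100, cnt = 40); the strip_off reset after the first iteration corresponds to loop context elided ("…") in the hunks above, and hr_add_ba_offset()'s data_offset shift is omitted.

	#include <inttypes.h>
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* assumed example values, not taken from the changeset */
		uint64_t strip_size = 16, extent_no = 4;	/* blocks, devices */
		uint64_t ba = 100, cnt = 40;			/* request */

		uint64_t strip_no = ba / strip_size;		/* 6 */
		uint64_t extent = strip_no % extent_no;		/* 2 */
		uint64_t stripe = strip_no / extent_no;		/* 1 */
		uint64_t strip_off = ba % strip_size;		/* 4 */

		/* how many strips does the IO span -> fgroup size */
		uint64_t end_strip_no = (ba + cnt - 1) / strip_size;	/* 8 */
		uint64_t span = end_strip_no - strip_no + 1;		/* 3 */
		printf("group sized for %" PRIu64 " IOs\n", span);

		uint64_t left = cnt;
		while (left != 0) {
			uint64_t phys_block = stripe * strip_size + strip_off;
			uint64_t chunk = strip_size - strip_off;
			if (left < chunk)
				chunk = left;
			printf("extent %" PRIu64 ": blocks %" PRIu64 "-%" PRIu64
			    " (%" PRIu64 " blocks)\n", extent, phys_block,
			    phys_block + chunk - 1, chunk);
			left -= chunk;
			strip_off = 0;
			if (++extent >= extent_no) {
				stripe++;
				extent = 0;
			}
		}
		return 0;
	}

It prints three per-extent IOs of 12, 16 and 12 blocks (extents 2, 3, 0), i.e. exactly span iterations, which is what lets hr_fgroup_create() preallocate the right number of work items before the loop runs.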
uspace/srv/bd/hr/util.h
--- uspace/srv/bd/hr/util.h (r40be7eb)
+++ uspace/srv/bd/hr/util.h (rbc3d695)
@@ -1,4 +1,4 @@
 /*
- * Copyright (c) 2024 Miroslav Cimerman
+ * Copyright (c) 2025 Miroslav Cimerman
  * All rights reserved.
  *
@@ -38,4 +38,5 @@
 
 #include <errno.h>
+#include <io/log.h>
 
 #include "var.h"
uspace/srv/bd/hr/var.h
--- uspace/srv/bd/hr/var.h (r40be7eb)
+++ uspace/srv/bd/hr/var.h (rbc3d695)
@@ -1,4 +1,4 @@
 /*
- * Copyright (c) 2024 Miroslav Cimerman
+ * Copyright (c) 2025 Miroslav Cimerman
  * All rights reserved.
  *
@@ -43,3 +43,5 @@
 #include <hr.h>
 
+#include "fge.h"
+
 #define NAME "hr"
@@ -60,5 +62,10 @@
 	bd_srvs_t hr_bds;
 
-	link_t lvolumes;
+	link_t lvolumes; /* protected by static hr_volumes_lock in hr.c */
+
+	/*
+	 * XXX: will be gone after all paralelization, but still used
+	 * in yet-unparallelized levels
+	 */
 	fibril_mutex_t lock;
 
@@ -66,10 +73,8 @@
 	fibril_mutex_t range_lock_list_lock;
 
+	hr_fpool_t *fge;
+
+	/* after assembly, these are invariant */
 	size_t extent_no;
-	hr_extent_t extents[HR_MAX_EXTENTS];
-
-	size_t hotspare_no;
-	hr_extent_t hotspares[HR_MAX_HOTSPARES];
-
 	size_t bsize;
 	uint64_t nblocks;
@@ -77,13 +82,25 @@
 	uint64_t data_offset; /* in blocks */
 	uint32_t strip_size;
+	hr_level_t level;
+	uint8_t layout; /* RAID Level Qualifier */
+	service_id_t svc_id;
+	char devname[HR_DEVNAME_LEN];
+
+	hr_extent_t extents[HR_MAX_EXTENTS];
+	size_t hotspare_no;
+	hr_extent_t hotspares[HR_MAX_HOTSPARES];
+
+	/* protects ordering (hr_extent_t.svc_id, hotspares) */
+	fibril_rwlock_t extents_lock;
+
+	/* protects states (hr_extent_t.status, hr_vol_status_t.status) */
+	fibril_rwlock_t states_lock;
+
+	/* for halting IO requests when a REBUILD start waits */
+	bool halt_please;
+	fibril_mutex_t halt_lock;
 
 	uint64_t rebuild_blk;
-
 	uint64_t counter; /* metadata syncing */
-
-	service_id_t svc_id;
 	hr_vol_status_t status;
-	hr_level_t level;
-	uint8_t layout; /* RAID Level Qualifier */
-	char devname[HR_DEVNAME_LEN];
 } hr_volume_t;
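The single big volume mutex is on its way out: extents_lock now covers extent ordering while states_lock covers the status fields, so hot-path readers only ever take a shared lock. A sketch of the read-side check this enables follows; hr_vol_is_online() is an invented helper name, but the pattern is the one used at the top of the new hr_raid0_bd_op().

	#include <fibril_synch.h>
	#include <stdbool.h>

	#include "var.h"

	/* invented helper illustrating the states_lock read-side pattern */
	static bool hr_vol_is_online(hr_volume_t *vol)
	{
		fibril_rwlock_read_lock(&vol->states_lock);
		bool online = (vol->status == HR_VOL_ONLINE);
		fibril_rwlock_read_unlock(&vol->states_lock);

		return online;
	}

Writers, such as the new raid0_state_callback() and the rebuild path that halt_please/halt_lock appear reserved for, take the same rwlock exclusively before touching hr_extent_t.status or the volume status.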