Changeset f6b5593 in mainline
- Timestamp:
- 2009-09-21T11:53:03Z (15 years ago)
- Branches:
- lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children:
- 4098e38
- Parents:
- 2f636b6 (diff), c1618ed (diff)
Note: this is a merge changeset, the changes displayed below correspond to the merge itself.
Use the (diff)
links above to see all the changes relative to each parent. - Files:
-
- 99 added
- 13 deleted
- 45 edited
- 10 moved
Legend:
- Unmodified
- Added
- Removed
-
HelenOS.config
r2f636b6 rf6b5593 219 219 220 220 % Compiler 221 @ "gcc_cross" GCC Cross-compiler 222 @ "gcc_native" GCC Native 223 @ "icc_native" ICC Native 224 @ "suncc_native" Sun Studio C Compiler 221 @ "gcc_cross" GNU C Compiler (cross-compiler) 222 @ "gcc_native" GNU C Compiler (native) 223 @ "icc" Intel C Compiler 224 @ "suncc" Sun Studio C Compiler 225 @ "clang" Clang 225 226 ! [PLATFORM=amd64|PLATFORM=ia32] COMPILER (choice) 226 227 227 228 % Compiler 228 @ "gcc_cross" G CC Cross-compiler229 @ "gcc_native" G CC Native230 @ "icc _native" ICC Native229 @ "gcc_cross" GNU C Compiler (cross-compiler) 230 @ "gcc_native" GNU C Compiler (native) 231 @ "icc" Intel C Compiler 231 232 ! [PLATFORM=ia64] COMPILER (choice) 232 233 233 234 % Compiler 234 @ "gcc_cross" G CC Cross-compiler235 @ "gcc_native" G CC Native236 @ "suncc _native" Sun Studio C Compiler235 @ "gcc_cross" GNU C Compiler (cross-compiler) 236 @ "gcc_native" GNU C Compiler (native) 237 @ "suncc" Sun Studio C Compiler 237 238 ! [PLATFORM=sparc64] COMPILER (choice) 238 239 239 240 % Compiler 240 @ "gcc_cross" G CC Cross-compiler241 @ "gcc_native" G CC Native241 @ "gcc_cross" GNU C Compiler (cross-compiler) 242 @ "gcc_native" GNU C Compiler (native) 242 243 ! [PLATFORM=arm32|PLATFORM=mips32|PLATFORM=ppc32] COMPILER (choice) 243 244 -
boot/arch/amd64/Makefile.inc
r2f636b6 rf6b5593 52 52 $(USPACEDIR)/srv/fs/fat/fat \ 53 53 $(USPACEDIR)/srv/bd/ata_bd/ata_bd \ 54 $(USPACEDIR)/srv/bd/file_bd/file_bd 54 $(USPACEDIR)/srv/bd/file_bd/file_bd \ 55 $(USPACEDIR)/srv/part/mbr_part/mbr_part 55 56 56 57 RD_APPS = \ 58 $(USPACEDIR)/app/edit/edit \ 57 59 $(USPACEDIR)/app/getvc/getvc \ 58 60 $(USPACEDIR)/app/tetris/tetris \ -
boot/arch/arm32/loader/Makefile
r2f636b6 rf6b5593 93 93 $(USPACEDIR)/srv/fs/tmpfs/tmpfs \ 94 94 $(USPACEDIR)/srv/fs/fat/fat \ 95 $(USPACEDIR)/srv/bd/file_bd/file_bd 95 $(USPACEDIR)/srv/bd/file_bd/file_bd \ 96 $(USPACEDIR)/srv/part/mbr_part/mbr_part 96 97 ifeq ($(MACHINE),testarm) 97 98 RD_SRVS += \ … … 100 101 101 102 RD_APPS = \ 103 $(USPACEDIR)/app/edit/edit \ 102 104 $(USPACEDIR)/app/getvc/getvc \ 103 105 $(USPACEDIR)/app/tetris/tetris \ -
boot/arch/ia32/Makefile.inc
r2f636b6 rf6b5593 52 52 $(USPACEDIR)/srv/fs/fat/fat \ 53 53 $(USPACEDIR)/srv/bd/ata_bd/ata_bd \ 54 $(USPACEDIR)/srv/bd/file_bd/file_bd 54 $(USPACEDIR)/srv/bd/file_bd/file_bd \ 55 $(USPACEDIR)/srv/part/mbr_part/mbr_part 55 56 56 57 RD_APPS = \ 58 $(USPACEDIR)/app/edit/edit \ 57 59 $(USPACEDIR)/app/getvc/getvc \ 58 60 $(USPACEDIR)/app/tetris/tetris \ -
boot/arch/ia64/loader/Makefile
r2f636b6 rf6b5593 51 51 endif 52 52 53 ifeq ($(COMPILER),icc _native)53 ifeq ($(COMPILER),icc) 54 54 CC = icc 55 55 AS = as … … 104 104 $(USPACEDIR)/srv/fs/tmpfs/tmpfs \ 105 105 $(USPACEDIR)/srv/fs/fat/fat \ 106 $(USPACEDIR)/srv/bd/file_bd/file_bd 106 $(USPACEDIR)/srv/bd/file_bd/file_bd \ 107 $(USPACEDIR)/srv/part/mbr_part/mbr_part 107 108 108 109 RD_APPS = \ 110 $(USPACEDIR)/app/edit/edit \ 109 111 $(USPACEDIR)/app/getvc/getvc \ 110 112 $(USPACEDIR)/app/tetris/tetris \ -
boot/arch/mips32/loader/Makefile
r2f636b6 rf6b5593 106 106 $(USPACEDIR)/srv/fs/fat/fat \ 107 107 $(USPACEDIR)/srv/bd/file_bd/file_bd \ 108 $(USPACEDIR)/srv/bd/gxe_bd/gxe_bd 108 $(USPACEDIR)/srv/bd/gxe_bd/gxe_bd \ 109 $(USPACEDIR)/srv/part/mbr_part/mbr_part 109 110 110 111 RD_APPS = \ 112 $(USPACEDIR)/app/edit/edit \ 111 113 $(USPACEDIR)/app/getvc/getvc \ 112 114 $(USPACEDIR)/app/tetris/tetris \ -
boot/arch/ppc32/loader/Makefile
r2f636b6 rf6b5593 94 94 $(USPACEDIR)/srv/fs/tmpfs/tmpfs \ 95 95 $(USPACEDIR)/srv/fs/fat/fat \ 96 $(USPACEDIR)/srv/bd/file_bd/file_bd 96 $(USPACEDIR)/srv/bd/file_bd/file_bd \ 97 $(USPACEDIR)/srv/part/mbr_part/mbr_part 97 98 98 99 RD_APPS = \ 100 $(USPACEDIR)/app/edit/edit \ 99 101 $(USPACEDIR)/app/getvc/getvc \ 100 102 $(USPACEDIR)/app/tetris/tetris \ -
boot/arch/sparc64/loader/Makefile
r2f636b6 rf6b5593 104 104 $(USPACEDIR)/srv/fs/devfs/devfs \ 105 105 $(USPACEDIR)/srv/fs/tmpfs/tmpfs \ 106 $(USPACEDIR)/srv/bd/file_bd/file_bd 106 $(USPACEDIR)/srv/bd/file_bd/file_bd \ 107 $(USPACEDIR)/srv/part/mbr_part/mbr_part 107 108 108 109 ifeq ($(MACHINE),generic) … … 114 115 115 116 RD_APPS = \ 117 $(USPACEDIR)/app/edit/edit \ 116 118 $(USPACEDIR)/app/getvc/getvc \ 117 119 $(USPACEDIR)/app/tetris/tetris \ -
kernel/Makefile
r2f636b6 rf6b5593 54 54 ICC_CFLAGS = -I$(INCLUDES) -O$(OPTIMIZATION) -imacros ../config.h \ 55 55 -fno-builtin -Wall -Wmissing-prototypes -Werror \ 56 -nostdlib -nostdinc \ 57 -wd170 56 -nostdlib -nostdinc -wd170 58 57 59 58 SUNCC_CFLAGS = -I$(INCLUDES) -xO$(OPTIMIZATION) \ 60 59 -xnolib -xc99=all -features=extensions \ 61 60 -erroff=E_ZERO_SIZED_STRUCT_UNION 61 62 CLANG_CFLAGS = -I$(INCLUDES) -O$(OPTIMIZATION) -imacros ../config.h \ 63 -fexec-charset=UTF-8 -fwide-exec-charset=UTF-32$(ENDIANESS) \ 64 -finput-charset=UTF-8 -fno-builtin -Wall -Wextra -Wno-unused-parameter \ 65 -Wmissing-prototypes -nostdlib -nostdinc -pipe 62 66 63 67 LFLAGS = -M … … 109 113 endif 110 114 111 ifeq ($(COMPILER),icc_native) 115 ifeq ($(COMPILER),gcc_cross) 116 CC = $(TOOLCHAIN_DIR)/bin/$(TARGET)-gcc 117 GCC = $(CC) 118 AS = $(TOOLCHAIN_DIR)/bin/$(TARGET)-as 119 LD = $(TOOLCHAIN_DIR)/bin/$(TARGET)-ld 120 OBJCOPY = $(TOOLCHAIN_DIR)/bin/$(TARGET)-objcopy 121 OBJDUMP = $(TOOLCHAIN_DIR)/bin/$(TARGET)-objdump 122 LIBDIR = $(TOOLCHAIN_DIR)/lib 123 CFLAGS = $(GCC_CFLAGS) 124 DEPEND_DEFS = $(DEFS) $(CONFIG_DEFS) 125 endif 126 127 ifeq ($(COMPILER),icc) 112 128 CC = icc 113 129 GCC = gcc … … 121 137 endif 122 138 123 ifeq ($(COMPILER),suncc _native)139 ifeq ($(COMPILER),suncc) 124 140 CC = suncc 125 141 GCC = gcc … … 134 150 endif 135 151 136 ifeq ($(COMPILER), gcc_cross)137 CC = $(TOOLCHAIN_DIR)/bin/$(TARGET)-gcc138 GCC = $(CC)139 AS = $( TOOLCHAIN_DIR)/bin/$(TARGET)-as140 LD = $( TOOLCHAIN_DIR)/bin/$(TARGET)-ld141 OBJCOPY = $( TOOLCHAIN_DIR)/bin/$(TARGET)-objcopy142 OBJDUMP = $( TOOLCHAIN_DIR)/bin/$(TARGET)-objdump143 LIBDIR = $(TOOLCHAIN_DIR)/lib144 CFLAGS = $( GCC_CFLAGS)152 ifeq ($(COMPILER),clang) 153 CC = clang 154 GCC = gcc 155 AS = $(BINUTILS_PREFIX)as 156 LD = $(BINUTILS_PREFIX)ld 157 OBJCOPY = $(BINUTILS_PREFIX)objcopy 158 OBJDUMP = $(BINUTILS_PREFIX)objdump 159 LIBDIR = /usr/lib 160 CFLAGS = $(CLANG_CFLAGS) 145 161 DEPEND_DEFS = $(DEFS) $(CONFIG_DEFS) 146 162 endif -
kernel/arch/arm32/include/elf.h
r2f636b6 rf6b5593 27 27 */ 28 28 29 /** @addtogroup arm32 29 /** @addtogroup arm32 30 30 * @{ 31 31 */ … … 37 37 #define KERN_arm32_ELF_H_ 38 38 39 #define ELF_MACHINEEM_ARM39 #define ELF_MACHINE EM_ARM 40 40 41 #ifdef BIG_ENDIAN42 #define ELF_DATA_ENCODINGELFDATA2MSB41 #ifdef __BE__ 42 #define ELF_DATA_ENCODING ELFDATA2MSB 43 43 #else 44 #define ELF_DATA_ENCODINGELFDATA2LSB44 #define ELF_DATA_ENCODING ELFDATA2LSB 45 45 #endif 46 46 47 #define ELF_CLASS 47 #define ELF_CLASS ELFCLASS32 48 48 49 49 #endif -
kernel/arch/ia32/Makefile.inc
r2f636b6 rf6b5593 43 43 ICC_CFLAGS += $(CMN1) 44 44 SUNCC_CFLAGS += $(CMN1) 45 CLANG_CFLAGS += $(CMN1) 45 46 46 47 ## Accepted CPUs … … 71 72 GCC_CFLAGS += $(CMN2) 72 73 ICC_CFLAGS += $(CMN2) 74 CLANG_CFLAGS += $(CMN2) 73 75 74 76 ARCH_SOURCES = \ -
kernel/arch/ia64/_link.ld.in
r2f636b6 rf6b5593 7 7 */ 8 8 9 #define LOAD_ADDRESS_V 0xe000000004404000 10 #define LOAD_ADDRESS_P 0x0000000004404000 11 9 12 ENTRY(kernel_image_start) 10 13 11 14 SECTIONS { 12 .image 0xe000000004404000: AT (0x0000000004404000) {15 .image LOAD_ADDRESS_V: AT (LOAD_ADDRESS_P) { 13 16 ktext_start = .; 14 17 *(K_TEXT_START); … … 21 24 *(.opd) 22 25 *(.data .data.*) 26 hardcoded_load_address = .; 27 QUAD(LOAD_ADDRESS_V); 28 hardcoded_ktext_size = .; 29 QUAD(ktext_end - ktext_start); 30 hardcoded_kdata_size = .; 31 QUAD(kdata_end - kdata_start); 23 32 *(.got .got.*) 24 33 *(.sdata) … … 38 47 } 39 48 40 _hardcoded_ktext_size = ktext_end - ktext_start;41 _hardcoded_kdata_size = kdata_end - kdata_start;42 _hardcoded_load_address = 0xe000000004404000;43 44 49 } -
kernel/arch/ia64/include/asm.h
r2f636b6 rf6b5593 27 27 */ 28 28 29 /** @addtogroup ia64 29 /** @addtogroup ia64 30 30 * @{ 31 31 */ … … 41 41 #include <arch/register.h> 42 42 43 #define IA64_IOSPACE_ADDRESS 0xE001000000000000ULL43 #define IA64_IOSPACE_ADDRESS 0xE001000000000000ULL 44 44 45 45 static inline void pio_write_8(ioport8_t *port, uint8_t v) 46 46 { 47 47 uintptr_t prt = (uintptr_t) port; 48 49 *((ioport8_t *) (IA64_IOSPACE_ADDRESS +48 49 *((ioport8_t *) (IA64_IOSPACE_ADDRESS + 50 50 ((prt & 0xfff) | ((prt >> 2) << 12)))) = v; 51 52 asm volatile ("mf\n" ::: "memory"); 51 52 asm volatile ( 53 "mf\n" 54 ::: "memory" 55 ); 53 56 } 54 57 … … 56 59 { 57 60 uintptr_t prt = (uintptr_t) port; 58 59 *((ioport16_t *) (IA64_IOSPACE_ADDRESS +61 62 *((ioport16_t *) (IA64_IOSPACE_ADDRESS + 60 63 ((prt & 0xfff) | ((prt >> 2) << 12)))) = v; 61 62 asm volatile ("mf\n" ::: "memory"); 64 65 asm volatile ( 66 "mf\n" 67 ::: "memory" 68 ); 63 69 } 64 70 … … 66 72 { 67 73 uintptr_t prt = (uintptr_t) port; 68 69 *((ioport32_t *) (IA64_IOSPACE_ADDRESS +74 75 *((ioport32_t *) (IA64_IOSPACE_ADDRESS + 70 76 ((prt & 0xfff) | ((prt >> 2) << 12)))) = v; 71 72 asm volatile ("mf\n" ::: "memory"); 77 78 asm volatile ( 79 "mf\n" 80 ::: "memory" 81 ); 73 82 } 74 83 … … 76 85 { 77 86 uintptr_t prt = (uintptr_t) port; 78 79 asm volatile ("mf\n" ::: "memory"); 80 81 return *((ioport8_t *)(IA64_IOSPACE_ADDRESS + 87 88 asm volatile ( 89 "mf\n" 90 ::: "memory" 91 ); 92 93 return *((ioport8_t *) (IA64_IOSPACE_ADDRESS + 82 94 ((prt & 0xfff) | ((prt >> 2) << 12)))); 83 95 } … … 86 98 { 87 99 uintptr_t prt = (uintptr_t) port; 88 89 asm volatile ("mf\n" ::: "memory"); 90 91 return *((ioport16_t *)(IA64_IOSPACE_ADDRESS + 100 101 asm volatile ( 102 "mf\n" 103 ::: "memory" 104 ); 105 106 return *((ioport16_t *) (IA64_IOSPACE_ADDRESS + 92 107 ((prt & 0xfff) | ((prt >> 2) << 12)))); 93 108 } … … 96 111 { 97 112 uintptr_t prt = (uintptr_t) port; 98 99 asm volatile ("mf\n" ::: "memory"); 100 101 return *((ioport32_t *)(IA64_IOSPACE_ADDRESS 
+ 113 114 asm volatile ( 115 "mf\n" 116 ::: "memory" 117 ); 118 119 return *((ioport32_t *) (IA64_IOSPACE_ADDRESS + 102 120 ((prt & 0xfff) | ((prt >> 2) << 12)))); 103 121 } … … 112 130 { 113 131 uint64_t v; 114 115 //I'm not sure why but this code bad inlines in scheduler, 116 //so THE shifts about 16B and causes kernel panic 117 //asm volatile ("and %0 = %1, r12" : "=r" (v) : "r" (~(STACK_SIZE-1))); 118 //return v; 119 120 //this code have the same meaning but inlines well 121 asm volatile ("mov %0 = r12" : "=r" (v) ); 122 return v & (~(STACK_SIZE-1)); 132 133 /* I'm not sure why but this code bad inlines in scheduler, 134 so THE shifts about 16B and causes kernel panic 135 136 asm volatile ( 137 "and %[value] = %[mask], r12" 138 : [value] "=r" (v) 139 : [mask] "r" (~(STACK_SIZE - 1)) 140 ); 141 return v; 142 143 This code have the same meaning but inlines well. 144 */ 145 146 asm volatile ( 147 "mov %[value] = r12" 148 : [value] "=r" (v) 149 ); 150 151 return (v & (~(STACK_SIZE - 1))); 123 152 } 124 153 … … 131 160 uint64_t v; 132 161 133 asm volatile ("mov %0 = psr\n" : "=r" (v)); 162 asm volatile ( 163 "mov %[value] = psr\n" 164 : [value] "=r" (v) 165 ); 134 166 135 167 return v; … … 144 176 uint64_t v; 145 177 146 asm volatile ("mov %0 = cr.iva\n" : "=r" (v)); 178 asm volatile ( 179 "mov %[value] = cr.iva\n" 180 : [value] "=r" (v) 181 ); 147 182 148 183 return v; … … 155 190 static inline void iva_write(uint64_t v) 156 191 { 157 asm volatile ("mov cr.iva = %0\n" : : "r" (v)); 192 asm volatile ( 193 "mov cr.iva = %[value]\n" 194 :: [value] "r" (v) 195 ); 158 196 } 159 197 … … 167 205 uint64_t v; 168 206 169 asm volatile ("mov %0 = cr.ivr\n" : "=r" (v)); 207 asm volatile ( 208 "mov %[value] = cr.ivr\n" 209 : [value] "=r" (v) 210 ); 170 211 171 212 return v; … … 176 217 uint64_t v; 177 218 178 asm volatile ("mov %0 = cr64\n" : "=r" (v)); 219 asm volatile ( 220 "mov %[value] = cr64\n" 221 : [value] "=r" (v) 222 ); 179 223 180 224 return v; … … 188 232 static 
inline void itc_write(uint64_t v) 189 233 { 190 asm volatile ("mov ar.itc = %0\n" : : "r" (v)); 234 asm volatile ( 235 "mov ar.itc = %[value]\n" 236 :: [value] "r" (v) 237 ); 191 238 } 192 239 … … 199 246 uint64_t v; 200 247 201 asm volatile ("mov %0 = ar.itc\n" : "=r" (v)); 248 asm volatile ( 249 "mov %[value] = ar.itc\n" 250 : [value] "=r" (v) 251 ); 202 252 203 253 return v; … … 210 260 static inline void itm_write(uint64_t v) 211 261 { 212 asm volatile ("mov cr.itm = %0\n" : : "r" (v)); 262 asm volatile ( 263 "mov cr.itm = %[value]\n" 264 :: [value] "r" (v) 265 ); 213 266 } 214 267 … … 221 274 uint64_t v; 222 275 223 asm volatile ("mov %0 = cr.itm\n" : "=r" (v)); 276 asm volatile ( 277 "mov %[value] = cr.itm\n" 278 : [value] "=r" (v) 279 ); 224 280 225 281 return v; … … 234 290 uint64_t v; 235 291 236 asm volatile ("mov %0 = cr.itv\n" : "=r" (v)); 292 asm volatile ( 293 "mov %[value] = cr.itv\n" 294 : [value] "=r" (v) 295 ); 237 296 238 297 return v; … … 245 304 static inline void itv_write(uint64_t v) 246 305 { 247 asm volatile ("mov cr.itv = %0\n" : : "r" (v)); 306 asm volatile ( 307 "mov cr.itv = %[value]\n" 308 :: [value] "r" (v) 309 ); 248 310 } 249 311 … … 254 316 static inline void eoi_write(uint64_t v) 255 317 { 256 asm volatile ("mov cr.eoi = %0\n" : : "r" (v)); 318 asm volatile ( 319 "mov cr.eoi = %[value]\n" 320 :: [value] "r" (v) 321 ); 257 322 } 258 323 … … 264 329 { 265 330 uint64_t v; 266 267 asm volatile ("mov %0 = cr.tpr\n" : "=r" (v)); 331 332 asm volatile ( 333 "mov %[value] = cr.tpr\n" 334 : [value] "=r" (v) 335 ); 268 336 269 337 return v; … … 276 344 static inline void tpr_write(uint64_t v) 277 345 { 278 asm volatile ("mov cr.tpr = %0\n" : : "r" (v)); 346 asm volatile ( 347 "mov cr.tpr = %[value]\n" 348 :: [value] "r" (v) 349 ); 279 350 } 280 351 … … 291 362 292 363 asm volatile ( 293 "mov % 0= psr\n"294 "rsm % 1\n"295 : "=r" (v)296 : "i" (PSR_I_MASK)364 "mov %[value] = psr\n" 365 "rsm %[mask]\n" 366 : [value] "=r" (v) 367 : [mask] "i" 
(PSR_I_MASK) 297 368 ); 298 369 … … 312 383 313 384 asm volatile ( 314 "mov % 0= psr\n"315 "ssm % 1\n"385 "mov %[value] = psr\n" 386 "ssm %[mask]\n" 316 387 ";;\n" 317 388 "srlz.d\n" 318 : "=r" (v)319 : "i" (PSR_I_MASK)389 : [value] "=r" (v) 390 : [mask] "i" (PSR_I_MASK) 320 391 ); 321 392 … … 349 420 static inline void pk_disable(void) 350 421 { 351 asm volatile ("rsm %0\n" : : "i" (PSR_PK_MASK)); 422 asm volatile ( 423 "rsm %[mask]\n" 424 ";;\n" 425 "srlz.d\n" 426 :: [mask] "i" (PSR_PK_MASK) 427 ); 352 428 } 353 429 -
kernel/arch/ia64/include/atomic.h
r2f636b6 rf6b5593 27 27 */ 28 28 29 /** @addtogroup ia64 29 /** @addtogroup ia64 30 30 * @{ 31 31 */ … … 36 36 #define KERN_ia64_ATOMIC_H_ 37 37 38 /** Atomic addition.39 *40 * @param val Atomic value.41 * @param imm Value to add.42 *43 * @return Value before addition.44 */45 static inline long atomic_add(atomic_t *val, int imm)46 {47 long v;48 49 asm volatile ("fetchadd8.rel %0 = %1, %2\n" : "=r" (v),50 "+m" (val->count) : "i" (imm));51 52 return v;53 }54 55 38 static inline uint64_t test_and_set(atomic_t *val) 56 39 { … … 58 41 59 42 asm volatile ( 60 "movl %0 = 0x1;;\n" 61 "xchg8 %0 = %1, %0;;\n" 62 : "=r" (v), "+m" (val->count) 43 "movl %[v] = 0x1;;\n" 44 "xchg8 %[v] = %[count], %[v];;\n" 45 : [v] "=r" (v), 46 [count] "+m" (val->count) 63 47 ); 64 48 … … 76 60 static inline void atomic_inc(atomic_t *val) 77 61 { 78 atomic_add(val, 1); 62 long v; 63 64 asm volatile ( 65 "fetchadd8.rel %[v] = %[count], 1\n" 66 : [v] "=r" (v), 67 [count] "+m" (val->count) 68 ); 79 69 } 80 70 81 71 static inline void atomic_dec(atomic_t *val) 82 72 { 83 atomic_add(val, -1); 73 long v; 74 75 asm volatile ( 76 "fetchadd8.rel %[v] = %[count], -1\n" 77 : [v] "=r" (v), 78 [count] "+m" (val->count) 79 ); 84 80 } 85 81 86 82 static inline long atomic_preinc(atomic_t *val) 87 83 { 88 return atomic_add(val, 1) + 1; 84 long v; 85 86 asm volatile ( 87 "fetchadd8.rel %[v] = %[count], 1\n" 88 : [v] "=r" (v), 89 [count] "+m" (val->count) 90 ); 91 92 return (v + 1); 89 93 } 90 94 91 95 static inline long atomic_predec(atomic_t *val) 92 96 { 93 return atomic_add(val, -1) - 1; 97 long v; 98 99 asm volatile ( 100 "fetchadd8.rel %[v] = %[count], -1\n" 101 : [v] "=r" (v), 102 [count] "+m" (val->count) 103 ); 104 105 return (v - 1); 94 106 } 95 107 96 108 static inline long atomic_postinc(atomic_t *val) 97 109 { 98 return atomic_add(val, 1); 110 long v; 111 112 asm volatile ( 113 "fetchadd8.rel %[v] = %[count], 1\n" 114 : [v] "=r" (v), 115 [count] "+m" (val->count) 116 ); 117 118 return v; 99 119 } 100 
120 101 121 static inline long atomic_postdec(atomic_t *val) 102 122 { 103 return atomic_add(val, -1); 123 long v; 124 125 asm volatile ( 126 "fetchadd8.rel %[v] = %[count], -1\n" 127 : [v] "=r" (v), 128 [count] "+m" (val->count) 129 ); 130 131 return v; 104 132 } 105 133 -
kernel/arch/ia64/include/interrupt.h
r2f636b6 rf6b5593 40 40 41 41 /** ia64 has 256 INRs. */ 42 #define INR_COUNT 42 #define INR_COUNT 256 43 43 44 44 /* … … 47 47 * to genarch. 48 48 */ 49 #define IVT_ITEMS 50 #define IVT_FIRST 49 #define IVT_ITEMS 0 50 #define IVT_FIRST 0 51 51 52 52 /** External Interrupt vectors. */ 53 53 54 #define VECTOR_TLB_SHOOTDOWN_IPI 0xf0 55 #define INTERRUPT_TIMER 255 56 #define IRQ_KBD (0x01 + LEGACY_INTERRUPT_BASE) 57 #define IRQ_MOUSE (0x0c + LEGACY_INTERRUPT_BASE) 58 #define INTERRUPT_SPURIOUS 15 59 #define LEGACY_INTERRUPT_BASE 0x20 54 #define VECTOR_TLB_SHOOTDOWN_IPI 0xf0 55 56 #define INTERRUPT_SPURIOUS 15 57 #define INTERRUPT_TIMER 255 58 59 #define LEGACY_INTERRUPT_BASE 0x20 60 61 #define IRQ_KBD (0x01 + LEGACY_INTERRUPT_BASE) 62 #define IRQ_MOUSE (0x0c + LEGACY_INTERRUPT_BASE) 60 63 61 64 /** General Exception codes. */ 62 #define GE_ILLEGALOP 63 #define GE_PRIVOP 64 #define GE_PRIVREG 65 #define GE_RESREGFLD 66 #define GE_DISBLDISTRAN 67 #define GE_ILLEGALDEP 65 #define GE_ILLEGALOP 0 66 #define GE_PRIVOP 1 67 #define GE_PRIVREG 2 68 #define GE_RESREGFLD 3 69 #define GE_DISBLDISTRAN 4 70 #define GE_ILLEGALDEP 8 68 71 69 #define EOI 0/**< The actual value doesn't matter. */72 #define EOI 0 /**< The actual value doesn't matter. */ 70 73 71 74 typedef struct { … … 100 103 uint128_t f30; 101 104 uint128_t f31; 102 105 103 106 uintptr_t ar_bsp; 104 107 uintptr_t ar_bspstore; … … 132 135 { 133 136 istate->cr_iip = retaddr; 134 istate->cr_ipsr.ri = 0; 137 istate->cr_ipsr.ri = 0; /* return to instruction slot #0 */ 135 138 } 136 139 -
kernel/arch/ia64/include/mm/as.h
r2f636b6 rf6b5593 27 27 */ 28 28 29 /** @addtogroup ia64mm 29 /** @addtogroup ia64mm 30 30 * @{ 31 31 */ … … 36 36 #define KERN_ia64_AS_H_ 37 37 38 #define KERNEL_ADDRESS_SPACE_SHADOWED_ARCH 38 #define KERNEL_ADDRESS_SPACE_SHADOWED_ARCH 0 39 39 40 #define KERNEL_ADDRESS_SPACE_START_ARCH (unsigned long) 0xe000000000000000ULL41 #define KERNEL_ADDRESS_SPACE_END_ARCH (unsigned long) 0xffffffffffffffffULL42 #define USER_ADDRESS_SPACE_START_ARCH (unsigned long) 0x0000000000000000ULL43 #define USER_ADDRESS_SPACE_END_ARCH (unsigned long) 0xdfffffffffffffffULL40 #define KERNEL_ADDRESS_SPACE_START_ARCH ((unsigned long) 0xe000000000000000ULL) 41 #define KERNEL_ADDRESS_SPACE_END_ARCH ((unsigned long) 0xffffffffffffffffULL) 42 #define USER_ADDRESS_SPACE_START_ARCH ((unsigned long) 0x0000000000000000ULL) 43 #define USER_ADDRESS_SPACE_END_ARCH ((unsigned long) 0xdfffffffffffffffULL) 44 44 45 #define USTACK_ADDRESS_ARCH 45 #define USTACK_ADDRESS_ARCH 0x0000000ff0000000ULL 46 46 47 47 typedef struct { … … 50 50 #include <genarch/mm/as_ht.h> 51 51 52 #define as_constructor_arch(as, flags) 53 #define as_destructor_arch(as) 54 #define as_create_arch(as, flags) 52 #define as_constructor_arch(as, flags) (as != as) 53 #define as_destructor_arch(as) (as != as) 54 #define as_create_arch(as, flags) (as != as) 55 55 #define as_deinstall_arch(as) 56 56 #define as_invalidate_translation_cache(as, page, cnt) -
kernel/arch/ia64/include/mm/page.h
r2f636b6 rf6b5593 28 28 */ 29 29 30 /** @addtogroup ia64mm 30 /** @addtogroup ia64mm 31 31 * @{ 32 32 */ … … 39 39 #include <arch/mm/frame.h> 40 40 41 #define PAGE_SIZE 42 #define PAGE_WIDTH 41 #define PAGE_SIZE FRAME_SIZE 42 #define PAGE_WIDTH FRAME_WIDTH 43 43 44 44 #ifdef KERNEL 45 45 46 46 /** Bit width of the TLB-locked portion of kernel address space. */ 47 #define KERNEL_PAGE_WIDTH 28/* 256M */48 #define IO_PAGE_WIDTH 26/* 64M */49 #define FW_PAGE_WIDTH 28/* 256M */50 51 #define USPACE_IO_PAGE_WIDTH 12/* 4K */47 #define KERNEL_PAGE_WIDTH 28 /* 256M */ 48 #define IO_PAGE_WIDTH 26 /* 64M */ 49 #define FW_PAGE_WIDTH 28 /* 256M */ 50 51 #define USPACE_IO_PAGE_WIDTH 12 /* 4K */ 52 52 53 53 … … 59 59 60 60 /* Firmware area (bellow 4GB in phys mem) */ 61 #define FW_OFFSET 61 #define FW_OFFSET 0x00000000F0000000 62 62 /* Legacy IO space */ 63 #define IO_OFFSET 63 #define IO_OFFSET 0x0001000000000000 64 64 /* Videoram - now mapped to 0 as VGA text mode vram on 0xb8000 */ 65 #define VIO_OFFSET 66 67 68 #define PPN_SHIFT 69 70 #define VRN_SHIFT 71 #define VRN_MASK (7LL << VRN_SHIFT)72 #define VA2VRN(va) ((va)>>VRN_SHIFT)65 #define VIO_OFFSET 0x0002000000000000 66 67 68 #define PPN_SHIFT 12 69 70 #define VRN_SHIFT 61 71 #define VRN_MASK (7ULL << VRN_SHIFT) 72 #define VA2VRN(va) ((va) >> VRN_SHIFT) 73 73 74 74 #ifdef __ASM__ 75 #define VRN_KERNEL775 #define VRN_KERNEL 7 76 76 #else 77 #define VRN_KERNEL 7LL77 #define VRN_KERNEL 7ULL 78 78 #endif 79 79 80 #define REGION_REGISTERS 81 82 #define KA2PA(x) ((uintptr_t) (x- (VRN_KERNEL << VRN_SHIFT)))83 #define PA2KA(x) ((uintptr_t) (x+ (VRN_KERNEL << VRN_SHIFT)))84 85 #define VHPT_WIDTH 20/* 1M */86 #define VHPT_SIZE 87 88 #define PTA_BASE_SHIFT 80 #define REGION_REGISTERS 8 81 82 #define KA2PA(x) ((uintptr_t) ((x) - (VRN_KERNEL << VRN_SHIFT))) 83 #define PA2KA(x) ((uintptr_t) ((x) + (VRN_KERNEL << VRN_SHIFT))) 84 85 #define VHPT_WIDTH 20 /* 1M */ 86 #define VHPT_SIZE (1 << VHPT_WIDTH) 87 88 #define PTA_BASE_SHIFT 15 89 89 90 
90 /** Memory Attributes. */ 91 #define MA_WRITEBACK 0x092 #define MA_UNCACHEABLE 0x491 #define MA_WRITEBACK 0x00 92 #define MA_UNCACHEABLE 0x04 93 93 94 94 /** Privilege Levels. Only the most and the least privileged ones are ever used. */ 95 #define PL_KERNEL 0x096 #define PL_USER 0x395 #define PL_KERNEL 0x00 96 #define PL_USER 0x03 97 97 98 98 /* Access Rigths. Only certain combinations are used by the kernel. */ 99 #define AR_READ 0x0100 #define AR_EXECUTE 0x1101 #define AR_WRITE 0x299 #define AR_READ 0x00 100 #define AR_EXECUTE 0x01 101 #define AR_WRITE 0x02 102 102 103 103 #ifndef __ASM__ … … 113 113 struct vhpt_tag_info { 114 114 unsigned long long tag : 63; 115 unsigned ti : 1;115 unsigned int ti : 1; 116 116 } __attribute__ ((packed)); 117 117 … … 123 123 struct vhpt_entry_present { 124 124 /* Word 0 */ 125 unsigned p : 1;126 unsigned : 1;127 unsigned ma : 3;128 unsigned a : 1;129 unsigned d : 1;130 unsigned pl : 2;131 unsigned ar : 3;125 unsigned int p : 1; 126 unsigned int : 1; 127 unsigned int ma : 3; 128 unsigned int a : 1; 129 unsigned int d : 1; 130 unsigned int pl : 2; 131 unsigned int ar : 3; 132 132 unsigned long long ppn : 38; 133 unsigned : 2;134 unsigned ed : 1;135 unsigned i g1 : 11;133 unsigned int : 2; 134 unsigned int ed : 1; 135 unsigned int ig1 : 11; 136 136 137 137 /* Word 1 */ 138 unsigned : 2;139 unsigned ps : 6;140 unsigned key : 24;141 unsigned : 32;138 unsigned int : 2; 139 unsigned int ps : 6; 140 unsigned int key : 24; 141 unsigned int : 32; 142 142 143 143 /* Word 2 */ 144 144 union vhpt_tag tag; 145 145 146 /* Word 3 */ 146 /* Word 3 */ 147 147 uint64_t ig3 : 64; 148 148 } __attribute__ ((packed)); … … 150 150 struct vhpt_entry_not_present { 151 151 /* Word 0 */ 152 unsigned p : 1;152 unsigned int p : 1; 153 153 unsigned long long ig0 : 52; 154 unsigned i g1 : 11;154 unsigned int ig1 : 11; 155 155 156 156 /* Word 1 */ 157 unsigned : 2;158 unsigned ps : 6;157 unsigned int : 2; 158 unsigned int ps : 6; 159 159 unsigned long long 
ig2 : 56; 160 160 161 161 /* Word 2 */ 162 162 union vhpt_tag tag; 163 163 164 /* Word 3 */ 164 /* Word 3 */ 165 165 uint64_t ig3 : 64; 166 166 } __attribute__ ((packed)); 167 167 168 typedef union vhpt_entry{168 typedef union { 169 169 struct vhpt_entry_present present; 170 170 struct vhpt_entry_not_present not_present; … … 173 173 174 174 struct region_register_map { 175 unsigned ve : 1;176 unsigned : 1;177 unsigned ps : 6;178 unsigned rid : 24;179 unsigned : 32;180 } __attribute__ ((packed)); 181 182 typedef union region_register{175 unsigned int ve : 1; 176 unsigned int : 1; 177 unsigned int ps : 6; 178 unsigned int rid : 24; 179 unsigned int : 32; 180 } __attribute__ ((packed)); 181 182 typedef union { 183 183 struct region_register_map map; 184 184 unsigned long long word; 185 } region_register ;185 } region_register_t; 186 186 187 187 struct pta_register_map { 188 unsigned ve : 1;189 unsigned : 1;190 unsigned size : 6;191 unsigned vf : 1;192 unsigned : 6;188 unsigned int ve : 1; 189 unsigned int : 1; 190 unsigned int size : 6; 191 unsigned int vf : 1; 192 unsigned int : 6; 193 193 unsigned long long base : 49; 194 194 } __attribute__ ((packed)); … … 197 197 struct pta_register_map map; 198 198 uint64_t word; 199 } pta_register ;199 } pta_register_t; 200 200 201 201 /** Return Translation Hashed Entry Address. 
… … 211 211 { 212 212 uint64_t ret; 213 214 asm volatile ("thash %0 = %1\n" : "=r" (ret) : "r" (va)); 215 213 214 asm volatile ( 215 "thash %[ret] = %[va]\n" 216 : [ret] "=r" (ret) 217 : [va] "r" (va) 218 ); 219 216 220 return ret; 217 221 } … … 229 233 { 230 234 uint64_t ret; 231 232 asm volatile ("ttag %0 = %1\n" : "=r" (ret) : "r" (va)); 233 235 236 asm volatile ( 237 "ttag %[ret] = %[va]\n" 238 : [ret] "=r" (ret) 239 : [va] "r" (va) 240 ); 241 234 242 return ret; 235 243 } … … 244 252 { 245 253 uint64_t ret; 254 246 255 ASSERT(i < REGION_REGISTERS); 247 asm volatile ("mov %0 = rr[%1]\n" : "=r" (ret) : "r" (i << VRN_SHIFT)); 256 257 asm volatile ( 258 "mov %[ret] = rr[%[index]]\n" 259 : [ret] "=r" (ret) 260 : [index] "r" (i << VRN_SHIFT) 261 ); 262 248 263 return ret; 249 264 } … … 257 272 { 258 273 ASSERT(i < REGION_REGISTERS); 259 asm volatile ( 260 "mov rr[%0] = %1\n" 261 : 262 : "r" (i << VRN_SHIFT), "r" (v) 263 ); 264 } 265 274 275 asm volatile ( 276 "mov rr[%[index]] = %[value]\n" 277 :: [index] "r" (i << VRN_SHIFT), 278 [value] "r" (v) 279 ); 280 } 281 266 282 /** Read Page Table Register. 267 283 * … … 272 288 uint64_t ret; 273 289 274 asm volatile ("mov %0 = cr.pta\n" : "=r" (ret)); 290 asm volatile ( 291 "mov %[ret] = cr.pta\n" 292 : [ret] "=r" (ret) 293 ); 275 294 276 295 return ret; … … 283 302 static inline void pta_write(uint64_t v) 284 303 { 285 asm volatile ("mov cr.pta = %0\n" : : "r" (v)); 304 asm volatile ( 305 "mov cr.pta = %[value]\n" 306 :: [value] "r" (v) 307 ); 286 308 } 287 309 -
kernel/arch/ia64/include/mm/tlb.h
r2f636b6 rf6b5593 27 27 */ 28 28 29 /** @addtogroup ia64mm 29 /** @addtogroup ia64mm 30 30 * @{ 31 31 */ … … 42 42 43 43 /** Data and instruction Translation Register indices. */ 44 #define DTR_KERNEL 45 #define ITR_KERNEL 46 #define DTR_KSTACK1 47 #define DTR_KSTACK2 44 #define DTR_KERNEL 0 45 #define ITR_KERNEL 0 46 #define DTR_KSTACK1 4 47 #define DTR_KSTACK2 5 48 48 49 49 /** Portion of TLB insertion format data structure. */ 50 union tlb_entry{50 typedef union { 51 51 uint64_t word[2]; 52 52 struct { 53 53 /* Word 0 */ 54 unsigned p : 1;/**< Present. */55 unsigned : 1;56 unsigned ma : 3;/**< Memory attribute. */57 unsigned a : 1;/**< Accessed. */58 unsigned d : 1;/**< Dirty. */59 unsigned pl : 2;/**< Privilege level. */60 unsigned ar : 3;/**< Access rights. */61 unsigned long long ppn : 38; 62 unsigned : 2;63 unsigned ed : 1;64 unsigned i g1 : 11;65 54 unsigned int p : 1; /**< Present. */ 55 unsigned int : 1; 56 unsigned int ma : 3; /**< Memory attribute. */ 57 unsigned int a : 1; /**< Accessed. */ 58 unsigned int d : 1; /**< Dirty. */ 59 unsigned int pl : 2; /**< Privilege level. */ 60 unsigned int ar : 3; /**< Access rights. */ 61 unsigned long long ppn : 38; /**< Physical Page Number, a.k.a. PFN. */ 62 unsigned int : 2; 63 unsigned int ed : 1; 64 unsigned int ig1 : 11; 65 66 66 /* Word 1 */ 67 unsigned : 2;68 unsigned ps : 6;/**< Page size will be 2^ps. */69 unsigned key : 24;/**< Protection key, unused. */70 unsigned : 32;67 unsigned int : 2; 68 unsigned int ps : 6; /**< Page size will be 2^ps. */ 69 unsigned int key : 24; /**< Protection key, unused. */ 70 unsigned int : 32; 71 71 } __attribute__ ((packed)); 72 } __attribute__ ((packed)); 73 typedef union tlb_entry tlb_entry_t; 72 } __attribute__ ((packed)) tlb_entry_t; 74 73 75 74 extern void tc_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, bool dtc); -
kernel/arch/ia64/include/register.h
r2f636b6 rf6b5593 27 27 */ 28 28 29 /** @addtogroup ia64 29 /** @addtogroup ia64 30 30 * @{ 31 31 */ … … 36 36 #define KERN_ia64_REGISTER_H_ 37 37 38 #define CR_IVR_MASK 0xf 39 #define PSR_IC_MASK 0x2000 40 #define PSR_I_MASK 0x4000 41 #define PSR_PK_MASK 0x8000 42 43 #define PSR_DT_MASK (1 << 17) 44 #define PSR_RT_MASK (1 << 27) 45 46 #define PSR_DFL_MASK (1 << 18) 47 #define PSR_DFH_MASK (1 << 19) 48 49 #define PSR_IT_MASK 0x0000001000000000 50 51 #define PSR_CPL_SHIFT 32 52 #define PSR_CPL_MASK_SHIFTED 3 53 54 #define PFM_MASK (~0x3fffffffff) 55 56 #define RSC_MODE_MASK 3 57 #define RSC_PL_MASK 12 38 #define DCR_PP_MASK (1 << 0) 39 #define DCR_BE_MASK (1 << 1) 40 #define DCR_LC_MASK (1 << 2) 41 #define DCR_DM_MASK (1 << 8) 42 #define DCR_DP_MASK (1 << 9) 43 #define DCR_DK_MASK (1 << 10) 44 #define DCR_DX_MASK (1 << 11) 45 #define DCR_DR_MASK (1 << 12) 46 #define DCR_DA_MASK (1 << 13) 47 #define DCR_DD_MASK (1 << 14) 48 49 #define CR_IVR_MASK 0x0f 50 51 #define PSR_IC_MASK (1 << 13) 52 #define PSR_I_MASK (1 << 14) 53 #define PSR_PK_MASK (1 << 15) 54 #define PSR_DT_MASK (1 << 17) 55 #define PSR_DFL_MASK (1 << 18) 56 #define PSR_DFH_MASK (1 << 19) 57 #define PSR_RT_MASK (1 << 27) 58 #define PSR_IT_MASK (1 << 36) 59 60 #define PSR_CPL_SHIFT 32 61 #define PSR_CPL_MASK_SHIFTED 3 62 63 #define PFM_MASK (~0x3fffffffff) 64 65 #define RSC_MODE_MASK 3 66 #define RSC_PL_MASK 12 58 67 59 68 /** Application registers. 
*/ 60 #define AR_KR0 61 #define AR_KR1 62 #define AR_KR2 63 #define AR_KR3 64 #define AR_KR4 65 #define AR_KR5 66 #define AR_KR6 67 #define AR_KR7 68 /* AR 8-15reserved */69 #define AR_RSC 70 #define AR_BSP 71 #define AR_BSPSTORE 72 #define AR_RNAT 73 /* AR 20 reserved */74 #define AR_FCR 75 /* AR 22-23reserved */76 #define AR_EFLAG 77 #define AR_CSD 78 #define AR_SSD 79 #define AR_CFLG 80 #define AR_FSR 81 #define AR_FIR 82 #define AR_FDR 83 /* AR 31 reserved */84 #define AR_CCV 85 /* AR 33-35reserved */86 #define AR_UNAT 87 /* AR 37-39reserved */88 #define AR_FPSR 89 /* AR 41-43reserved */90 #define AR_ITC 91 /* AR 45-47reserved */92 /* AR 48-63ignored */93 #define AR_PFS 94 #define AR_LC 95 #define AR_EC 96 /* AR 67-111reserved */97 /* AR 112-127ignored */69 #define AR_KR0 0 70 #define AR_KR1 1 71 #define AR_KR2 2 72 #define AR_KR3 3 73 #define AR_KR4 4 74 #define AR_KR5 5 75 #define AR_KR6 6 76 #define AR_KR7 7 77 /* ARs 8-15 are reserved */ 78 #define AR_RSC 16 79 #define AR_BSP 17 80 #define AR_BSPSTORE 18 81 #define AR_RNAT 19 82 /* AR 20 is reserved */ 83 #define AR_FCR 21 84 /* ARs 22-23 are reserved */ 85 #define AR_EFLAG 24 86 #define AR_CSD 25 87 #define AR_SSD 26 88 #define AR_CFLG 27 89 #define AR_FSR 28 90 #define AR_FIR 29 91 #define AR_FDR 30 92 /* AR 31 is reserved */ 93 #define AR_CCV 32 94 /* ARs 33-35 are reserved */ 95 #define AR_UNAT 36 96 /* ARs 37-39 are reserved */ 97 #define AR_FPSR 40 98 /* ARs 41-43 are reserved */ 99 #define AR_ITC 44 100 /* ARs 45-47 are reserved */ 101 /* ARs 48-63 are ignored */ 102 #define AR_PFS 64 103 #define AR_LC 65 104 #define AR_EC 66 105 /* ARs 67-111 are reserved */ 106 /* ARs 112-127 are ignored */ 98 107 99 108 /** Control registers. 
*/ 100 #define CR_DCR 101 #define CR_ITM 102 #define CR_IVA 103 /* CR3-CR7 reserved */104 #define CR_PTA 105 /* CR9-CR15 reserved */106 #define CR_IPSR 107 #define CR_ISR 108 /* CR18 reserved */109 #define CR_IIP 110 #define CR_IFA 111 #define CR_ITIR 112 #define CR_IIPA 113 #define CR_IFS 114 #define CR_IIM 115 #define CR_IHA 116 /* CR26-CR63 reserved */117 #define CR_LID 118 #define CR_IVR 119 #define CR_TPR 120 #define CR_EOI 121 #define CR_IRR0 122 #define CR_IRR1 123 #define CR_IRR2 124 #define CR_IRR3 125 #define CR_ITV 126 #define CR_PMV 127 #define CR_CMCV 128 /* CR75-CR79 reserved */129 #define CR_LRR0 130 #define CR_LRR1 131 /* CR82-CR127 reserved */109 #define CR_DCR 0 110 #define CR_ITM 1 111 #define CR_IVA 2 112 /* CR3-CR7 are reserved */ 113 #define CR_PTA 8 114 /* CR9-CR15 are reserved */ 115 #define CR_IPSR 16 116 #define CR_ISR 17 117 /* CR18 is reserved */ 118 #define CR_IIP 19 119 #define CR_IFA 20 120 #define CR_ITIR 21 121 #define CR_IIPA 22 122 #define CR_IFS 23 123 #define CR_IIM 24 124 #define CR_IHA 25 125 /* CR26-CR63 are reserved */ 126 #define CR_LID 64 127 #define CR_IVR 65 128 #define CR_TPR 66 129 #define CR_EOI 67 130 #define CR_IRR0 68 131 #define CR_IRR1 69 132 #define CR_IRR2 70 133 #define CR_IRR3 71 134 #define CR_ITV 72 135 #define CR_PMV 73 136 #define CR_CMCV 74 137 /* CR75-CR79 are reserved */ 138 #define CR_LRR0 80 139 #define CR_LRR1 81 140 /* CR82-CR127 are reserved */ 132 141 133 142 #ifndef __ASM__ … … 136 145 137 146 /** Processor Status Register. */ 138 union psr { 139 uint64_t value; 140 struct { 141 unsigned : 1; 142 unsigned be : 1; /**< Big-Endian data accesses. */ 143 unsigned up : 1; /**< User Performance monitor enable. */ 144 unsigned ac : 1; /**< Alignment Check. */ 145 unsigned mfl : 1; /**< Lower floating-point register written. */ 146 unsigned mfh : 1; /**< Upper floating-point register written. */ 147 unsigned : 7; 148 unsigned ic : 1; /**< Interruption Collection. 
*/ 149 unsigned i : 1; /**< Interrupt Bit. */ 150 unsigned pk : 1; /**< Protection Key enable. */ 151 unsigned : 1; 152 unsigned dt : 1; /**< Data address Translation. */ 153 unsigned dfl : 1; /**< Disabled Floating-point Low register set. */ 154 unsigned dfh : 1; /**< Disabled Floating-point High register set. */ 155 unsigned sp : 1; /**< Secure Performance monitors. */ 156 unsigned pp : 1; /**< Privileged Performance monitor enable. */ 157 unsigned di : 1; /**< Disable Instruction set transition. */ 158 unsigned si : 1; /**< Secure Interval timer. */ 159 unsigned db : 1; /**< Debug Breakpoint fault. */ 160 unsigned lp : 1; /**< Lower Privilege transfer trap. */ 161 unsigned tb : 1; /**< Taken Branch trap. */ 162 unsigned rt : 1; /**< Register Stack Translation. */ 163 unsigned : 4; 164 unsigned cpl : 2; /**< Current Privilege Level. */ 165 unsigned is : 1; /**< Instruction Set. */ 166 unsigned mc : 1; /**< Machine Check abort mask. */ 167 unsigned it : 1; /**< Instruction address Translation. */ 168 unsigned id : 1; /**< Instruction Debug fault disable. */ 169 unsigned da : 1; /**< Disable Data Access and Dirty-bit faults. */ 170 unsigned dd : 1; /**< Data Debug fault disable. */ 171 unsigned ss : 1; /**< Single Step enable. */ 172 unsigned ri : 2; /**< Restart Instruction. */ 173 unsigned ed : 1; /**< Exception Deferral. */ 174 unsigned bn : 1; /**< Register Bank. */ 175 unsigned ia : 1; /**< Disable Instruction Access-bit faults. */ 176 } __attribute__ ((packed)); 177 }; 178 typedef union psr psr_t; 147 typedef union { 148 uint64_t value; 149 struct { 150 unsigned int : 1; 151 unsigned int be : 1; /**< Big-Endian data accesses. */ 152 unsigned int up : 1; /**< User Performance monitor enable. */ 153 unsigned int ac : 1; /**< Alignment Check. */ 154 unsigned int mfl : 1; /**< Lower floating-point register written. */ 155 unsigned int mfh : 1; /**< Upper floating-point register written. 
*/ 156 unsigned int : 7; 157 unsigned int ic : 1; /**< Interruption Collection. */ 158 unsigned int i : 1; /**< Interrupt Bit. */ 159 unsigned int pk : 1; /**< Protection Key enable. */ 160 unsigned int : 1; 161 unsigned int dt : 1; /**< Data address Translation. */ 162 unsigned int dfl : 1; /**< Disabled Floating-point Low register set. */ 163 unsigned int dfh : 1; /**< Disabled Floating-point High register set. */ 164 unsigned int sp : 1; /**< Secure Performance monitors. */ 165 unsigned int pp : 1; /**< Privileged Performance monitor enable. */ 166 unsigned int di : 1; /**< Disable Instruction set transition. */ 167 unsigned int si : 1; /**< Secure Interval timer. */ 168 unsigned int db : 1; /**< Debug Breakpoint fault. */ 169 unsigned int lp : 1; /**< Lower Privilege transfer trap. */ 170 unsigned int tb : 1; /**< Taken Branch trap. */ 171 unsigned int rt : 1; /**< Register Stack Translation. */ 172 unsigned int : 4; 173 unsigned int cpl : 2; /**< Current Privilege Level. */ 174 unsigned int is : 1; /**< Instruction Set. */ 175 unsigned int mc : 1; /**< Machine Check abort mask. */ 176 unsigned int it : 1; /**< Instruction address Translation. */ 177 unsigned int id : 1; /**< Instruction Debug fault disable. */ 178 unsigned int da : 1; /**< Disable Data Access and Dirty-bit faults. */ 179 unsigned int dd : 1; /**< Data Debug fault disable. */ 180 unsigned int ss : 1; /**< Single Step enable. */ 181 unsigned int ri : 2; /**< Restart Instruction. */ 182 unsigned int ed : 1; /**< Exception Deferral. */ 183 unsigned int bn : 1; /**< Register Bank. */ 184 unsigned int ia : 1; /**< Disable Instruction Access-bit faults. */ 185 } __attribute__ ((packed)); 186 } psr_t; 179 187 180 188 /** Register Stack Configuration Register */ 181 union rsc { 182 uint64_t value; 183 struct { 184 unsigned mode : 2; 185 unsigned pl : 2; /**< Privilege Level. */ 186 unsigned be : 1; /**< Big-endian. 
*/ 187 unsigned : 11; 188 unsigned loadrs : 14; 189 } __attribute__ ((packed)); 190 }; 191 typedef union rsc rsc_t; 189 typedef union { 190 uint64_t value; 191 struct { 192 unsigned int mode : 2; 193 unsigned int pl : 2; /**< Privilege Level. */ 194 unsigned int be : 1; /**< Big-endian. */ 195 unsigned int : 11; 196 unsigned int loadrs : 14; 197 } __attribute__ ((packed)); 198 } rsc_t; 192 199 193 200 /** External Interrupt Vector Register */ 194 union cr_ivr { 195 uint8_t vector; 196 uint64_t value; 197 }; 198 199 typedef union cr_ivr cr_ivr_t; 201 typedef union { 202 uint8_t vector; 203 uint64_t value; 204 } cr_ivr_t; 200 205 201 206 /** Task Priority Register */ 202 union cr_tpr { 203 struct { 204 unsigned : 4; 205 unsigned mic: 4; /**< Mask Interrupt Class. */ 206 unsigned : 8; 207 unsigned mmi: 1; /**< Mask Maskable Interrupts. */ 208 } __attribute__ ((packed)); 209 uint64_t value; 210 }; 211 212 typedef union cr_tpr cr_tpr_t; 207 typedef union { 208 uint64_t value; 209 struct { 210 unsigned int : 4; 211 unsigned int mic: 4; /**< Mask Interrupt Class. */ 212 unsigned int : 8; 213 unsigned int mmi: 1; /**< Mask Maskable Interrupts. */ 214 } __attribute__ ((packed)); 215 } cr_tpr_t; 213 216 214 217 /** Interval Timer Vector */ 215 union cr_itv { 216 struct { 217 unsigned vector : 8; 218 unsigned : 4; 219 unsigned : 1; 220 unsigned : 3; 221 unsigned m : 1; /**< Mask. */ 222 } __attribute__ ((packed)); 223 uint64_t value; 224 }; 225 226 typedef union cr_itv cr_itv_t; 218 typedef union { 219 uint64_t value; 220 struct { 221 unsigned int vector : 8; 222 unsigned int : 4; 223 unsigned int : 1; 224 unsigned int : 3; 225 unsigned int m : 1; /**< Mask. */ 226 } __attribute__ ((packed)); 227 } cr_itv_t; 227 228 228 229 /** Interruption Status Register */ 229 union cr_isr { 230 typedef union { 231 uint64_t value; 230 232 struct { 231 233 union { 232 234 /** General Exception code field structuring. 
*/ 235 uint16_t code; 233 236 struct { 234 unsigned ge_na : 4;235 unsigned ge_code : 4;237 unsigned int ge_na : 4; 238 unsigned int ge_code : 4; 236 239 } __attribute__ ((packed)); 237 uint16_t code;238 240 }; 239 241 uint8_t vector; 240 unsigned : 8; 241 unsigned x : 1; /**< Execute exception. */ 242 unsigned w : 1; /**< Write exception. */ 243 unsigned r : 1; /**< Read exception. */ 244 unsigned na : 1; /**< Non-access exception. */ 245 unsigned sp : 1; /**< Speculative load exception. */ 246 unsigned rs : 1; /**< Register stack. */ 247 unsigned ir : 1; /**< Incomplete Register frame. */ 248 unsigned ni : 1; /**< Nested Interruption. */ 249 unsigned so : 1; /**< IA-32 Supervisor Override. */ 250 unsigned ei : 2; /**< Excepting Instruction. */ 251 unsigned ed : 1; /**< Exception Deferral. */ 252 unsigned : 20; 253 } __attribute__ ((packed)); 254 uint64_t value; 255 }; 256 257 typedef union cr_isr cr_isr_t; 242 unsigned int : 8; 243 unsigned int x : 1; /**< Execute exception. */ 244 unsigned int w : 1; /**< Write exception. */ 245 unsigned int r : 1; /**< Read exception. */ 246 unsigned int na : 1; /**< Non-access exception. */ 247 unsigned int sp : 1; /**< Speculative load exception. */ 248 unsigned int rs : 1; /**< Register stack. */ 249 unsigned int ir : 1; /**< Incomplete Register frame. */ 250 unsigned int ni : 1; /**< Nested Interruption. */ 251 unsigned int so : 1; /**< IA-32 Supervisor Override. */ 252 unsigned int ei : 2; /**< Excepting Instruction. */ 253 unsigned int ed : 1; /**< Exception Deferral. */ 254 unsigned int : 20; 255 } __attribute__ ((packed)); 256 } cr_isr_t; 258 257 259 258 /** CPUID Register 3 */ 260 union cpuid3 { 259 typedef union { 260 uint64_t value; 261 261 struct { 262 262 uint8_t number; … … 266 266 uint8_t archrev; 267 267 } __attribute__ ((packed)); 268 uint64_t value; 269 }; 270 271 typedef union cpuid3 cpuid3_t; 268 } cpuid3_t; 272 269 273 270 #endif /* !__ASM__ */ -
kernel/arch/ia64/src/ivt.S
r2f636b6 rf6b5593 391 391 392 392 /* 10. call handler */ 393 movl r1 = _hardcoded_load_address393 movl r1 = kernel_image_start 394 394 395 395 mov b1 = loc2 -
kernel/arch/ia64/src/mm/as.c
r2f636b6 rf6b5593 55 55 void as_install_arch(as_t *as) 56 56 { 57 region_register rr;57 region_register_t rr; 58 58 int i; 59 59 … … 73 73 rr.map.ps = PAGE_WIDTH; 74 74 rr_write(i, rr.word); 75 srlz_d(); 76 srlz_i(); 75 77 } 76 srlz_d();77 srlz_i();78 78 } 79 79 -
kernel/arch/ia64/src/mm/page.c
r2f636b6 rf6b5593 63 63 void set_environment(void) 64 64 { 65 region_register rr;66 pta_register pta;65 region_register_t rr; 66 pta_register_t pta; 67 67 int i; 68 68 #ifdef CONFIG_VHPT … … 131 131 vhpt_entry_t *vhpt_hash(uintptr_t page, asid_t asid) 132 132 { 133 region_register rr_save, rr;133 region_register_t rr_save, rr; 134 134 size_t vrn; 135 135 rid_t rid; … … 176 176 bool vhpt_compare(uintptr_t page, asid_t asid, vhpt_entry_t *v) 177 177 { 178 region_register rr_save, rr;178 region_register_t rr_save, rr; 179 179 size_t vrn; 180 180 rid_t rid; … … 223 223 int flags) 224 224 { 225 region_register rr_save, rr;225 region_register_t rr_save, rr; 226 226 size_t vrn; 227 227 rid_t rid; … … 257 257 v->present.ma = (flags & PAGE_CACHEABLE) ? 258 258 MA_WRITEBACK : MA_UNCACHEABLE; 259 v->present.a = false; 260 v->present.d = false; 259 v->present.a = false; /* not accessed */ 260 v->present.d = false; /* not dirty */ 261 261 v->present.pl = (flags & PAGE_USER) ? PL_USER : PL_KERNEL; 262 262 v->present.ar = (flags & PAGE_WRITE) ? AR_WRITE : AR_READ; 263 263 v->present.ar |= (flags & PAGE_EXEC) ? AR_EXECUTE : 0; 264 264 v->present.ppn = frame >> PPN_SHIFT; 265 v->present.ed = false; 265 v->present.ed = false; /* exception not deffered */ 266 266 v->present.ps = PAGE_WIDTH; 267 267 v->present.key = 0; -
kernel/arch/ia64/src/mm/tlb.c
r2f636b6 rf6b5593 27 27 */ 28 28 29 /** @addtogroup ia64mm 29 /** @addtogroup ia64mm 30 30 * @{ 31 31 */ … … 53 53 #include <interrupt.h> 54 54 55 #define IO_FRAME_BASE 0xFFFFC000000 56 55 57 /** Invalidate all TLB entries. */ 56 58 void tlb_invalidate_all(void) … … 59 61 uintptr_t adr; 60 62 uint32_t count1, count2, stride1, stride2; 61 63 62 64 unsigned int i, j; 63 65 64 66 adr = PAL_PTCE_INFO_BASE(); 65 67 count1 = PAL_PTCE_INFO_COUNT1(); … … 67 69 stride1 = PAL_PTCE_INFO_STRIDE1(); 68 70 stride2 = PAL_PTCE_INFO_STRIDE2(); 69 71 70 72 ipl = interrupts_disable(); 71 73 72 74 for (i = 0; i < count1; i++) { 73 75 for (j = 0; j < count2; j++) { 74 76 asm volatile ( 75 "ptc.e %0 ;;" 76 : 77 : "r" (adr) 77 "ptc.e %[adr] ;;" 78 :: [adr] "r" (adr) 78 79 ); 79 80 adr += stride2; … … 81 82 adr += stride1; 82 83 } 83 84 84 85 interrupts_restore(ipl); 85 86 86 87 srlz_d(); 87 88 srlz_i(); 89 88 90 #ifdef CONFIG_VHPT 89 91 vhpt_invalidate_all(); 90 #endif 92 #endif 91 93 } 92 94 93 95 /** Invalidate entries belonging to an address space. 94 96 * 95 * @param asid Address space identifier. 97 * @param asid Address space identifier. 98 * 96 99 */ 97 100 void tlb_invalidate_asid(asid_t asid) … … 103 106 void tlb_invalidate_pages(asid_t asid, uintptr_t page, size_t cnt) 104 107 { 105 region_register rr;108 region_register_t rr; 106 109 bool restore_rr = false; 107 110 int b = 0; 108 111 int c = cnt; 109 112 110 113 uintptr_t va; 111 114 va = page; 112 115 113 116 rr.word = rr_read(VA2VRN(va)); 114 117 if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) { … … 117 120 * Save the old content of the register and replace the RID. 
118 121 */ 119 region_register rr0;120 122 region_register_t rr0; 123 121 124 rr0 = rr; 122 125 rr0.map.rid = ASID2RID(asid, VA2VRN(va)); … … 126 129 } 127 130 128 while (c >>= 1)131 while (c >>= 1) 129 132 b++; 130 133 b >>= 1; … … 169 172 break; 170 173 } 171 for(; va < (page + cnt * PAGE_SIZE); va += (1 << ps)) 172 asm volatile ("ptc.l %0, %1;;" :: "r" (va), "r" (ps << 2)); 174 175 for (; va < (page + cnt * PAGE_SIZE); va += (1 << ps)) 176 asm volatile ( 177 "ptc.l %[va], %[ps] ;;" 178 :: [va]"r" (va), 179 [ps] "r" (ps << 2) 180 ); 181 173 182 srlz_d(); 174 183 srlz_i(); … … 183 192 /** Insert data into data translation cache. 184 193 * 185 * @param va Virtual page address. 186 * @param asid Address space identifier. 187 * @param entry The rest of TLB entry as required by TLB insertion 188 * format. 194 * @param va Virtual page address. 195 * @param asid Address space identifier. 196 * @param entry The rest of TLB entry as required by TLB insertion 197 * format. 198 * 189 199 */ 190 200 void dtc_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry) … … 195 205 /** Insert data into instruction translation cache. 196 206 * 197 * @param va 198 * @param asid 199 * @param entry 200 * 207 * @param va Virtual page address. 208 * @param asid Address space identifier. 209 * @param entry The rest of TLB entry as required by TLB insertion 210 * format. 201 211 */ 202 212 void itc_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry) … … 207 217 /** Insert data into instruction or data translation cache. 208 218 * 209 * @param va Virtual page address. 210 * @param asid Address space identifier. 211 * @param entry The rest of TLB entry as required by TLB insertion 212 * format. 213 * @param dtc If true, insert into data translation cache, use 214 * instruction translation cache otherwise. 219 * @param va Virtual page address. 220 * @param asid Address space identifier. 221 * @param entry The rest of TLB entry as required by TLB insertion 222 * format. 
223 * @param dtc If true, insert into data translation cache, use 224 * instruction translation cache otherwise. 225 * 215 226 */ 216 227 void tc_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, bool dtc) 217 228 { 218 region_register rr;229 region_register_t rr; 219 230 bool restore_rr = false; 220 231 221 232 rr.word = rr_read(VA2VRN(va)); 222 233 if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) { … … 225 236 * Save the old content of the register and replace the RID. 226 237 */ 227 region_register rr0;228 238 region_register_t rr0; 239 229 240 rr0 = rr; 230 241 rr0.map.rid = ASID2RID(asid, VA2VRN(va)); … … 235 246 236 247 asm volatile ( 237 "mov r8 = psr;;\n" 238 "rsm %0;;\n" /* PSR_IC_MASK */ 239 "srlz.d;;\n" 240 "srlz.i;;\n" 241 "mov cr.ifa = %1\n" /* va */ 242 "mov cr.itir = %2;;\n" /* entry.word[1] */ 243 "cmp.eq p6,p7 = %4,r0;;\n" /* decide between itc and dtc */ 244 "(p6) itc.i %3;;\n" 245 "(p7) itc.d %3;;\n" 246 "mov psr.l = r8;;\n" 247 "srlz.d;;\n" 248 : 249 : "i" (PSR_IC_MASK), "r" (va), "r" (entry.word[1]), 250 "r" (entry.word[0]), "r" (dtc) 248 "mov r8 = psr ;;\n" 249 "rsm %[mask] ;;\n" /* PSR_IC_MASK */ 250 "srlz.d ;;\n" 251 "srlz.i ;;\n" 252 "mov cr.ifa = %[va]\n" /* va */ 253 "mov cr.itir = %[word1] ;;\n" /* entry.word[1] */ 254 "cmp.eq p6, p7 = %[dtc], r0 ;;\n" /* decide between itc and dtc */ 255 "(p6) itc.i %[word0] ;;\n" 256 "(p7) itc.d %[word0] ;;\n" 257 "mov psr.l = r8 ;;\n" 258 "srlz.d ;;\n" 259 :: [mask] "i" (PSR_IC_MASK), 260 [va] "r" (va), 261 [word0] "r" (entry.word[0]), 262 [word1] "r" (entry.word[1]), 263 [dtc] "r" (dtc) 251 264 : "p6", "p7", "r8" 252 265 ); … … 261 274 /** Insert data into instruction translation register. 262 275 * 263 * @param va 264 * @param asid 265 * @param entry 266 * 267 * @param tr 268 * /269 void 270 itr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, size_t tr)276 * @param va Virtual page address. 277 * @param asid Address space identifier. 
278 * @param entry The rest of TLB entry as required by TLB insertion 279 * format. 280 * @param tr Translation register. 281 * 282 */ 283 void itr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, size_t tr) 271 284 { 272 285 tr_mapping_insert(va, asid, entry, false, tr); … … 275 288 /** Insert data into data translation register. 276 289 * 277 * @param va 278 * @param asid 279 * @param entry 280 * 281 * @param tr 282 * /283 void 284 dtr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, size_t tr)290 * @param va Virtual page address. 291 * @param asid Address space identifier. 292 * @param entry The rest of TLB entry as required by TLB insertion 293 * format. 294 * @param tr Translation register. 295 * 296 */ 297 void dtr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, size_t tr) 285 298 { 286 299 tr_mapping_insert(va, asid, entry, true, tr); … … 289 302 /** Insert data into instruction or data translation register. 290 303 * 291 * @param va 292 * @param asid 293 * @param entry 294 * 295 * @param dtr 296 * 297 * @param tr 298 * /299 void 300 tr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, bool dtr,304 * @param va Virtual page address. 305 * @param asid Address space identifier. 306 * @param entry The rest of TLB entry as required by TLB insertion 307 * format. 308 * @param dtr If true, insert into data translation register, use 309 * instruction translation register otherwise. 310 * @param tr Translation register. 311 * 312 */ 313 void tr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, bool dtr, 301 314 size_t tr) 302 315 { 303 region_register rr;316 region_register_t rr; 304 317 bool restore_rr = false; 305 318 306 319 rr.word = rr_read(VA2VRN(va)); 307 320 if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) { … … 310 323 * Save the old content of the register and replace the RID. 
311 324 */ 312 region_register rr0;313 325 region_register_t rr0; 326 314 327 rr0 = rr; 315 328 rr0.map.rid = ASID2RID(asid, VA2VRN(va)); … … 318 331 srlz_i(); 319 332 } 320 333 321 334 asm volatile ( 322 "mov r8 = psr;;\n" 323 "rsm %0;;\n" /* PSR_IC_MASK */ 324 "srlz.d;;\n" 325 "srlz.i;;\n" 326 "mov cr.ifa = %1\n" /* va */ 327 "mov cr.itir = %2;;\n" /* entry.word[1] */ 328 "cmp.eq p6,p7 = %5,r0;;\n" /* decide between itr and dtr */ 329 "(p6) itr.i itr[%4] = %3;;\n" 330 "(p7) itr.d dtr[%4] = %3;;\n" 331 "mov psr.l = r8;;\n" 332 "srlz.d;;\n" 333 : 334 : "i" (PSR_IC_MASK), "r" (va), "r" (entry.word[1]), 335 "r" (entry.word[0]), "r" (tr), "r" (dtr) 335 "mov r8 = psr ;;\n" 336 "rsm %[mask] ;;\n" /* PSR_IC_MASK */ 337 "srlz.d ;;\n" 338 "srlz.i ;;\n" 339 "mov cr.ifa = %[va]\n" /* va */ 340 "mov cr.itir = %[word1] ;;\n" /* entry.word[1] */ 341 "cmp.eq p6, p7 = %[dtr], r0 ;;\n" /* decide between itr and dtr */ 342 "(p6) itr.i itr[%[tr]] = %[word0] ;;\n" 343 "(p7) itr.d dtr[%[tr]] = %[word0] ;;\n" 344 "mov psr.l = r8 ;;\n" 345 "srlz.d ;;\n" 346 :: [mask] "i" (PSR_IC_MASK), 347 [va] "r" (va), 348 [word1] "r" (entry.word[1]), 349 [word0] "r" (entry.word[0]), 350 [tr] "r" (tr), 351 [dtr] "r" (dtr) 336 352 : "p6", "p7", "r8" 337 353 ); … … 346 362 /** Insert data into DTLB. 347 363 * 348 * @param page 349 * @param frame 350 * @param dtr 351 * 352 * @param tr 353 * /354 void 355 dtlb_kernel_mapping_insert(uintptr_t page, uintptr_t frame, bool dtr,364 * @param page Virtual page address including VRN bits. 365 * @param frame Physical frame address. 366 * @param dtr If true, insert into data translation register, use data 367 * translation cache otherwise. 368 * @param tr Translation register if dtr is true, ignored otherwise. 
369 * 370 */ 371 void dtlb_kernel_mapping_insert(uintptr_t page, uintptr_t frame, bool dtr, 356 372 size_t tr) 357 373 { … … 361 377 entry.word[1] = 0; 362 378 363 entry.p = true; 379 entry.p = true; /* present */ 364 380 entry.ma = MA_WRITEBACK; 365 entry.a = true; 366 entry.d = true; 381 entry.a = true; /* already accessed */ 382 entry.d = true; /* already dirty */ 367 383 entry.pl = PL_KERNEL; 368 384 entry.ar = AR_READ | AR_WRITE; … … 380 396 * Purge DTR entries used by the kernel. 381 397 * 382 * @param page Virtual page address including VRN bits. 383 * @param width Width of the purge in bits. 398 * @param page Virtual page address including VRN bits. 399 * @param width Width of the purge in bits. 400 * 384 401 */ 385 402 void dtr_purge(uintptr_t page, size_t width) 386 403 { 387 asm volatile ("ptr.d %0, %1\n" : : "r" (page), "r" (width << 2)); 404 asm volatile ( 405 "ptr.d %[page], %[width]\n" 406 :: [page] "r" (page), 407 [width] "r" (width << 2) 408 ); 388 409 } 389 410 … … 391 412 /** Copy content of PTE into data translation cache. 392 413 * 393 * @param t PTE. 414 * @param t PTE. 415 * 394 416 */ 395 417 void dtc_pte_copy(pte_t *t) 396 418 { 397 419 tlb_entry_t entry; 398 420 399 421 entry.word[0] = 0; 400 422 entry.word[1] = 0; … … 410 432 411 433 dtc_mapping_insert(t->page, t->as->asid, entry); 434 412 435 #ifdef CONFIG_VHPT 413 436 vhpt_mapping_insert(t->page, t->as->asid, entry); 414 #endif 437 #endif 415 438 } 416 439 417 440 /** Copy content of PTE into instruction translation cache. 418 441 * 419 * @param t PTE. 442 * @param t PTE. 
443 * 420 444 */ 421 445 void itc_pte_copy(pte_t *t) 422 446 { 423 447 tlb_entry_t entry; 424 448 425 449 entry.word[0] = 0; 426 450 entry.word[1] = 0; … … 437 461 438 462 itc_mapping_insert(t->page, t->as->asid, entry); 463 439 464 #ifdef CONFIG_VHPT 440 465 vhpt_mapping_insert(t->page, t->as->asid, entry); 441 #endif 466 #endif 442 467 } 443 468 444 469 /** Instruction TLB fault handler for faults with VHPT turned off. 445 470 * 446 * @param vector Interruption vector. 447 * @param istate Structure with saved interruption state. 471 * @param vector Interruption vector. 472 * @param istate Structure with saved interruption state. 473 * 448 474 */ 449 475 void alternate_instruction_tlb_fault(uint64_t vector, istate_t *istate) 450 476 { 451 region_register rr;477 region_register_t rr; 452 478 rid_t rid; 453 479 uintptr_t va; 454 480 pte_t *t; 455 481 456 va = istate->cr_ifa; 482 va = istate->cr_ifa; /* faulting address */ 457 483 rr.word = rr_read(VA2VRN(va)); 458 484 rid = rr.map.rid; 459 485 460 486 page_table_lock(AS, true); 461 487 t = page_mapping_find(AS, va); … … 473 499 page_table_unlock(AS, true); 474 500 if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) { 475 fault_if_from_uspace(istate, "Page fault at %p.",va);501 fault_if_from_uspace(istate, "Page fault at %p.", va); 476 502 panic("%s: va=%p, rid=%d, iip=%p.", __func__, va, rid, 477 503 istate->cr_iip); … … 488 514 } 489 515 490 #define IO_FRAME_BASE 0xFFFFC000000491 492 516 /** 493 517 * There is special handling of memory mapped legacy io, because of 4KB sized 494 518 * access for userspace. 495 519 * 496 * @param va Virtual address of page fault. 497 * @param istate Structure with saved interruption state. 498 * 499 * @return One on success, zero on failure. 520 * @param va Virtual address of page fault. 521 * @param istate Structure with saved interruption state. 522 * 523 * @return One on success, zero on failure. 
524 * 500 525 */ 501 526 static int try_memmap_io_insertion(uintptr_t va, istate_t *istate) … … 505 530 uint64_t io_page = (va & ((1 << IO_PAGE_WIDTH) - 1)) >> 506 531 USPACE_IO_PAGE_WIDTH; 507 532 508 533 if (is_io_page_accessible(io_page)) { 509 534 uint64_t page, frame; 510 535 511 536 page = IO_OFFSET + 512 537 (1 << USPACE_IO_PAGE_WIDTH) * io_page; 513 538 frame = IO_FRAME_BASE + 514 539 (1 << USPACE_IO_PAGE_WIDTH) * io_page; 515 540 516 541 tlb_entry_t entry; 517 542 518 543 entry.word[0] = 0; 519 544 entry.word[1] = 0; 520 521 entry.p = true; 522 entry.ma = MA_UNCACHEABLE; 523 entry.a = true; 524 entry.d = true; 545 546 entry.p = true; /* present */ 547 entry.ma = MA_UNCACHEABLE; 548 entry.a = true; /* already accessed */ 549 entry.d = true; /* already dirty */ 525 550 entry.pl = PL_USER; 526 551 entry.ar = AR_READ | AR_WRITE; 527 552 entry.ppn = frame >> PPN_SHIFT; 528 553 entry.ps = USPACE_IO_PAGE_WIDTH; 529 554 530 555 dtc_mapping_insert(page, TASK->as->asid, entry); 531 556 return 1; … … 536 561 } 537 562 } 538 563 539 564 return 0; 540 565 } … … 542 567 /** Data TLB fault handler for faults with VHPT turned off. 543 568 * 544 * @param vector Interruption vector. 545 * @param istate Structure with saved interruption state. 569 * @param vector Interruption vector. 570 * @param istate Structure with saved interruption state. 571 * 546 572 */ 547 573 void alternate_data_tlb_fault(uint64_t vector, istate_t *istate) 548 574 { 549 region_register rr; 550 rid_t rid; 551 uintptr_t va; 552 pte_t *t; 553 554 va = istate->cr_ifa; /* faulting address */ 555 rr.word = rr_read(VA2VRN(va)); 556 rid = rr.map.rid; 575 if (istate->cr_isr.sp) { 576 /* Speculative load. Deffer the exception 577 until a more clever approach can be used. 578 579 Currently if we try to find the mapping 580 for the speculative load while in the kernel, 581 we might introduce a livelock because of 582 the possibly invalid values of the address. 
*/ 583 istate->cr_ipsr.ed = true; 584 return; 585 } 586 587 uintptr_t va = istate->cr_ifa; /* faulting address */ 588 589 region_register_t rr; 590 rr.word = rr_read(VA2VRN(va)); 591 rid_t rid = rr.map.rid; 557 592 if (RID2ASID(rid) == ASID_KERNEL) { 558 593 if (VA2VRN(va) == VRN_KERNEL) { … … 565 600 } 566 601 } 567 602 603 568 604 page_table_lock(AS, true); 569 t= page_mapping_find(AS, va);570 if ( t) {605 pte_t *entry = page_mapping_find(AS, va); 606 if (entry) { 571 607 /* 572 608 * The mapping was found in the software page hash table. 573 609 * Insert it into data translation cache. 574 610 */ 575 dtc_pte_copy( t);611 dtc_pte_copy(entry); 576 612 page_table_unlock(AS, true); 577 613 } else { … … 579 615 if (try_memmap_io_insertion(va, istate)) 580 616 return; 581 /* 582 * Forward the page fault to the address space page fault 617 618 /* 619 * Forward the page fault to the address space page fault 583 620 * handler. 584 621 */ 585 622 if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) { 586 fault_if_from_uspace(istate, "Page fault at %p.",va);623 fault_if_from_uspace(istate, "Page fault at %p.", va); 587 624 panic("%s: va=%p, rid=%d, iip=%p.", __func__, va, rid, 588 625 istate->cr_iip); … … 595 632 * This fault should not occur. 596 633 * 597 * @param vector Interruption vector. 598 * @param istate Structure with saved interruption state. 634 * @param vector Interruption vector. 635 * @param istate Structure with saved interruption state. 636 * 599 637 */ 600 638 void data_nested_tlb_fault(uint64_t vector, istate_t *istate) 601 639 { 602 panic("%s.", __func__);640 ASSERT(false); 603 641 } 604 642 605 643 /** Data Dirty bit fault handler. 606 644 * 607 * @param vector Interruption vector. 608 * @param istate Structure with saved interruption state. 645 * @param vector Interruption vector. 646 * @param istate Structure with saved interruption state. 
647 * 609 648 */ 610 649 void data_dirty_bit_fault(uint64_t vector, istate_t *istate) 611 650 { 612 region_register rr;651 region_register_t rr; 613 652 rid_t rid; 614 653 uintptr_t va; 615 654 pte_t *t; 616 655 617 va = istate->cr_ifa; 656 va = istate->cr_ifa; /* faulting address */ 618 657 rr.word = rr_read(VA2VRN(va)); 619 658 rid = rr.map.rid; 620 659 621 660 page_table_lock(AS, true); 622 661 t = page_mapping_find(AS, va); 623 ASSERT( t && t->p);624 if ( t && t->p && t->w) {662 ASSERT((t) && (t->p)); 663 if ((t) && (t->p) && (t->w)) { 625 664 /* 626 665 * Update the Dirty bit in page tables and reinsert … … 631 670 } else { 632 671 if (as_page_fault(va, PF_ACCESS_WRITE, istate) == AS_PF_FAULT) { 633 fault_if_from_uspace(istate, "Page fault at %p.",va);672 fault_if_from_uspace(istate, "Page fault at %p.", va); 634 673 panic("%s: va=%p, rid=%d, iip=%p.", __func__, va, rid, 635 674 istate->cr_iip); … … 641 680 /** Instruction access bit fault handler. 642 681 * 643 * @param vector Interruption vector. 644 * @param istate Structure with saved interruption state. 682 * @param vector Interruption vector. 683 * @param istate Structure with saved interruption state. 684 * 645 685 */ 646 686 void instruction_access_bit_fault(uint64_t vector, istate_t *istate) 647 687 { 648 region_register rr;688 region_register_t rr; 649 689 rid_t rid; 650 690 uintptr_t va; 651 pte_t *t; 652 653 va = istate->cr_ifa; 691 pte_t *t; 692 693 va = istate->cr_ifa; /* faulting address */ 654 694 rr.word = rr_read(VA2VRN(va)); 655 695 rid = rr.map.rid; 656 696 657 697 page_table_lock(AS, true); 658 698 t = page_mapping_find(AS, va); 659 ASSERT( t && t->p);660 if ( t && t->p && t->x) {699 ASSERT((t) && (t->p)); 700 if ((t) && (t->p) && (t->x)) { 661 701 /* 662 702 * Update the Accessed bit in page tables and reinsert … … 679 719 * @param vector Interruption vector. 680 720 * @param istate Structure with saved interruption state. 
721 * 681 722 */ 682 723 void data_access_bit_fault(uint64_t vector, istate_t *istate) 683 724 { 684 region_register rr;725 region_register_t rr; 685 726 rid_t rid; 686 727 uintptr_t va; 687 728 pte_t *t; 688 689 va = istate->cr_ifa; 729 730 va = istate->cr_ifa; /* faulting address */ 690 731 rr.word = rr_read(VA2VRN(va)); 691 732 rid = rr.map.rid; 692 733 693 734 page_table_lock(AS, true); 694 735 t = page_mapping_find(AS, va); 695 ASSERT( t && t->p);696 if ( t && t->p) {736 ASSERT((t) && (t->p)); 737 if ((t) && (t->p)) { 697 738 /* 698 739 * Update the Accessed bit in page tables and reinsert … … 715 756 * @param vector Interruption vector. 716 757 * @param istate Structure with saved interruption state. 758 * 717 759 */ 718 760 void data_access_rights_fault(uint64_t vector, istate_t *istate) 719 761 { 720 region_register rr;762 region_register_t rr; 721 763 rid_t rid; 722 764 uintptr_t va; 723 765 pte_t *t; 724 725 va = istate->cr_ifa; 766 767 va = istate->cr_ifa; /* faulting address */ 726 768 rr.word = rr_read(VA2VRN(va)); 727 769 rid = rr.map.rid; 728 770 729 771 /* 730 772 * Assume a write to a read-only page. … … 732 774 page_table_lock(AS, true); 733 775 t = page_mapping_find(AS, va); 734 ASSERT( t && t->p);776 ASSERT((t) && (t->p)); 735 777 ASSERT(!t->w); 736 778 if (as_page_fault(va, PF_ACCESS_WRITE, istate) == AS_PF_FAULT) { … … 746 788 * @param vector Interruption vector. 747 789 * @param istate Structure with saved interruption state. 790 * 748 791 */ 749 792 void page_not_present(uint64_t vector, istate_t *istate) 750 793 { 751 region_register rr;794 region_register_t rr; 752 795 rid_t rid; 753 796 uintptr_t va; 754 797 pte_t *t; 755 798 756 va = istate->cr_ifa; 799 va = istate->cr_ifa; /* faulting address */ 757 800 rr.word = rr_read(VA2VRN(va)); 758 801 rid = rr.map.rid; 759 802 760 803 page_table_lock(AS, true); 761 804 t = page_mapping_find(AS, va); -
kernel/arch/ia64/src/mm/vhpt.c
r2f636b6 rf6b5593 27 27 */ 28 28 29 /** @addtogroup ia64mm 29 /** @addtogroup ia64mm 30 30 * @{ 31 31 */ … … 44 44 vhpt_base = frame_alloc(VHPT_WIDTH - FRAME_WIDTH, 45 45 FRAME_KA | FRAME_ATOMIC); 46 if (!vhpt_base) 46 if (!vhpt_base) 47 47 panic("Kernel configured with VHPT but no memory for table."); 48 48 vhpt_invalidate_all(); … … 53 53 void vhpt_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry) 54 54 { 55 region_register rr_save, rr;55 region_register_t rr_save, rr; 56 56 size_t vrn; 57 57 rid_t rid; 58 58 uint64_t tag; 59 59 60 60 vhpt_entry_t *ventry; 61 62 61 63 62 vrn = va >> VRN_SHIFT; 64 63 rid = ASID2RID(asid, vrn); 65 64 66 65 rr_save.word = rr_read(vrn); 67 66 rr.word = rr_save.word; … … 75 74 srlz_i(); 76 75 srlz_d(); 77 76 78 77 ventry->word[0] = entry.word[0]; 79 78 ventry->word[1] = entry.word[1]; -
kernel/arch/ia64/src/start.S
r2f636b6 rf6b5593 32 32 #include <mm/asid.h> 33 33 34 #define RR_MASK (0xFFFFFFFF00000002)35 #define RID_SHIFT 36 #define PS_SHIFT 37 38 #define KERNEL_TRANSLATION_I 39 #define KERNEL_TRANSLATION_D 40 #define KERNEL_TRANSLATION_VIO 41 #define KERNEL_TRANSLATION_IO 0x00100FFFFC00067142 #define KERNEL_TRANSLATION_FW 0x00100000F000067134 #define RR_MASK (0xFFFFFFFF00000002) 35 #define RID_SHIFT 8 36 #define PS_SHIFT 2 37 38 #define KERNEL_TRANSLATION_I 0x0010000000000661 39 #define KERNEL_TRANSLATION_D 0x0010000000000661 40 #define KERNEL_TRANSLATION_VIO 0x0010000000000671 41 #define KERNEL_TRANSLATION_IO 0x00100FFFFC000671 42 #define KERNEL_TRANSLATION_FW 0x00100000F0000671 43 43 44 44 .section K_TEXT_START, "ax" … … 49 49 kernel_image_start: 50 50 .auto 51 51 52 52 #ifdef CONFIG_SMP 53 53 # Identify self(CPU) in OS structures by ID / EID 54 54 55 55 mov r9 = cr64 56 56 mov r10 = 1 … … 62 62 st1 [r8] = r10 63 63 #endif 64 64 65 65 mov psr.l = r0 66 66 srlz.i 67 67 srlz.d 68 68 69 69 # Fill TR.i and TR.d using Region Register #VRN_KERNEL 70 70 71 71 movl r8 = (VRN_KERNEL << VRN_SHIFT) 72 72 mov r9 = rr[r8] 73 73 74 74 movl r10 = (RR_MASK) 75 75 and r9 = r10, r9 76 76 movl r10 = ((RID_KERNEL << RID_SHIFT) | (KERNEL_PAGE_WIDTH << PS_SHIFT)) 77 or 78 77 or r9 = r10, r9 78 79 79 mov rr[r8] = r9 80 80 81 81 movl r8 = (VRN_KERNEL << VRN_SHIFT) 82 82 mov cr.ifa = r8 83 83 84 84 mov r11 = cr.itir 85 85 movl r10 = (KERNEL_PAGE_WIDTH << PS_SHIFT) 86 86 or r10 = r10, r11 87 87 mov cr.itir = r10 88 88 89 89 movl r10 = (KERNEL_TRANSLATION_I) 90 90 itr.i itr[r0] = r10 91 91 movl r10 = (KERNEL_TRANSLATION_D) 92 92 itr.d dtr[r0] = r10 93 93 94 94 movl r7 = 1 95 95 movl r8 = (VRN_KERNEL << VRN_SHIFT) | VIO_OFFSET … … 97 97 movl r10 = (KERNEL_TRANSLATION_VIO) 98 98 itr.d dtr[r7] = r10 99 99 100 100 mov r11 = cr.itir 101 101 movl r10 = ~0xfc … … 104 104 or r10 = r10, r11 105 105 mov cr.itir = r10 106 106 107 107 movl r7 = 2 108 108 movl r8 = (VRN_KERNEL << VRN_SHIFT) | IO_OFFSET … … 110 
110 movl r10 = (KERNEL_TRANSLATION_IO) 111 111 itr.d dtr[r7] = r10 112 113 # Setup mapping for fi mware arrea (also SAPIC)114 112 113 # Setup mapping for firmware area (also SAPIC) 114 115 115 mov r11 = cr.itir 116 116 movl r10 = ~0xfc … … 119 119 or r10 = r10, r11 120 120 mov cr.itir = r10 121 121 122 122 movl r7 = 3 123 123 movl r8 = (VRN_KERNEL << VRN_SHIFT) | FW_OFFSET … … 125 125 movl r10 = (KERNEL_TRANSLATION_FW) 126 126 itr.d dtr[r7] = r10 127 127 128 # Initialize DSR 129 130 movl r10 = (DCR_DP_MASK | DCR_DK_MASK | DCR_DX_MASK | DCR_DR_MASK | DCR_DA_MASK | DCR_DD_MASK | DCR_LC_MASK) 131 mov r9 = cr.dcr 132 or r10 = r10, r9 133 mov cr.dcr = r10 134 128 135 # Initialize PSR 129 136 130 137 movl r10 = (PSR_DT_MASK | PSR_RT_MASK | PSR_IT_MASK | PSR_IC_MASK) /* Enable paging */ 131 138 mov r9 = psr 132 139 133 140 or r10 = r10, r9 134 141 mov cr.ipsr = r10 … … 138 145 srlz.d 139 146 srlz.i 140 147 141 148 .explicit 142 149 143 150 /* 144 151 * Return From Interrupt is the only way to … … 147 154 rfi ;; 148 155 149 150 156 .global paging_start 151 157 paging_start: 152 158 153 159 /* 154 160 * Now we are paging. 155 161 */ 156 162 157 163 # Switch to register bank 1 158 164 bsw.1 159 165 160 166 #ifdef CONFIG_SMP 161 167 # Am I BSP or AP? 
… … 164 170 cmp.eq p3, p2 = r20, r0 ;; 165 171 #else 166 cmp.eq p3, p2 = r0, r0 ;; 167 #endif 172 cmp.eq p3, p2 = r0, r0 ;; /* you are BSP */ 173 #endif /* CONFIG_SMP */ 168 174 169 175 # Initialize register stack … … 172 178 mov ar.bspstore = r8 173 179 loadrs 174 180 175 181 # Initialize memory stack to some sane value 176 182 movl r12 = stack0 ;; 177 add r12 = -16, r12 178 183 add r12 = -16, r12 /* allocate a scratch area on the stack */ 184 179 185 # Initialize gp (Global Pointer) register 180 movl r20 = (VRN_KERNEL << VRN_SHIFT) ;;181 or r20 = r20, r1;;182 movl r1 = _hardcoded_load_address186 movl r20 = (VRN_KERNEL << VRN_SHIFT) ;; 187 or r20 = r20, r1 ;; 188 movl r1 = kernel_image_start 183 189 184 190 /* 185 * Initialize hardcoded_* variables. Do only BSP191 * Initialize bootinfo on BSP. 186 192 */ 187 (p3) movl r14 = _hardcoded_ktext_size 188 (p3) movl r15 = _hardcoded_kdata_size 189 (p3) movl r16 = _hardcoded_load_address ;; 190 (p3) addl r17 = @gprel(hardcoded_ktext_size), gp 191 (p3) addl r18 = @gprel(hardcoded_kdata_size), gp 192 (p3) addl r19 = @gprel(hardcoded_load_address), gp 193 (p3) addl r21 = @gprel(bootinfo), gp 194 ;; 195 (p3) st8 [r17] = r14 196 (p3) st8 [r18] = r15 197 (p3) st8 [r19] = r16 193 (p3) addl r21 = @gprel(bootinfo), gp ;; 198 194 (p3) st8 [r21] = r20 199 195 200 196 ssm (1 << 19) ;; /* Disable f32 - f127 */ 201 197 srlz.i 202 198 srlz.d ;; 203 199 204 200 #ifdef CONFIG_SMP 205 201 (p2) movl r18 = main_ap ;; 206 (p2) 202 (p2) mov b1 = r18 ;; 207 203 (p2) br.call.sptk.many b0 = b1 208 204 209 205 # Mark that BSP is on 206 210 207 mov r20 = 1 ;; 211 208 movl r21 = bsp_started ;; 212 209 st8 [r21] = r20 ;; 213 210 #endif 214 211 215 212 br.call.sptk.many b0 = arch_pre_main 216 213 217 214 movl r18 = main_bsp ;; 218 215 mov b1 = r18 ;; … … 227 224 kernel_image_ap_start: 228 225 .auto 229 226 230 227 # Identify self(CPU) in OS structures by ID / EID 231 228 232 229 mov r9 = cr64 233 230 mov r10 = 1 … … 240 237 241 238 # Wait for wakeup 
synchro signal (#3 in cpu_by_id_eid_list) 242 239 243 240 kernel_image_ap_start_loop: 244 241 movl r11 = kernel_image_ap_start_loop 245 242 and r11 = r11, r12 246 mov b1 = r11 247 248 ld1 r20 = [r8] ;;249 movl r21 = 3 ;;250 cmp.eq p2, p3 = r20, r21 ;;243 mov b1 = r11 244 245 ld1 r20 = [r8] 246 movl r21 = 3 247 cmp.eq p2, p3 = r20, r21 251 248 (p3) br.call.sptk.many b0 = b1 252 249 253 250 movl r11 = kernel_image_start 254 251 and r11 = r11, r12 255 mov b1 = r11 252 mov b1 = r11 256 253 br.call.sptk.many b0 = b1 257 254 … … 259 256 .global bsp_started 260 257 bsp_started: 261 .space 8258 .space 8 262 259 263 260 .align 4096 264 261 .global cpu_by_id_eid_list 265 262 cpu_by_id_eid_list: 266 .space 65536267 268 #endif 263 .space 65536 264 265 #endif /* CONFIG_SMP */ -
kernel/arch/mips32/include/mm/tlb.h
r2f636b6 rf6b5593 59 59 typedef union { 60 60 struct { 61 #ifdef BIG_ENDIAN61 #ifdef __BE__ 62 62 unsigned : 2; /* zero */ 63 63 unsigned pfn : 24; /* frame number */ … … 80 80 typedef union { 81 81 struct { 82 #ifdef BIG_ENDIAN82 #ifdef __BE__ 83 83 unsigned vpn2 : 19; 84 84 unsigned : 5; … … 95 95 typedef union { 96 96 struct { 97 #ifdef BIG_ENDIAN97 #ifdef __BE__ 98 98 unsigned : 7; 99 99 unsigned mask : 12; … … 110 110 typedef union { 111 111 struct { 112 #ifdef BIG_ENDIAN112 #ifdef __BE__ 113 113 unsigned p : 1; 114 114 unsigned : 27; -
kernel/generic/include/main/main.h
r2f636b6 rf6b5593 38 38 #include <arch/types.h> 39 39 40 extern size_t hardcoded_kdata_size; 41 extern size_t hardcoded_ktext_size; 42 extern uintptr_t hardcoded_load_address; 40 43 extern uintptr_t stack_safe; 41 44 -
kernel/generic/src/main/main.c
r2f636b6 rf6b5593 101 101 context_t ctx; 102 102 103 /*104 * These 'hardcoded' variables will be intialized by105 * the linker or the low level assembler code with106 * appropriate sizes and addresses.107 */108 109 /** Virtual address of where the kernel is loaded. */110 uintptr_t hardcoded_load_address = 0;111 /** Size of the kernel code in bytes. */112 size_t hardcoded_ktext_size = 0;113 /** Size of the kernel data in bytes. */114 size_t hardcoded_kdata_size = 0;115 103 /** Lowest safe stack virtual address. */ 116 104 uintptr_t stack_safe = 0; -
uspace/Makefile
r2f636b6 rf6b5593 52 52 srv/vfs \ 53 53 srv/devmap \ 54 srv/part/mbr_part \ 55 app/edit \ 54 56 app/tetris \ 55 57 app/tester \ -
uspace/app/bdsh/cmds/modules/bdd/bdd.c
r2f636b6 rf6b5593 42 42 #include <assert.h> 43 43 44 #define BLOCK_SIZE 512 45 #define BPR 16 44 enum { 45 /* Number of bytes per row */ 46 BPR = 16 47 }; 46 48 47 49 static const char *cmdname = "bdd"; … … 67 69 unsigned int i, j; 68 70 dev_handle_t handle; 69 block_t *block;70 71 uint8_t *blk; 71 72 size_t size, bytes, rows; 73 size_t block_size; 72 74 int rc; 73 bn_t b off;75 bn_t ba; 74 76 uint8_t b; 75 77 … … 83 85 84 86 if (argc >= 3) 85 b off= strtol(argv[2], NULL, 0);87 ba = strtol(argv[2], NULL, 0); 86 88 else 87 b off= 0;89 ba = 0; 88 90 89 91 if (argc >= 4) … … 94 96 rc = devmap_device_get_handle(argv[1], &handle, 0); 95 97 if (rc != EOK) { 96 printf(" Error: could not resolve device `%s'.\n", argv[1]);98 printf("%s: Error resolving device `%s'.\n", cmdname, argv[1]); 97 99 return CMD_FAILURE; 98 100 } 99 101 100 rc = block_init(handle, BLOCK_SIZE);102 rc = block_init(handle, 2048); 101 103 if (rc != EOK) { 102 printf(" Error: could not init libblock.\n");104 printf("%s: Error initializing libblock.\n", cmdname); 103 105 return CMD_FAILURE; 104 106 } 105 107 106 rc = block_ cache_init(handle, BLOCK_SIZE, 2, CACHE_MODE_WB);108 rc = block_get_bsize(handle, &block_size); 107 109 if (rc != EOK) { 108 printf("Error: could not init block cache.\n"); 110 printf("%s: Error determining device block size.\n", cmdname); 111 return CMD_FAILURE; 112 } 113 114 blk = malloc(block_size); 115 if (blk == NULL) { 116 printf("%s: Error allocating memory.\n", cmdname); 117 block_fini(handle); 109 118 return CMD_FAILURE; 110 119 } 111 120 112 121 while (size > 0) { 113 rc = block_ get(&block, handle, boff, 0);122 rc = block_read_direct(handle, ba, 1, blk); 114 123 if (rc != EOK) { 115 printf("Error: could not get block %u, device %u.\n", 116 boff, handle); 124 printf("%s: Error reading block %llu\n", cmdname, ba); 125 free(blk); 126 block_fini(handle); 117 127 return CMD_FAILURE; 118 128 } 119 blk = (uint8_t *) block->data;120 129 121 bytes = (size < BLOCK_SIZE) ? 
size : BLOCK_SIZE;130 bytes = (size < block_size) ? size : block_size; 122 131 rows = (bytes + BPR - 1) / BPR; 123 132 … … 145 154 } 146 155 147 rc = block_put(block);148 if (rc != EOK) {149 printf("Error: could not put block %p.\n",150 block);151 return CMD_FAILURE;152 }153 154 156 if (size > rows * BPR) 155 157 size -= rows * BPR; … … 157 159 size = 0; 158 160 159 boff += rows * BPR; 161 /* Next block */ 162 ba += 1; 160 163 } 161 164 165 free(blk); 162 166 block_fini(handle); 163 167 -
uspace/app/tetris/screen.h
r2f636b6 rf6b5593 51 51 52 52 typedef struct { 53 i pcarg_t ws_row;54 i pcarg_t ws_col;53 int ws_row; 54 int ws_col; 55 55 } winsize_t; 56 56 -
uspace/lib/libblock/libblock.c
r2f636b6 rf6b5593 50 50 #include <adt/list.h> 51 51 #include <adt/hash_table.h> 52 #include <macros.h> 52 53 #include <mem.h> 53 54 … … 62 63 typedef struct { 63 64 fibril_mutex_t lock; 64 size_t block_size; /**< Block size. */65 size_t lblock_size; /**< Logical block size. */ 65 66 unsigned block_count; /**< Total number of blocks. */ 66 67 unsigned blocks_cached; /**< Number of cached blocks. */ … … 74 75 dev_handle_t dev_handle; 75 76 int dev_phone; 76 fibril_mutex_t com _area_lock;77 void *com _area;78 size_t com _size;77 fibril_mutex_t comm_area_lock; 78 void *comm_area; 79 size_t comm_size; 79 80 void *bb_buf; 80 off_t bb_off;81 size_t bb_size;81 bn_t bb_addr; 82 size_t pblock_size; /**< Physical block size. */ 82 83 cache_t *cache; 83 84 } devcon_t; 84 85 85 static int read_block(devcon_t *devcon, bn_t boff, size_t block_size); 86 static int write_block(devcon_t *devcon, bn_t boff, size_t block_size); 86 static int read_blocks(devcon_t *devcon, bn_t ba, size_t cnt); 87 static int write_blocks(devcon_t *devcon, bn_t ba, size_t cnt); 88 static int get_block_size(int dev_phone, size_t *bsize); 87 89 88 90 static devcon_t *devcon_search(dev_handle_t dev_handle) … … 102 104 } 103 105 104 static int devcon_add(dev_handle_t dev_handle, int dev_phone, void *com_area,105 size_t com_size)106 static int devcon_add(dev_handle_t dev_handle, int dev_phone, size_t bsize, 107 void *comm_area, size_t comm_size) 106 108 { 107 109 link_t *cur; 108 110 devcon_t *devcon; 111 112 if (comm_size < bsize) 113 return EINVAL; 109 114 110 115 devcon = malloc(sizeof(devcon_t)); … … 115 120 devcon->dev_handle = dev_handle; 116 121 devcon->dev_phone = dev_phone; 117 fibril_mutex_initialize(&devcon->com _area_lock);118 devcon->com _area = com_area;119 devcon->com _size = com_size;122 fibril_mutex_initialize(&devcon->comm_area_lock); 123 devcon->comm_area = comm_area; 124 devcon->comm_size = comm_size; 120 125 devcon->bb_buf = NULL; 121 devcon->bb_ off= 0;122 devcon-> bb_size = 0;126 
devcon->bb_addr = 0; 127 devcon->pblock_size = bsize; 123 128 devcon->cache = NULL; 124 129 … … 144 149 } 145 150 146 int block_init(dev_handle_t dev_handle, size_t com _size)151 int block_init(dev_handle_t dev_handle, size_t comm_size) 147 152 { 148 153 int rc; 149 154 int dev_phone; 150 void *com_area; 151 152 com_area = mmap(NULL, com_size, PROTO_READ | PROTO_WRITE, 155 void *comm_area; 156 size_t bsize; 157 158 comm_area = mmap(NULL, comm_size, PROTO_READ | PROTO_WRITE, 153 159 MAP_ANONYMOUS | MAP_PRIVATE, 0, 0); 154 if (!com _area) {160 if (!comm_area) { 155 161 return ENOMEM; 156 162 } … … 158 164 dev_phone = devmap_device_connect(dev_handle, IPC_FLAG_BLOCKING); 159 165 if (dev_phone < 0) { 160 munmap(com _area, com_size);166 munmap(comm_area, comm_size); 161 167 return dev_phone; 162 168 } 163 169 164 rc = ipc_share_out_start(dev_phone, com _area,170 rc = ipc_share_out_start(dev_phone, comm_area, 165 171 AS_AREA_READ | AS_AREA_WRITE); 166 172 if (rc != EOK) { 167 munmap(com _area, com_size);173 munmap(comm_area, comm_size); 168 174 ipc_hangup(dev_phone); 169 175 return rc; 170 176 } 171 172 rc = devcon_add(dev_handle, dev_phone, com_area, com_size); 177 178 if (get_block_size(dev_phone, &bsize) != EOK) { 179 munmap(comm_area, comm_size); 180 ipc_hangup(dev_phone); 181 return rc; 182 } 183 184 rc = devcon_add(dev_handle, dev_phone, bsize, comm_area, comm_size); 173 185 if (rc != EOK) { 174 munmap(com _area, com_size);186 munmap(comm_area, comm_size); 175 187 ipc_hangup(dev_phone); 176 188 return rc; … … 195 207 } 196 208 197 munmap(devcon->com _area, devcon->com_size);209 munmap(devcon->comm_area, devcon->comm_size); 198 210 ipc_hangup(devcon->dev_phone); 199 211 … … 201 213 } 202 214 203 int block_bb_read(dev_handle_t dev_handle, off_t off, size_t size)215 int block_bb_read(dev_handle_t dev_handle, bn_t ba) 204 216 { 205 217 void *bb_buf; … … 211 223 if (devcon->bb_buf) 212 224 return EEXIST; 213 bb_buf = malloc( size);225 bb_buf = 
malloc(devcon->pblock_size); 214 226 if (!bb_buf) 215 227 return ENOMEM; 216 217 fibril_mutex_lock(&devcon->com _area_lock);218 rc = read_block (devcon, 0, size);228 229 fibril_mutex_lock(&devcon->comm_area_lock); 230 rc = read_blocks(devcon, 0, 1); 219 231 if (rc != EOK) { 220 fibril_mutex_unlock(&devcon->com _area_lock);232 fibril_mutex_unlock(&devcon->comm_area_lock); 221 233 free(bb_buf); 222 234 return rc; 223 235 } 224 memcpy(bb_buf, devcon->com _area,size);225 fibril_mutex_unlock(&devcon->com _area_lock);236 memcpy(bb_buf, devcon->comm_area, devcon->pblock_size); 237 fibril_mutex_unlock(&devcon->comm_area_lock); 226 238 227 239 devcon->bb_buf = bb_buf; 228 devcon->bb_off = off; 229 devcon->bb_size = size; 240 devcon->bb_addr = ba; 230 241 231 242 return EOK; … … 275 286 fibril_mutex_initialize(&cache->lock); 276 287 list_initialize(&cache->free_head); 277 cache-> block_size = size;288 cache->lblock_size = size; 278 289 cache->block_count = blocks; 279 290 cache->blocks_cached = 0; 280 291 cache->mode = mode; 292 293 /* No block size translation a.t.m. 
*/ 294 assert(cache->lblock_size == devcon->pblock_size); 281 295 282 296 if (!hash_table_create(&cache->block_hash, CACHE_BUCKETS, 1, … … 371 385 if (!b) 372 386 goto recycle; 373 b->data = malloc(cache-> block_size);387 b->data = malloc(cache->lblock_size); 374 388 if (!b->data) { 375 389 free(b); … … 405 419 list_append(&b->free_link, &cache->free_head); 406 420 fibril_mutex_unlock(&cache->lock); 407 fibril_mutex_lock(&devcon->com_area_lock); 408 memcpy(devcon->com_area, b->data, b->size); 409 rc = write_block(devcon, b->boff, 410 cache->block_size); 411 fibril_mutex_unlock(&devcon->com_area_lock); 421 fibril_mutex_lock(&devcon->comm_area_lock); 422 memcpy(devcon->comm_area, b->data, b->size); 423 rc = write_blocks(devcon, b->boff, 1); 424 fibril_mutex_unlock(&devcon->comm_area_lock); 412 425 if (rc != EOK) { 413 426 /* … … 444 457 block_initialize(b); 445 458 b->dev_handle = dev_handle; 446 b->size = cache-> block_size;459 b->size = cache->lblock_size; 447 460 b->boff = boff; 448 461 hash_table_insert(&cache->block_hash, &key, &b->hash_link); … … 461 474 * the new contents from the device. 
462 475 */ 463 fibril_mutex_lock(&devcon->com _area_lock);464 rc = read_block (devcon, b->boff, cache->block_size);465 memcpy(b->data, devcon->com _area, cache->block_size);466 fibril_mutex_unlock(&devcon->com _area_lock);476 fibril_mutex_lock(&devcon->comm_area_lock); 477 rc = read_blocks(devcon, b->boff, 1); 478 memcpy(b->data, devcon->comm_area, cache->lblock_size); 479 fibril_mutex_unlock(&devcon->comm_area_lock); 467 480 if (rc != EOK) 468 481 b->toxic = true; … … 521 534 if (block->dirty && (block->refcnt == 1) && 522 535 (blocks_cached > CACHE_HI_WATERMARK || mode != CACHE_MODE_WB)) { 523 fibril_mutex_lock(&devcon->com _area_lock);524 memcpy(devcon->com _area, block->data, block->size);525 rc = write_block (devcon, block->boff, block->size);526 fibril_mutex_unlock(&devcon->com _area_lock);536 fibril_mutex_lock(&devcon->comm_area_lock); 537 memcpy(devcon->comm_area, block->data, block->size); 538 rc = write_blocks(devcon, block->boff, 1); 539 fibril_mutex_unlock(&devcon->comm_area_lock); 527 540 block->dirty = false; 528 541 } … … 601 614 */ 602 615 int block_seqread(dev_handle_t dev_handle, off_t *bufpos, size_t *buflen, 603 off_t *pos, void *dst, size_t size , size_t block_size)616 off_t *pos, void *dst, size_t size) 604 617 { 605 618 off_t offset = 0; 606 619 size_t left = size; 607 devcon_t *devcon = devcon_search(dev_handle); 608 assert(devcon); 609 610 fibril_mutex_lock(&devcon->com_area_lock); 620 size_t block_size; 621 devcon_t *devcon; 622 623 devcon = devcon_search(dev_handle); 624 assert(devcon); 625 block_size = devcon->pblock_size; 626 627 fibril_mutex_lock(&devcon->comm_area_lock); 611 628 while (left > 0) { 612 629 size_t rd; … … 622 639 * destination buffer. 
623 640 */ 624 memcpy(dst + offset, devcon->com _area + *bufpos, rd);641 memcpy(dst + offset, devcon->comm_area + *bufpos, rd); 625 642 offset += rd; 626 643 *bufpos += rd; … … 633 650 int rc; 634 651 635 rc = read_block (devcon, *pos / block_size, block_size);652 rc = read_blocks(devcon, *pos / block_size, 1); 636 653 if (rc != EOK) { 637 fibril_mutex_unlock(&devcon->com _area_lock);654 fibril_mutex_unlock(&devcon->comm_area_lock); 638 655 return rc; 639 656 } … … 643 660 } 644 661 } 645 fibril_mutex_unlock(&devcon->com _area_lock);662 fibril_mutex_unlock(&devcon->comm_area_lock); 646 663 647 664 return EOK; 648 665 } 649 666 650 /** Read block from block device. 667 /** Read blocks directly from device (bypass cache). 668 * 669 * @param dev_handle Device handle of the block device. 670 * @param ba Address of first block. 671 * @param cnt Number of blocks. 672 * @param src Buffer for storing the data. 673 * 674 * @return EOK on success or negative error code on failure. 675 */ 676 int block_read_direct(dev_handle_t dev_handle, bn_t ba, size_t cnt, void *buf) 677 { 678 devcon_t *devcon; 679 int rc; 680 681 devcon = devcon_search(dev_handle); 682 assert(devcon); 683 684 fibril_mutex_lock(&devcon->comm_area_lock); 685 686 rc = read_blocks(devcon, ba, cnt); 687 if (rc == EOK) 688 memcpy(buf, devcon->comm_area, devcon->pblock_size * cnt); 689 690 fibril_mutex_unlock(&devcon->comm_area_lock); 691 692 return rc; 693 } 694 695 /** Write blocks directly to device (bypass cache). 696 * 697 * @param dev_handle Device handle of the block device. 698 * @param ba Address of first block. 699 * @param cnt Number of blocks. 700 * @param src The data to be written. 701 * 702 * @return EOK on success or negative error code on failure. 
703 */ 704 int block_write_direct(dev_handle_t dev_handle, bn_t ba, size_t cnt, 705 const void *data) 706 { 707 devcon_t *devcon; 708 int rc; 709 710 devcon = devcon_search(dev_handle); 711 assert(devcon); 712 713 fibril_mutex_lock(&devcon->comm_area_lock); 714 715 memcpy(devcon->comm_area, data, devcon->pblock_size * cnt); 716 rc = read_blocks(devcon, ba, cnt); 717 718 fibril_mutex_unlock(&devcon->comm_area_lock); 719 720 return rc; 721 } 722 723 /** Get device block size. 724 * 725 * @param dev_handle Device handle of the block device. 726 * @param bsize Output block size. 727 * 728 * @return EOK on success or negative error code on failure. 729 */ 730 int block_get_bsize(dev_handle_t dev_handle, size_t *bsize) 731 { 732 devcon_t *devcon; 733 734 devcon = devcon_search(dev_handle); 735 assert(devcon); 736 737 return get_block_size(devcon->dev_phone, bsize); 738 } 739 740 /** Read blocks from block device. 651 741 * 652 742 * @param devcon Device connection. 653 * @param b off Block index.654 * @param block_size Block size.743 * @param ba Address of first block. 744 * @param cnt Number of blocks. 655 745 * @param src Buffer for storing the data. 656 746 * 657 747 * @return EOK on success or negative error code on failure. 658 748 */ 659 static int read_block(devcon_t *devcon, bn_t boff, size_t block_size) 660 { 661 ipcarg_t retval; 749 static int read_blocks(devcon_t *devcon, bn_t ba, size_t cnt) 750 { 662 751 int rc; 663 752 664 753 assert(devcon); 665 rc = async_req_2_1(devcon->dev_phone, BD_READ_BLOCK, boff, block_size, 666 &retval); 667 if ((rc != EOK) || (retval != EOK)) 668 return (rc != EOK ? rc : (int) retval); 669 670 return EOK; 754 rc = async_req_3_0(devcon->dev_phone, BD_READ_BLOCKS, LOWER32(ba), 755 UPPER32(ba), cnt); 756 return rc; 671 757 } 672 758 … … 674 760 * 675 761 * @param devcon Device connection. 676 * @param b off Block index.677 * @param block_size Block size.762 * @param ba Address of first block. 763 * @param cnt Number of blocks. 
678 764 * @param src Buffer containing the data to write. 679 765 * 680 766 * @return EOK on success or negative error code on failure. 681 767 */ 682 static int write_block(devcon_t *devcon, bn_t boff, size_t block_size) 683 { 684 ipcarg_t retval; 768 static int write_blocks(devcon_t *devcon, bn_t ba, size_t cnt) 769 { 685 770 int rc; 686 771 687 772 assert(devcon); 688 rc = async_req_2_1(devcon->dev_phone, BD_WRITE_BLOCK, boff, block_size, 689 &retval); 690 if ((rc != EOK) || (retval != EOK)) 691 return (rc != EOK ? rc : (int) retval); 692 693 return EOK; 773 rc = async_req_3_0(devcon->dev_phone, BD_WRITE_BLOCKS, LOWER32(ba), 774 UPPER32(ba), cnt); 775 return rc; 776 } 777 778 /** Get block size used by the device. */ 779 static int get_block_size(int dev_phone, size_t *bsize) 780 { 781 ipcarg_t bs; 782 int rc; 783 784 rc = async_req_0_1(dev_phone, BD_GET_BLOCK_SIZE, &bs); 785 if (rc == EOK) 786 *bsize = (size_t) bs; 787 788 return rc; 694 789 } 695 790 -
uspace/lib/libblock/libblock.h
r2f636b6 rf6b5593 60 60 #define BLOCK_FLAGS_NOREAD 1 61 61 62 typedef u nsignedbn_t; /**< Block number type. */62 typedef uint64_t bn_t; /**< Block number type. */ 63 63 64 64 typedef struct block { … … 98 98 extern void block_fini(dev_handle_t); 99 99 100 extern int block_bb_read(dev_handle_t, off_t, size_t);100 extern int block_bb_read(dev_handle_t, bn_t); 101 101 extern void *block_bb_get(dev_handle_t); 102 102 … … 107 107 108 108 extern int block_seqread(dev_handle_t, off_t *, size_t *, off_t *, void *, 109 size_t, size_t); 109 size_t); 110 111 extern int block_get_bsize(dev_handle_t, size_t *); 112 extern int block_read_direct(dev_handle_t, bn_t, size_t, void *); 113 extern int block_write_direct(dev_handle_t, bn_t, size_t, const void *); 110 114 111 115 #endif -
uspace/lib/libc/Makefile.toolchain
r2f636b6 rf6b5593 64 64 # 65 65 66 ifeq ($(COMPILER),gcc_cross) 67 CC = $(TOOLCHAIN_DIR)/$(TARGET)-gcc 68 AS = $(TOOLCHAIN_DIR)/$(TARGET)-as 69 LD = $(TOOLCHAIN_DIR)/$(TARGET)-ld 70 AR = $(TOOLCHAIN_DIR)/$(TARGET)-ar 71 OBJCOPY = $(TOOLCHAIN_DIR)/$(TARGET)-objcopy 72 OBJDUMP = $(TOOLCHAIN_DIR)/$(TARGET)-objdump 73 DEPEND_DEFS = $(DEFS) $(CONFIG_DEFS) 74 endif 75 66 76 ifeq ($(COMPILER),gcc_native) 67 77 CC = gcc … … 74 84 endif 75 85 76 ifeq ($(COMPILER),icc _native)86 ifeq ($(COMPILER),icc) 77 87 CC = icc 78 88 AS = as … … 84 94 endif 85 95 86 ifeq ($(COMPILER), gcc_cross)87 CC = $(TOOLCHAIN_DIR)/$(TARGET)-gcc88 AS = $( TOOLCHAIN_DIR)/$(TARGET)-as89 LD = $( TOOLCHAIN_DIR)/$(TARGET)-ld90 AR = $( TOOLCHAIN_DIR)/$(TARGET)-ar91 OBJCOPY = $( TOOLCHAIN_DIR)/$(TARGET)-objcopy92 OBJDUMP = $( TOOLCHAIN_DIR)/$(TARGET)-objdump96 ifeq ($(COMPILER),clang) 97 CC = clang 98 AS = $(BINUTILS_PREFIX)as 99 LD = $(BINUTILS_PREFIX)ld 100 AR = $(BINUTILS_PREFIX)ar 101 OBJCOPY = $(BINUTILS_PREFIX)objcopy 102 OBJDUMP = $(BINUTILS_PREFIX)objdump 93 103 DEPEND_DEFS = $(DEFS) $(CONFIG_DEFS) 94 104 endif -
uspace/lib/libc/arch/ia64/include/atomic.h
r2f636b6 rf6b5593 27 27 */ 28 28 29 /** @addtogroup libcia64 29 /** @addtogroup libcia64 30 30 * @{ 31 31 */ … … 36 36 #define LIBC_ia64_ATOMIC_H_ 37 37 38 /** Atomic addition. 39 * 40 * @param val Atomic value. 41 * @param imm Value to add. 42 * 43 * @return Value before addition. 44 */ 45 static inline long atomic_add(atomic_t *val, int imm) 38 static inline void atomic_inc(atomic_t *val) 46 39 { 47 40 long v; 41 42 asm volatile ( 43 "fetchadd8.rel %[v] = %[count], 1\n" 44 : [v] "=r" (v), 45 [count] "+m" (val->count) 46 ); 47 } 48 48 49 asm volatile ("fetchadd8.rel %0 = %1, %2\n" : "=r" (v), "+m" (val->count) : "i" (imm)); 50 49 static inline void atomic_dec(atomic_t *val) 50 { 51 long v; 52 53 asm volatile ( 54 "fetchadd8.rel %[v] = %[count], -1\n" 55 : [v] "=r" (v), 56 [count] "+m" (val->count) 57 ); 58 } 59 60 static inline long atomic_preinc(atomic_t *val) 61 { 62 long v; 63 64 asm volatile ( 65 "fetchadd8.rel %[v] = %[count], 1\n" 66 : [v] "=r" (v), 67 [count] "+m" (val->count) 68 ); 69 70 return (v + 1); 71 } 72 73 static inline long atomic_predec(atomic_t *val) 74 { 75 long v; 76 77 asm volatile ( 78 "fetchadd8.rel %[v] = %[count], -1\n" 79 : [v] "=r" (v), 80 [count] "+m" (val->count) 81 ); 82 83 return (v - 1); 84 } 85 86 static inline long atomic_postinc(atomic_t *val) 87 { 88 long v; 89 90 asm volatile ( 91 "fetchadd8.rel %[v] = %[count], 1\n" 92 : [v] "=r" (v), 93 [count] "+m" (val->count) 94 ); 95 51 96 return v; 52 97 } 53 98 54 static inline void atomic_inc(atomic_t *val) { atomic_add(val, 1); } 55 static inline void atomic_dec(atomic_t *val) { atomic_add(val, -1); } 56 57 static inline long atomic_preinc(atomic_t *val) { return atomic_add(val, 1) + 1; } 58 static inline long atomic_predec(atomic_t *val) { return atomic_add(val, -1) - 1; } 59 60 static inline long atomic_postinc(atomic_t *val) { return atomic_add(val, 1); } 61 static inline long atomic_postdec(atomic_t *val) { return atomic_add(val, -1); } 99 static inline long 
atomic_postdec(atomic_t *val) 100 { 101 long v; 102 103 asm volatile ( 104 "fetchadd8.rel %[v] = %[count], -1\n" 105 : [v] "=r" (v), 106 [count] "+m" (val->count) 107 ); 108 109 return v; 110 } 62 111 63 112 #endif -
uspace/lib/libc/generic/io/console.c
r2f636b6 rf6b5593 45 45 } 46 46 47 int console_get_size(int phone, i pcarg_t *rows, ipcarg_t *cols)47 int console_get_size(int phone, int *cols, int *rows) 48 48 { 49 return async_req_0_2(phone, CONSOLE_GET_SIZE, rows, cols); 49 ipcarg_t cols_v; 50 ipcarg_t rows_v; 51 int rc; 52 53 rc = async_req_0_2(phone, CONSOLE_GET_SIZE, &cols_v, &rows_v); 54 55 *cols = (int) cols_v; 56 *rows = (int) rows_v; 57 return rc; 50 58 } 51 59 … … 86 94 } 87 95 88 void console_goto(int phone, i pcarg_t row, ipcarg_t col)96 void console_goto(int phone, int col, int row) 89 97 { 90 async_msg_2(phone, CONSOLE_GOTO, row, col);98 async_msg_2(phone, CONSOLE_GOTO, col, row); 91 99 } 92 100 -
uspace/lib/libc/include/io/console.h
r2f636b6 rf6b5593 68 68 extern void console_clear(int phone); 69 69 70 extern int console_get_size(int phone, i pcarg_t *rows, ipcarg_t *cols);71 extern void console_goto(int phone, i pcarg_t row, ipcarg_t col);70 extern int console_get_size(int phone, int *cols, int *rows); 71 extern void console_goto(int phone, int col, int row); 72 72 73 73 extern void console_set_style(int phone, int style); -
uspace/lib/libc/include/ipc/bd.h
r2f636b6 rf6b5593 39 39 40 40 typedef enum { 41 BD_READ_BLOCK = IPC_FIRST_USER_METHOD, 42 BD_WRITE_BLOCK 41 BD_GET_BLOCK_SIZE = IPC_FIRST_USER_METHOD, 42 BD_READ_BLOCKS, 43 BD_WRITE_BLOCKS 43 44 } bd_request_t; 44 45 -
uspace/srv/bd/ata_bd/ata_bd.c
r2f636b6 rf6b5593 62 62 #include <bool.h> 63 63 #include <task.h> 64 #include <macros.h> 64 65 65 66 #include "ata_bd.h" … … 86 87 static int ata_bd_init(void); 87 88 static void ata_bd_connection(ipc_callid_t iid, ipc_call_t *icall); 88 static int ata_bd_r dwr(int disk_id, ipcarg_t method, off_t offset, size_t size,89 static int ata_bd_read_blocks(int disk_id, uint64_t ba, size_t cnt, 89 90 void *buf); 90 static int ata_bd_read_block(int disk_id, uint64_t blk_idx, size_t blk_cnt, 91 static int ata_bd_write_blocks(int disk_id, uint64_t ba, size_t cnt, 92 const void *buf); 93 static int ata_bd_read_block(int disk_id, uint64_t ba, size_t cnt, 91 94 void *buf); 92 static int ata_bd_write_block(int disk_id, uint64_t b lk_idx, size_t blk_cnt,95 static int ata_bd_write_block(int disk_id, uint64_t ba, size_t cnt, 93 96 const void *buf); 94 97 static int disk_init(disk_t *d, int disk_id); 95 98 static int drive_identify(int drive_id, void *buf); 96 99 static void disk_print_summary(disk_t *d); 97 static int coord_calc(disk_t *d, uint64_t b lk_idx, block_coord_t *bc);100 static int coord_calc(disk_t *d, uint64_t ba, block_coord_t *bc); 98 101 static void coord_sc_program(const block_coord_t *bc, uint16_t scnt); 99 102 static int wait_status(unsigned set, unsigned n_reset, uint8_t *pstatus, … … 228 231 int flags; 229 232 int retval; 230 off_t idx;231 size_t size;233 uint64_t ba; 234 size_t cnt; 232 235 int disk_id, i; 233 236 … … 270 273 ipc_answer_0(callid, EOK); 271 274 return; 272 case BD_READ_BLOCK :273 case BD_WRITE_BLOCK:274 idx = IPC_GET_ARG1(call);275 size = IPC_GET_ARG2(call);276 if ( size > comm_size) {277 retval = E INVAL;275 case BD_READ_BLOCKS: 276 ba = MERGE_LOUP32(IPC_GET_ARG1(call), 277 IPC_GET_ARG2(call)); 278 cnt = IPC_GET_ARG3(call); 279 if (cnt * block_size > comm_size) { 280 retval = ELIMIT; 278 281 break; 279 282 } 280 retval = ata_bd_rdwr(disk_id, method, idx, 281 size, fs_va); 283 retval = ata_bd_read_blocks(disk_id, ba, cnt, fs_va); 282 284 break; 
285 case BD_WRITE_BLOCKS: 286 ba = MERGE_LOUP32(IPC_GET_ARG1(call), 287 IPC_GET_ARG2(call)); 288 cnt = IPC_GET_ARG3(call); 289 if (cnt * block_size > comm_size) { 290 retval = ELIMIT; 291 break; 292 } 293 retval = ata_bd_write_blocks(disk_id, ba, cnt, fs_va); 294 break; 295 case BD_GET_BLOCK_SIZE: 296 ipc_answer_1(callid, EOK, block_size); 297 continue; 283 298 default: 284 299 retval = EINVAL; … … 373 388 } 374 389 375 /** Transfer a logical block from/to the device. 376 * 377 * @param disk_id Device index (0 or 1) 378 * @param method @c BD_READ_BLOCK or @c BD_WRITE_BLOCK 379 * @param blk_idx Index of the first block. 380 * @param size Size of the logical block. 381 * @param buf Data buffer. 382 * 383 * @return EOK on success, EIO on error. 384 */ 385 static int ata_bd_rdwr(int disk_id, ipcarg_t method, off_t blk_idx, size_t size, 386 void *buf) 387 { 390 /** Read multiple blocks from the device. */ 391 static int ata_bd_read_blocks(int disk_id, uint64_t ba, size_t cnt, 392 void *buf) { 393 388 394 int rc; 389 size_t now; 390 391 while (size > 0) { 392 now = size < block_size ? size : block_size; 393 if (now != block_size) 394 return EINVAL; 395 396 if (method == BD_READ_BLOCK) 397 rc = ata_bd_read_block(disk_id, blk_idx, 1, buf); 398 else 399 rc = ata_bd_write_block(disk_id, blk_idx, 1, buf); 400 395 396 while (cnt > 0) { 397 rc = ata_bd_read_block(disk_id, ba, 1, buf); 401 398 if (rc != EOK) 402 399 return rc; 403 400 401 ++ba; 402 --cnt; 404 403 buf += block_size; 405 blk_idx++; 406 407 if (size > block_size) 408 size -= block_size; 409 else 410 size = 0; 404 } 405 406 return EOK; 407 } 408 409 /** Write multiple blocks to the device. 
*/ 410 static int ata_bd_write_blocks(int disk_id, uint64_t ba, size_t cnt, 411 const void *buf) { 412 413 int rc; 414 415 while (cnt > 0) { 416 rc = ata_bd_write_block(disk_id, ba, 1, buf); 417 if (rc != EOK) 418 return rc; 419 420 ++ba; 421 --cnt; 422 buf += block_size; 411 423 } 412 424 … … 466 478 * 467 479 * @param disk_id Device index (0 or 1) 468 * @param b lk_idx Index ofthe first block.469 * @param blk_cntNumber of blocks to transfer.480 * @param ba Address the first block. 481 * @param cnt Number of blocks to transfer. 470 482 * @param buf Buffer for holding the data. 471 483 * 472 484 * @return EOK on success, EIO on error. 473 485 */ 474 static int ata_bd_read_block(int disk_id, uint64_t b lk_idx, size_t blk_cnt,486 static int ata_bd_read_block(int disk_id, uint64_t ba, size_t blk_cnt, 475 487 void *buf) 476 488 { … … 486 498 487 499 /* Compute block coordinates. */ 488 if (coord_calc(d, b lk_idx, &bc) != EOK)500 if (coord_calc(d, ba, &bc) != EOK) 489 501 return EINVAL; 490 502 … … 541 553 * 542 554 * @param disk_id Device index (0 or 1) 543 * @param b lk_idx Indexof the first block.544 * @param blk_cntNumber of blocks to transfer.555 * @param ba Address of the first block. 556 * @param cnt Number of blocks to transfer. 545 557 * @param buf Buffer holding the data to write. 546 558 * 547 559 * @return EOK on success, EIO on error. 548 560 */ 549 static int ata_bd_write_block(int disk_id, uint64_t b lk_idx, size_t blk_cnt,561 static int ata_bd_write_block(int disk_id, uint64_t ba, size_t cnt, 550 562 const void *buf) 551 563 { … … 560 572 561 573 /* Compute block coordinates. */ 562 if (coord_calc(d, b lk_idx, &bc) != EOK)574 if (coord_calc(d, ba, &bc) != EOK) 563 575 return EINVAL; 564 576 … … 620 632 * @return EOK on success or EINVAL if block index is past end of device. 
621 633 */ 622 static int coord_calc(disk_t *d, uint64_t b lk_idx, block_coord_t *bc)634 static int coord_calc(disk_t *d, uint64_t ba, block_coord_t *bc) 623 635 { 624 636 uint64_t c; … … 626 638 627 639 /* Check device bounds. */ 628 if (b lk_idx>= d->blocks)640 if (ba >= d->blocks) 629 641 return EINVAL; 630 642 … … 634 646 case am_chs: 635 647 /* Compute CHS coordinates. */ 636 c = b lk_idx/ (d->geom.heads * d->geom.sectors);637 idx = b lk_idx% (d->geom.heads * d->geom.sectors);648 c = ba / (d->geom.heads * d->geom.sectors); 649 idx = ba % (d->geom.heads * d->geom.sectors); 638 650 639 651 bc->cyl_lo = c & 0xff; … … 645 657 case am_lba28: 646 658 /* Compute LBA-28 coordinates. */ 647 bc->c0 = b lk_idx& 0xff; /* bits 0-7 */648 bc->c1 = (b lk_idx >> 8) & 0xff;/* bits 8-15 */649 bc->c2 = (b lk_idx>> 16) & 0xff; /* bits 16-23 */650 bc->h = (b lk_idx>> 24) & 0x0f; /* bits 24-27 */659 bc->c0 = ba & 0xff; /* bits 0-7 */ 660 bc->c1 = (ba >> 8) & 0xff; /* bits 8-15 */ 661 bc->c2 = (ba >> 16) & 0xff; /* bits 16-23 */ 662 bc->h = (ba >> 24) & 0x0f; /* bits 24-27 */ 651 663 break; 652 664 653 665 case am_lba48: 654 666 /* Compute LBA-48 coordinates. */ 655 bc->c0 = b lk_idx& 0xff; /* bits 0-7 */656 bc->c1 = (b lk_idx >> 8) & 0xff;/* bits 8-15 */657 bc->c2 = (b lk_idx>> 16) & 0xff; /* bits 16-23 */658 bc->c3 = (b lk_idx>> 24) & 0xff; /* bits 24-31 */659 bc->c4 = (b lk_idx>> 32) & 0xff; /* bits 32-39 */660 bc->c5 = (b lk_idx>> 40) & 0xff; /* bits 40-47 */667 bc->c0 = ba & 0xff; /* bits 0-7 */ 668 bc->c1 = (ba >> 8) & 0xff; /* bits 8-15 */ 669 bc->c2 = (ba >> 16) & 0xff; /* bits 16-23 */ 670 bc->c3 = (ba >> 24) & 0xff; /* bits 24-31 */ 671 bc->c4 = (ba >> 32) & 0xff; /* bits 32-39 */ 672 bc->c5 = (ba >> 40) & 0xff; /* bits 40-47 */ 661 673 bc->h = 0; 662 674 break; -
uspace/srv/bd/file_bd/file_bd.c
r2f636b6 rf6b5593 51 51 #include <bool.h> 52 52 #include <task.h> 53 #include <macros.h> 53 54 54 55 #define NAME "file_bd" 55 56 56 static size_t comm_size;57 static const size_t block_size = 512; 57 58 static FILE *img; 58 59 … … 62 63 static int file_bd_init(const char *fname); 63 64 static void file_bd_connection(ipc_callid_t iid, ipc_call_t *icall); 64 static int file_bd_read (off_t blk_idx, size_t size, void *buf);65 static int file_bd_write (off_t blk_idx, size_t size,void *buf);65 static int file_bd_read_blocks(uint64_t ba, size_t cnt, void *buf); 66 static int file_bd_write_blocks(uint64_t ba, size_t cnt, const void *buf); 66 67 67 68 int main(int argc, char **argv) … … 120 121 ipc_call_t call; 121 122 ipcarg_t method; 123 size_t comm_size; 122 124 int flags; 123 125 int retval; 124 off_t idx;125 size_t size;126 uint64_t ba; 127 size_t cnt; 126 128 127 129 /* Answer the IPC_M_CONNECT_ME_TO call. */ … … 149 151 ipc_answer_0(callid, EOK); 150 152 return; 151 case BD_READ_BLOCK :152 case BD_WRITE_BLOCK:153 idx = IPC_GET_ARG1(call);154 size = IPC_GET_ARG2(call);155 if ( size > comm_size) {156 retval = E INVAL;153 case BD_READ_BLOCKS: 154 ba = MERGE_LOUP32(IPC_GET_ARG1(call), 155 IPC_GET_ARG2(call)); 156 cnt = IPC_GET_ARG3(call); 157 if (cnt * block_size > comm_size) { 158 retval = ELIMIT; 157 159 break; 158 160 } 159 if (method == BD_READ_BLOCK) 160 retval = file_bd_read(idx, size, fs_va); 161 else 162 retval = file_bd_write(idx, size, fs_va); 161 retval = file_bd_read_blocks(ba, cnt, fs_va); 163 162 break; 163 case BD_WRITE_BLOCKS: 164 ba = MERGE_LOUP32(IPC_GET_ARG1(call), 165 IPC_GET_ARG2(call)); 166 cnt = IPC_GET_ARG3(call); 167 if (cnt * block_size > comm_size) { 168 retval = ELIMIT; 169 break; 170 } 171 retval = file_bd_write_blocks(ba, cnt, fs_va); 172 break; 173 case BD_GET_BLOCK_SIZE: 174 ipc_answer_1(callid, EOK, block_size); 175 continue; 164 176 default: 165 177 retval = EINVAL; … … 170 182 } 171 183 172 static int file_bd_read(off_t blk_idx, size_t 
size, void *buf) 184 /** Read blocks from the device. */ 185 static int file_bd_read_blocks(uint64_t ba, size_t cnt, void *buf) 173 186 { 174 187 size_t n_rd; … … 176 189 fibril_mutex_lock(&dev_lock); 177 190 178 fseek(img, b lk_idx *size, SEEK_SET);179 n_rd = fread(buf, 1, size, img);191 fseek(img, ba * block_size, SEEK_SET); 192 n_rd = fread(buf, block_size, cnt, img); 180 193 181 194 if (ferror(img)) { … … 186 199 fibril_mutex_unlock(&dev_lock); 187 200 188 if (n_rd < size)189 return EINVAL; /* Read beyond end of d isk*/201 if (n_rd < cnt) 202 return EINVAL; /* Read beyond end of device */ 190 203 191 204 return EOK; 192 205 } 193 206 194 static int file_bd_write(off_t blk_idx, size_t size, void *buf) 207 /** Write blocks to the device. */ 208 static int file_bd_write_blocks(uint64_t ba, size_t cnt, const void *buf) 195 209 { 196 210 size_t n_wr; … … 198 212 fibril_mutex_lock(&dev_lock); 199 213 200 fseek(img, b lk_idx *size, SEEK_SET);201 n_wr = fread(buf, 1, size, img);202 203 if (ferror(img) || n_wr < size) {214 fseek(img, ba * block_size, SEEK_SET); 215 n_wr = fread(buf, block_size, cnt, img); 216 217 if (ferror(img) || n_wr < cnt) { 204 218 fibril_mutex_unlock(&dev_lock); 205 219 return EIO; /* Write error */ -
uspace/srv/bd/gxe_bd/gxe_bd.c
r2f636b6 rf6b5593 47 47 #include <sys/types.h> 48 48 #include <errno.h> 49 #include <macros.h> 49 50 #include <task.h> 50 51 … … 97 98 static int gxe_bd_init(void); 98 99 static void gxe_bd_connection(ipc_callid_t iid, ipc_call_t *icall); 99 static int gx _bd_rdwr(int disk_id, ipcarg_t method, off_t offset, size_t size,100 static int gxe_bd_read_blocks(int disk_id, uint64_t ba, unsigned cnt, 100 101 void *buf); 101 static int gxe_bd_read_block(int disk_id, uint64_t offset, size_t size, 102 void *buf); 103 static int gxe_bd_write_block(int disk_id, uint64_t offset, size_t size, 102 static int gxe_bd_write_blocks(int disk_id, uint64_t ba, unsigned cnt, 104 103 const void *buf); 104 static int gxe_bd_read_block(int disk_id, uint64_t ba, void *buf); 105 static int gxe_bd_write_block(int disk_id, uint64_t ba, const void *buf); 105 106 106 107 int main(int argc, char **argv) … … 163 164 int flags; 164 165 int retval; 165 off_t idx;166 size_t size;166 uint64_t ba; 167 unsigned cnt; 167 168 int disk_id, i; 168 169 … … 185 186 186 187 if (!ipc_share_out_receive(&callid, &comm_size, &flags)) { 188 ipc_answer_0(callid, EHANGUP); 189 return; 190 } 191 192 if (comm_size < block_size) { 187 193 ipc_answer_0(callid, EHANGUP); 188 194 return; … … 205 211 ipc_answer_0(callid, EOK); 206 212 return; 207 case BD_READ_BLOCK :208 case BD_WRITE_BLOCK:209 idx = IPC_GET_ARG1(call);210 size = IPC_GET_ARG2(call);211 if ( size > comm_size) {212 retval = E INVAL;213 case BD_READ_BLOCKS: 214 ba = MERGE_LOUP32(IPC_GET_ARG1(call), 215 IPC_GET_ARG2(call)); 216 cnt = IPC_GET_ARG3(call); 217 if (cnt * block_size > comm_size) { 218 retval = ELIMIT; 213 219 break; 214 220 } 215 retval = gx_bd_rdwr(disk_id, method, idx * size, 216 size, fs_va); 221 retval = gxe_bd_read_blocks(disk_id, ba, cnt, fs_va); 217 222 break; 223 case BD_WRITE_BLOCKS: 224 ba = MERGE_LOUP32(IPC_GET_ARG1(call), 225 IPC_GET_ARG2(call)); 226 cnt = IPC_GET_ARG3(call); 227 if (cnt * block_size > comm_size) { 228 retval = ELIMIT; 229 
break; 230 } 231 retval = gxe_bd_write_blocks(disk_id, ba, cnt, fs_va); 232 break; 233 case BD_GET_BLOCK_SIZE: 234 ipc_answer_1(callid, EOK, block_size); 235 continue; 218 236 default: 219 237 retval = EINVAL; … … 224 242 } 225 243 226 static int gx_bd_rdwr(int disk_id, ipcarg_t method, off_t offset, size_t size, 227 void *buf) 228 { 244 /** Read multiple blocks from the device. */ 245 static int gxe_bd_read_blocks(int disk_id, uint64_t ba, unsigned cnt, 246 void *buf) { 247 229 248 int rc; 230 size_t now; 231 232 while (size > 0) { 233 now = size < block_size ? size : block_size; 234 235 if (method == BD_READ_BLOCK) 236 rc = gxe_bd_read_block(disk_id, offset, now, buf); 237 else 238 rc = gxe_bd_write_block(disk_id, offset, now, buf); 239 249 250 while (cnt > 0) { 251 rc = gxe_bd_read_block(disk_id, ba, buf); 240 252 if (rc != EOK) 241 253 return rc; 242 254 255 ++ba; 256 --cnt; 243 257 buf += block_size; 244 offset += block_size; 245 246 if (size > block_size) 247 size -= block_size; 248 else 249 size = 0; 250 } 251 252 return EOK; 253 } 254 255 static int gxe_bd_read_block(int disk_id, uint64_t offset, size_t size, 256 void *buf) 258 } 259 260 return EOK; 261 } 262 263 /** Write multiple blocks to the device. */ 264 static int gxe_bd_write_blocks(int disk_id, uint64_t ba, unsigned cnt, 265 const void *buf) { 266 267 int rc; 268 269 while (cnt > 0) { 270 rc = gxe_bd_write_block(disk_id, ba, buf); 271 if (rc != EOK) 272 return rc; 273 274 ++ba; 275 --cnt; 276 buf += block_size; 277 } 278 279 return EOK; 280 } 281 282 /** Read a block from the device. 
*/ 283 static int gxe_bd_read_block(int disk_id, uint64_t ba, void *buf) 257 284 { 258 285 uint32_t status; 286 uint64_t byte_addr; 259 287 size_t i; 260 288 uint32_t w; 261 289 290 byte_addr = ba * block_size; 291 262 292 fibril_mutex_lock(&dev_lock[disk_id]); 263 pio_write_32(&dev->offset_lo, (uint32_t) offset);264 pio_write_32(&dev->offset_hi, offset>> 32);293 pio_write_32(&dev->offset_lo, (uint32_t) byte_addr); 294 pio_write_32(&dev->offset_hi, byte_addr >> 32); 265 295 pio_write_32(&dev->disk_id, disk_id); 266 296 pio_write_32(&dev->control, CTL_READ_START); … … 272 302 } 273 303 274 for (i = 0; i < size; i++) {304 for (i = 0; i < block_size; i++) { 275 305 ((uint8_t *) buf)[i] = w = pio_read_8(&dev->buffer[i]); 276 306 } … … 280 310 } 281 311 282 static int gxe_bd_write_block(int disk_id, uint64_t offset, size_t size, 283 312 /** Write a block to the device. */ 313 static int gxe_bd_write_block(int disk_id, uint64_t ba, const void *buf) 284 314 { 285 315 uint32_t status; 316 uint64_t byte_addr; 286 317 size_t i; 287 318 288 for (i = 0; i < size; i++) { 319 byte_addr = ba * block_size; 320 321 fibril_mutex_lock(&dev_lock[disk_id]); 322 323 for (i = 0; i < block_size; i++) { 289 324 pio_write_8(&dev->buffer[i], ((const uint8_t *) buf)[i]); 290 325 } 291 326 292 fibril_mutex_lock(&dev_lock[disk_id]); 293 pio_write_32(&dev->offset_lo, (uint32_t) offset); 294 pio_write_32(&dev->offset_hi, offset >> 32); 327 pio_write_32(&dev->offset_lo, (uint32_t) byte_addr); 328 pio_write_32(&dev->offset_hi, byte_addr >> 32); 295 329 pio_write_32(&dev->disk_id, disk_id); 296 330 pio_write_32(&dev->control, CTL_WRITE_START); -
uspace/srv/bd/rd/rd.c
r2f636b6 rf6b5593 55 55 #include <devmap.h> 56 56 #include <ipc/bd.h> 57 #include <macros.h> 57 58 58 59 #define NAME "rd" 59 60 60 /** Pointer to the ramdisk's image .*/61 /** Pointer to the ramdisk's image */ 61 62 static void *rd_addr; 62 /** Size of the ramdisk .*/63 /** Size of the ramdisk */ 63 64 static size_t rd_size; 65 66 /** Block size */ 67 static const size_t block_size = 512; 68 69 static int rd_read_blocks(uint64_t ba, size_t cnt, void *buf); 70 static int rd_write_blocks(uint64_t ba, size_t cnt, const void *buf); 64 71 65 72 /** … … 82 89 int retval; 83 90 void *fs_va = NULL; 84 off_t offset;85 size_t block_size;86 size_t maxblock_size;91 uint64_t ba; 92 size_t cnt; 93 size_t comm_size; 87 94 88 95 /* … … 95 102 */ 96 103 int flags; 97 if (ipc_share_out_receive(&callid, & maxblock_size, &flags)) {98 fs_va = as_get_mappable_page( maxblock_size);104 if (ipc_share_out_receive(&callid, &comm_size, &flags)) { 105 fs_va = as_get_mappable_page(comm_size); 99 106 if (fs_va) { 100 107 (void) ipc_share_out_finalize(callid, fs_va); … … 123 130 ipc_answer_0(callid, EOK); 124 131 return; 125 case BD_READ_BLOCK: 126 offset = IPC_GET_ARG1(call); 127 block_size = IPC_GET_ARG2(call); 128 if (block_size > maxblock_size) { 129 /* 130 * Maximum block size exceeded. 131 */ 132 case BD_READ_BLOCKS: 133 ba = MERGE_LOUP32(IPC_GET_ARG1(call), 134 IPC_GET_ARG2(call)); 135 cnt = IPC_GET_ARG3(call); 136 if (cnt * block_size > comm_size) { 132 137 retval = ELIMIT; 133 138 break; 134 139 } 135 if (offset * block_size > rd_size - block_size) { 136 /* 137 * Reading past the end of the device. 
138 */ 140 retval = rd_read_blocks(ba, cnt, fs_va); 141 break; 142 case BD_WRITE_BLOCKS: 143 ba = MERGE_LOUP32(IPC_GET_ARG1(call), 144 IPC_GET_ARG2(call)); 145 cnt = IPC_GET_ARG3(call); 146 if (cnt * block_size > comm_size) { 139 147 retval = ELIMIT; 140 148 break; 141 149 } 142 fibril_rwlock_read_lock(&rd_lock); 143 memcpy(fs_va, rd_addr + offset * block_size, block_size); 144 fibril_rwlock_read_unlock(&rd_lock); 145 retval = EOK; 150 retval = rd_write_blocks(ba, cnt, fs_va); 146 151 break; 147 case BD_WRITE_BLOCK: 148 offset = IPC_GET_ARG1(call); 149 block_size = IPC_GET_ARG2(call); 150 if (block_size > maxblock_size) { 151 /* 152 * Maximum block size exceeded. 153 */ 154 retval = ELIMIT; 155 break; 156 } 157 if (offset * block_size > rd_size - block_size) { 158 /* 159 * Writing past the end of the device. 160 */ 161 retval = ELIMIT; 162 break; 163 } 164 fibril_rwlock_write_lock(&rd_lock); 165 memcpy(rd_addr + offset * block_size, fs_va, block_size); 166 fibril_rwlock_write_unlock(&rd_lock); 167 retval = EOK; 168 break; 152 case BD_GET_BLOCK_SIZE: 153 ipc_answer_1(callid, EOK, block_size); 154 continue; 169 155 default: 170 156 /* … … 181 167 } 182 168 169 /** Read blocks from the device. */ 170 static int rd_read_blocks(uint64_t ba, size_t cnt, void *buf) 171 { 172 if ((ba + cnt) * block_size > rd_size) { 173 /* Reading past the end of the device. */ 174 return ELIMIT; 175 } 176 177 fibril_rwlock_read_lock(&rd_lock); 178 memcpy(buf, rd_addr + ba * block_size, block_size * cnt); 179 fibril_rwlock_read_unlock(&rd_lock); 180 181 return EOK; 182 } 183 184 /** Write blocks to the device. */ 185 static int rd_write_blocks(uint64_t ba, size_t cnt, const void *buf) 186 { 187 if ((ba + cnt) * block_size > rd_size) { 188 /* Writing past the end of the device. 
*/ 189 return ELIMIT; 190 } 191 192 fibril_rwlock_write_lock(&rd_lock); 193 memcpy(rd_addr + ba * block_size, buf, block_size * cnt); 194 fibril_rwlock_write_unlock(&rd_lock); 195 196 return EOK; 197 } 198 183 199 /** Prepare the ramdisk image for operation. */ 184 200 static bool rd_init(void) -
uspace/srv/fs/fat/fat_ops.c
r2f636b6 rf6b5593 833 833 834 834 /* prepare the boot block */ 835 rc = block_bb_read(dev_handle, BS_BLOCK * BS_SIZE, BS_SIZE);835 rc = block_bb_read(dev_handle, BS_BLOCK); 836 836 if (rc != EOK) { 837 837 block_fini(dev_handle); -
uspace/srv/fs/tmpfs/tmpfs_dump.c
r2f636b6 rf6b5593 47 47 #include <byteorder.h> 48 48 49 #define TMPFS_ BLOCK_SIZE102449 #define TMPFS_COMM_SIZE 1024 50 50 51 51 struct rdentry { … … 69 69 70 70 if (block_seqread(dev, bufpos, buflen, pos, &entry, 71 sizeof(entry) , TMPFS_BLOCK_SIZE) != EOK)71 sizeof(entry)) != EOK) 72 72 return false; 73 73 … … 89 89 90 90 if (block_seqread(dev, bufpos, buflen, pos, fname, 91 entry.len , TMPFS_BLOCK_SIZE) != EOK) {91 entry.len) != EOK) { 92 92 ops->destroy(fn); 93 93 free(fname); … … 105 105 106 106 if (block_seqread(dev, bufpos, buflen, pos, &size, 107 sizeof(size) , TMPFS_BLOCK_SIZE) != EOK)107 sizeof(size)) != EOK) 108 108 return false; 109 109 … … 117 117 nodep->size = size; 118 118 if (block_seqread(dev, bufpos, buflen, pos, nodep->data, 119 size , TMPFS_BLOCK_SIZE) != EOK)119 size) != EOK) 120 120 return false; 121 121 … … 133 133 134 134 if (block_seqread(dev, bufpos, buflen, pos, fname, 135 entry.len , TMPFS_BLOCK_SIZE) != EOK) {135 entry.len) != EOK) { 136 136 ops->destroy(fn); 137 137 free(fname); … … 166 166 int rc; 167 167 168 rc = block_init(dev, TMPFS_ BLOCK_SIZE);168 rc = block_init(dev, TMPFS_COMM_SIZE); 169 169 if (rc != EOK) 170 170 return false; … … 175 175 176 176 char tag[6]; 177 if (block_seqread(dev, &bufpos, &buflen, &pos, tag, 5, 178 TMPFS_BLOCK_SIZE) != EOK) 177 if (block_seqread(dev, &bufpos, &buflen, &pos, tag, 5) != EOK) 179 178 goto error; 180 179 -
uspace/srv/vfs/vfs_ops.c
r2f636b6 rf6b5593 934 934 { 935 935 int fd = IPC_GET_ARG1(*request); 936 size_t size = IPC_GET_ARG2(*request);937 936 ipcarg_t rc; 938 937
Note:
See TracChangeset
for help on using the changeset viewer.