Changeset e49e234 in mainline for kernel/generic/src/ddi
- Timestamp:
- 2009-02-27T11:32:31Z (17 years ago)
- Branches:
- lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children:
- c1f7f6ea
- Parents:
- 5f0f29ce
- Files:
- 1 edited: kernel/generic/src/ddi/ddi.c (modified) (11 diffs)
Legend:
- Unmodified
- Added
- Removed
kernel/generic/src/ddi/ddi.c
r5f0f29ce re49e234 30 30 * @{ 31 31 */ 32 32 33 33 /** 34 34 * @file 35 * @brief Device Driver Interface functions.35 * @brief Device Driver Interface functions. 36 36 * 37 37 * This file contains functions that comprise the Device Driver Interface. … … 48 48 #include <synch/spinlock.h> 49 49 #include <syscall/copy.h> 50 #include <adt/ list.h>50 #include <adt/btree.h> 51 51 #include <arch.h> 52 52 #include <align.h> … … 56 56 SPINLOCK_INITIALIZE(parea_lock); 57 57 58 /** Listwith enabled physical memory areas. */59 static LIST_INITIALIZE(parea_head);58 /** B+tree with enabled physical memory areas. */ 59 static btree_t parea_btree; 60 60 61 61 /** Initialize DDI. */ 62 62 void ddi_init(void) 63 63 { 64 hw_area();64 btree_create(&parea_btree); 65 65 } 66 66 … … 69 69 * @param parea Pointer to physical area structure. 70 70 * 71 * @todo This function doesn't check for overlaps. It depends on the kernel to72 * create disjunct physical memory areas.73 71 */ 74 72 void ddi_parea_register(parea_t *parea) 75 73 { 76 ipl_t ipl; 77 78 ipl = interrupts_disable(); 74 ipl_t ipl = interrupts_disable(); 79 75 spinlock_lock(&parea_lock); 80 76 81 77 /* 82 * TODO: we should really check for overlaps here. 83 * However, we should be safe because the kernel is pretty sane. 84 */ 85 link_initialize(&parea->link); 86 list_append(&parea->link, &parea_head); 78 * We don't check for overlaps here as the kernel is pretty sane. 79 */ 80 btree_insert(&parea_btree, (btree_key_t) parea->pbase, parea, NULL); 87 81 88 82 spinlock_unlock(&parea_lock); … … 92 86 /** Map piece of physical memory into virtual address space of current task. 93 87 * 94 * @param pf Physical address of the starting frame.95 * @param vp Virtual address of the starting page.88 * @param pf Physical address of the starting frame. 89 * @param vp Virtual address of the starting page. 96 90 * @param pages Number of pages to map. 97 91 * @param flags Address space area flags for the mapping. 
98 92 * 99 93 * @return 0 on success, EPERM if the caller lacks capabilities to use this 100 * syscall, ENOENT if there is no task matching the specified ID or the 101 * physical address space is not enabled for mapping and ENOMEM if there 102 * was a problem in creating address space area. 103 */ 104 static int ddi_physmem_map(uintptr_t pf, uintptr_t vp, pfn_t pages, int flags) 105 { 106 ipl_t ipl; 107 cap_t caps; 94 * syscall, EBADMEM if pf or vf is not page aligned, ENOENT if there 95 * is no task matching the specified ID or the physical address space 96 * is not enabled for mapping and ENOMEM if there was a problem in 97 * creating address space area. 98 * 99 */ 100 static int ddi_physmem_map(uintptr_t pf, uintptr_t vp, count_t pages, int flags) 101 { 102 ASSERT(TASK); 103 ASSERT((pf % FRAME_SIZE) == 0); 104 ASSERT((vp % PAGE_SIZE) == 0); 105 106 /* 107 * Make sure the caller is authorised to make this syscall. 108 */ 109 cap_t caps = cap_get(TASK); 110 if (!(caps & CAP_MEM_MANAGER)) 111 return EPERM; 112 108 113 mem_backend_data_t backend_data; 109 110 114 backend_data.base = pf; 111 115 backend_data.frames = pages; 112 116 113 /* 114 * Make sure the caller is authorised to make this syscall. 115 */ 116 caps = cap_get(TASK); 117 if (!(caps & CAP_MEM_MANAGER)) 118 return EPERM; 119 120 ipl = interrupts_disable(); 121 122 /* 123 * Check if the physical memory area is enabled for mapping. 124 */ 125 spinlock_lock(&parea_lock); 126 127 bool fnd = false; 128 link_t *cur; 129 130 for (cur = parea_head.next; cur != &parea_head; cur = cur->next) { 131 parea_t *parea = list_get_instance(cur, parea_t, link); 132 if ((parea->pbase <= pf) && (ADDR2PFN(pf - parea->pbase) + pages <= parea->frames)) { 133 fnd = true; 134 break; 135 } 136 } 137 138 spinlock_unlock(&parea_lock); 139 140 if (!fnd) { 141 /* 142 * Physical memory area cannot be mapped. 
143 */ 144 interrupts_restore(ipl); 145 return ENOENT; 146 } 147 117 ipl_t ipl = interrupts_disable(); 118 119 /* Find the zone of the physical memory */ 120 spinlock_lock(&zones.lock); 121 count_t znum = find_zone(ADDR2PFN(pf), pages, 0); 122 123 if (znum == (count_t) -1) { 124 /* Frames not found in any zones 125 * -> assume it is hardware device and allow mapping 126 */ 127 spinlock_unlock(&zones.lock); 128 goto map; 129 } 130 131 if (zones.info[znum].flags & ZONE_FIRMWARE) { 132 /* Frames are part of firmware */ 133 spinlock_unlock(&zones.lock); 134 goto map; 135 } 136 137 if (zone_flags_available(zones.info[znum].flags)) { 138 /* Frames are part of physical memory, check if the memory 139 * region is enabled for mapping. 140 */ 141 spinlock_unlock(&zones.lock); 142 143 spinlock_lock(&parea_lock); 144 btree_node_t *nodep; 145 parea_t *parea = (parea_t *) btree_search(&parea_btree, 146 (btree_key_t) pf, &nodep); 147 148 if ((!parea) || (parea->frames < pages)) 149 goto err; 150 151 spinlock_unlock(&parea_lock); 152 goto map; 153 } 154 155 err: 156 spinlock_unlock(&zones.lock); 157 interrupts_restore(ipl); 158 return ENOENT; 159 160 map: 148 161 spinlock_lock(&TASK->lock); 149 162 150 if (!as_area_create(TASK->as, flags, pages * PAGE_SIZE, vp, AS_AREA_ATTR_NONE,151 &phys_backend, &backend_data)) {163 if (!as_area_create(TASK->as, flags, pages * PAGE_SIZE, vp, 164 AS_AREA_ATTR_NONE, &phys_backend, &backend_data)) { 152 165 /* 153 166 * The address space area could not have been created. … … 175 188 * 176 189 * @return 0 on success, EPERM if the caller lacks capabilities to use this 177 * syscall, ENOENT if there is no task matching the specified ID. 190 * syscall, ENOENT if there is no task matching the specified ID. 191 * 178 192 */ 179 193 static int ddi_iospace_enable(task_id_t id, uintptr_t ioaddr, size_t size) 180 194 { 181 ipl_t ipl;182 cap_t caps;183 task_t *t;184 int rc;185 186 195 /* 187 196 * Make sure the caller is authorised to make this syscall. 
188 197 */ 189 cap s = cap_get(TASK);198 cap_t caps = cap_get(TASK); 190 199 if (!(caps & CAP_IO_MANAGER)) 191 200 return EPERM; 192 201 193 ipl = interrupts_disable();202 ipl_t ipl = interrupts_disable(); 194 203 spinlock_lock(&tasks_lock); 195 204 196 t = task_find_by_id(id);197 198 if ((!t ) || (!context_check(CONTEXT, t->context))) {205 task_t *task = task_find_by_id(id); 206 207 if ((!task) || (!context_check(CONTEXT, task->context))) { 199 208 /* 200 209 * There is no task with the specified ID … … 206 215 return ENOENT; 207 216 } 208 217 209 218 /* Lock the task and release the lock protecting tasks_btree. */ 210 spinlock_lock(&t ->lock);219 spinlock_lock(&task->lock); 211 220 spinlock_unlock(&tasks_lock); 212 213 rc = ddi_iospace_enable_arch(t, ioaddr, size); 214 215 spinlock_unlock(&t->lock); 216 interrupts_restore(ipl); 221 222 int rc = ddi_iospace_enable_arch(task, ioaddr, size); 223 224 spinlock_unlock(&task->lock); 225 interrupts_restore(ipl); 226 217 227 return rc; 218 228 } … … 226 236 * 227 237 * @return 0 on success, otherwise it returns error code found in errno.h 228 */ 238 * 239 */ 229 240 unative_t sys_physmem_map(unative_t phys_base, unative_t virt_base, 230 241 unative_t pages, unative_t flags) … … 232 243 return (unative_t) ddi_physmem_map(ALIGN_DOWN((uintptr_t) phys_base, 233 244 FRAME_SIZE), ALIGN_DOWN((uintptr_t) virt_base, PAGE_SIZE), 234 ( pfn_t) pages, (int) flags);245 (count_t) pages, (int) flags); 235 246 } 236 247 … … 240 251 * 241 252 * @return 0 on success, otherwise it returns error code found in errno.h 242 */ 253 * 254 */ 243 255 unative_t sys_iospace_enable(ddi_ioarg_t *uspace_io_arg) 244 256 { 245 257 ddi_ioarg_t arg; 246 int rc; 247 248 rc = copy_from_uspace(&arg, uspace_io_arg, sizeof(ddi_ioarg_t)); 258 int rc = copy_from_uspace(&arg, uspace_io_arg, sizeof(ddi_ioarg_t)); 249 259 if (rc != 0) 250 260 return (unative_t) rc; 251 261 252 262 return (unative_t) ddi_iospace_enable((task_id_t) arg.task_id, 253 263 (uintptr_t) 
arg.ioaddr, (size_t) arg.size); … … 257 267 * 258 268 * @param enable If non-zero, the preemption counter will be decremented, 259 * leading to potential enabling of preemption. Otherwise the preemption 260 * counter will be incremented, preventing preemption from occurring. 269 * leading to potential enabling of preemption. Otherwise 270 * the preemption counter will be incremented, preventing 271 * preemption from occurring. 261 272 * 262 273 * @return Zero on success or EPERM if callers capabilities are not sufficient. 263 */ 274 * 275 */ 264 276 unative_t sys_preempt_control(int enable) 265 277 { 266 278 if (!cap_get(TASK) & CAP_PREEMPT_CONTROL) 267 279 return EPERM; 280 268 281 if (enable) 269 282 preemption_enable(); 270 283 else 271 284 preemption_disable(); 285 272 286 return 0; 273 287 }
Note: See TracChangeset for help on using the changeset viewer.
