Changeset 1440eae in mainline for uspace/srv
- Timestamp: 2011-12-22T12:04:54Z
- Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: 3819ce5
- Parents: 65d7b0a (diff), a438de48 (diff)

Note: this is a merge changeset; the changes displayed below correspond to the merge itself. Use the (diff) links above to see all the changes relative to each parent.

- Location: uspace/srv
- Files: 29 edited
uspace/srv/bd/ata_bd/ata_bd.c
     }

-    fs_va = as_get_mappable_page(comm_size);
-    if (fs_va == NULL) {
+    (void) async_share_out_finalize(callid, &fs_va);
+    if (fs_va == (void *) -1) {
         async_answer_0(callid, EHANGUP);
         return;
     }
-
-    (void) async_share_out_finalize(callid, fs_va);

     while (true) {
uspace/srv/bd/file_bd/file_bd.c
     }

-    fs_va = as_get_mappable_page(comm_size);
-    if (fs_va == NULL) {
+    (void) async_share_out_finalize(callid, &fs_va);
+    if (fs_va == (void *) -1) {
         async_answer_0(callid, EHANGUP);
         return;
     }
-
-    (void) async_share_out_finalize(callid, fs_va);

     while (true) {
uspace/srv/bd/gxe_bd/gxe_bd.c
     }

-    fs_va = as_get_mappable_page(comm_size);
-    if (fs_va == NULL) {
+    (void) async_share_out_finalize(callid, &fs_va);
+    if (fs_va == (void *) -1) {
         async_answer_0(callid, EHANGUP);
         return;
     }
-
-    (void) async_share_out_finalize(callid, fs_va);

     while (true) {
uspace/srv/bd/part/guid_part/guid_part.c
     }

-    fs_va = as_get_mappable_page(comm_size);
-    if (fs_va == NULL) {
+    (void) async_share_out_finalize(callid, &fs_va);
+    if (fs_va == (void *) -1) {
         async_answer_0(callid, EHANGUP);
         return;
     }
-
-    (void) async_share_out_finalize(callid, fs_va);

     while (true) {
uspace/srv/bd/part/mbr_part/mbr_part.c
     }

-    fs_va = as_get_mappable_page(comm_size);
-    if (fs_va == NULL) {
+    (void) async_share_out_finalize(callid, &fs_va);
+    if (fs_va == (void *) -1) {
         async_answer_0(callid, EHANGUP);
         return;
     }
-
-    (void) async_share_out_finalize(callid, fs_va);

     while (1) {
uspace/srv/bd/rd/rd.c
     unsigned int flags;
     if (async_share_out_receive(&callid, &comm_size, &flags)) {
-        fs_va = as_get_mappable_page(comm_size);
-        if (fs_va) {
-            (void) async_share_out_finalize(callid, fs_va);
-        } else {
+        (void) async_share_out_finalize(callid, &fs_va);
+        if (fs_va == (void *) -1) {
             async_answer_0(callid, EHANGUP);
             return;
…

     rd_size = ALIGN_UP(size, block_size);
-    rd_addr = as_get_mappable_page(rd_size);
-
     unsigned int flags =
         AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE;
-    ret = physmem_map((void *) addr_phys, rd_addr,
-        ALIGN_UP(rd_size, PAGE_SIZE) >> PAGE_WIDTH, flags);
-    if (ret < 0) {
+
+    ret = physmem_map((void *) addr_phys,
+        ALIGN_UP(rd_size, PAGE_SIZE) >> PAGE_WIDTH, flags, &rd_addr);
+    if (ret != EOK) {
         printf("%s: Error mapping RAM disk\n", NAME);
         return false;
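All of the block device servers above now accept the client's shared buffer the same way: the destination address is no longer reserved up front with as_get_mappable_page(), it is chosen while the share-out is finalized and returned through an output pointer, with (void *) -1 signalling failure. A minimal sketch of that handshake, assuming the HelenOS async IPC interfaces at this revision (the helper name and header list are illustrative, not part of the changeset):

#include <async.h>
#include <errno.h>

/* Illustrative helper, not taken from the changeset. */
static void comm_area_accept(void **fs_va)
{
    ipc_callid_t callid;
    size_t comm_size;
    unsigned int flags;

    if (async_share_out_receive(&callid, &comm_size, &flags)) {
        /* The mapping address is picked during finalization. */
        (void) async_share_out_finalize(callid, fs_va);
        if (*fs_va == (void *) -1) {
            /* The shared area could not be created. */
            async_answer_0(callid, EHANGUP);
        }
    }
}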
uspace/srv/hid/fb/fb.c
     }

-    frontbuf->data = as_get_mappable_page(frontbuf->size);
-    int rc = async_answer_1(callid, EOK, (sysarg_t) frontbuf->data);
-    if (rc != EOK) {
+    int rc = async_share_out_finalize(callid, &frontbuf->data);
+    if ((rc != EOK) || (frontbuf->data == (void *) -1)) {
         free(frontbuf);
         async_answer_0(iid, ENOMEM);
…
     }

-    imagemap->data = as_get_mappable_page(imagemap->size);
-    int rc = async_answer_1(callid, EOK, (sysarg_t) imagemap->data);
-    if (rc != EOK) {
+    int rc = async_share_out_finalize(callid, &imagemap->data);
+    if ((rc != EOK) || (imagemap->data == (void *) -1)) {
         free(imagemap);
         async_answer_0(iid, ENOMEM);
uspace/srv/hid/fb/port/ega.c

     ega.size = (width * height) << 1;
-    ega.addr = as_get_mappable_page(ega.size);
-    if (ega.addr == NULL)
-        return ENOMEM;
-
-    rc = physmem_map((void *) paddr, ega.addr,
-        ALIGN_UP(ega.size, PAGE_SIZE) >> PAGE_WIDTH, AS_AREA_READ | AS_AREA_WRITE);
+
+    rc = physmem_map((void *) paddr,
+        ALIGN_UP(ega.size, PAGE_SIZE) >> PAGE_WIDTH,
+        AS_AREA_READ | AS_AREA_WRITE, (void *) &ega.addr);
     if (rc != EOK)
         return rc;
uspace/srv/hid/fb/port/kchar.c
     return rc;

-    kchar.addr = as_get_mappable_page(1);
-    if (kchar.addr == NULL)
-        return ENOMEM;
-
-    rc = physmem_map((void *) paddr, kchar.addr,
-        ALIGN_UP(1, PAGE_SIZE) >> PAGE_WIDTH, AS_AREA_READ | AS_AREA_WRITE);
+    rc = physmem_map((void *) paddr,
+        ALIGN_UP(1, PAGE_SIZE) >> PAGE_WIDTH,
+        AS_AREA_READ | AS_AREA_WRITE, (void *) &kchar.addr);
     if (rc != EOK)
         return rc;
uspace/srv/hid/fb/port/kfb.c

     kfb.size = scanline * height;
-    kfb.addr = as_get_mappable_page(kfb.size);
-    if (kfb.addr == NULL) {
-        free(kfb.glyphs);
-        return ENOMEM;
-    }
-
-    rc = physmem_map((void *) paddr + offset, kfb.addr,
-        ALIGN_UP(kfb.size, PAGE_SIZE) >> PAGE_WIDTH, AS_AREA_READ | AS_AREA_WRITE);
+
+    rc = physmem_map((void *) paddr + offset,
+        ALIGN_UP(kfb.size, PAGE_SIZE) >> PAGE_WIDTH,
+        AS_AREA_READ | AS_AREA_WRITE, (void *) &kfb.addr);
     if (rc != EOK) {
         free(kfb.glyphs);
uspace/srv/hid/fb/port/niagara.c
     return rc;

-    niagara.fifo =
-        (output_fifo_t *) as_get_mappable_page(sizeof(output_fifo_t));
-    if (niagara.fifo == NULL)
-        return ENOMEM;
-
-    rc = physmem_map((void *) paddr, (void *) niagara.fifo, 1,
-        AS_AREA_READ | AS_AREA_WRITE);
+    rc = physmem_map((void *) paddr, 1,
+        AS_AREA_READ | AS_AREA_WRITE, (void *) &niagara.fifo);
     if (rc != EOK)
         return rc;
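The frame buffer ports above all switch to the reworked physmem_map(): the caller no longer pre-reserves a virtual range with as_get_mappable_page(); the function chooses the address itself and hands it back through its final argument, and failure is reported by the error code rather than the old NULL check. A short sketch under that assumption (wrapper name and header list are illustrative, not part of the changeset):

#include <align.h>
#include <as.h>
#include <ddi.h>
#include <errno.h>
#include <sys/types.h>

/* Illustrative wrapper, not taken from the changeset. */
static int map_device_frame(uintptr_t paddr, size_t size, void **vaddr)
{
    /* The mapped virtual address is returned through *vaddr. */
    return physmem_map((void *) paddr,
        ALIGN_UP(size, PAGE_SIZE) >> PAGE_WIDTH,
        AS_AREA_READ | AS_AREA_WRITE, vaddr);
}

Callers then compare the return value against EOK, as ega.c, kchar.c, kfb.c and niagara.c do in the hunks above.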
uspace/srv/hid/input/port/gxemul.c
     async_set_interrupt_received(gxemul_irq_handler);
     gxemul_cmds[0].addr = (void *) addr;
-    register_irq(inr, device_assign_devno(), 0, &gxemul_kbd);
+    irq_register(inr, device_assign_devno(), 0, &gxemul_kbd);
     return 0;
 }
uspace/srv/hid/input/port/msim.c
     msim_cmds[0].addr = (void *) vaddr;
     async_set_interrupt_received(msim_irq_handler);
-    register_irq(inr, device_assign_devno(), 0, &msim_kbd);
+    irq_register(inr, device_assign_devno(), 0, &msim_kbd);

     return 0;
uspace/srv/hid/input/port/niagara.c
 #define POLL_INTERVAL 10000

-/**
- * Virtual address mapped to the buffer shared with the kernel counterpart.
- */
-static uintptr_t input_buffer_addr;
-
 /*
  * Kernel counterpart of the driver pushes characters (it has read) here.
…
         return -1;

-    input_buffer_addr = (uintptr_t) as_get_mappable_page(PAGE_SIZE);
-    int rc = physmem_map((void *) paddr, (void *) input_buffer_addr,
-        1, AS_AREA_READ | AS_AREA_WRITE);
-
+    int rc = physmem_map((void *) paddr, 1,
+        AS_AREA_READ | AS_AREA_WRITE, (void *) &input_buffer);
     if (rc != 0) {
         printf("Niagara: uspace driver couldn't map physical memory: %d\n",
…
         return rc;
     }
-
-    input_buffer = (input_buffer_t) input_buffer_addr;

     thread_id_t tid;
uspace/srv/hid/input/port/ns16550.c

     async_set_interrupt_received(ns16550_irq_handler);
-    register_irq(inr, device_assign_devno(), inr, &ns16550_kbd);
+    irq_register(inr, device_assign_devno(), inr, &ns16550_kbd);

     return pio_enable((void *) ns16550_physical, 8, &vaddr);
uspace/srv/hid/input/port/pl050.c

     async_set_interrupt_received(pl050_irq_handler);
-    register_irq(inr, device_assign_devno(), 0, &pl050_kbd);
+    irq_register(inr, device_assign_devno(), 0, &pl050_kbd);

     return 0;
uspace/srv/hid/s3c24xx_ts/s3c24xx_ts.c

     async_set_interrupt_received(s3c24xx_ts_irq_handler);
-    register_irq(inr, device_assign_devno(), 0, &ts_irq_code);
+    irq_register(inr, device_assign_devno(), 0, &ts_irq_code);

     s3c24xx_ts_wait_for_int_mode(ts, updn_down);
uspace/srv/hw/bus/cuda_adb/cuda_adb.c
     cuda_irq_code.cmds[0].addr = (void *) &((cuda_t *) instance->cuda_kernel)->ifr;
     async_set_interrupt_received(cuda_irq_handler);
-    register_irq(10, device_assign_devno(), 0, &cuda_irq_code);
+    irq_register(10, device_assign_devno(), 0, &cuda_irq_code);

     /* Enable SR interrupt. */
uspace/srv/hw/char/i8042/i8042.c
     i8042_kbd.cmds[0].addr = (void *) &((i8042_t *) i8042_kernel)->status;
     i8042_kbd.cmds[3].addr = (void *) &((i8042_t *) i8042_kernel)->data;
-    register_irq(inr_a, device_assign_devno(), 0, &i8042_kbd);
-    register_irq(inr_b, device_assign_devno(), 0, &i8042_kbd);
+    irq_register(inr_a, device_assign_devno(), 0, &i8042_kbd);
+    irq_register(inr_b, device_assign_devno(), 0, &i8042_kbd);
     printf("%s: registered for interrupts %" PRIun " and %" PRIun "\n",
         NAME, inr_a, inr_b);
uspace/srv/hw/char/s3c24xx_uart/s3c24xx_uart.c
     async_set_interrupt_received(s3c24xx_uart_irq_handler);

-    register_irq(inr, device_assign_devno(), 0, &uart_irq_code);
+    irq_register(inr, device_assign_devno(), 0, &uart_irq_code);

     /* Enable FIFO, Tx trigger level: empty, Rx trigger level: 1 byte. */
uspace/srv/hw/irc/obio/obio.c

     base_phys = (void *) paddr;
-    base_virt = as_get_mappable_page(OBIO_SIZE);

     int flags = AS_AREA_READ | AS_AREA_WRITE;
-    int retval = physmem_map(base_phys, (void *) base_virt,
-        ALIGN_UP(OBIO_SIZE, PAGE_SIZE) >> PAGE_WIDTH, flags);
+    int retval = physmem_map(base_phys,
+        ALIGN_UP(OBIO_SIZE, PAGE_SIZE) >> PAGE_WIDTH, flags,
+        (void *) &base_virt);

     if (retval < 0) {
uspace/srv/net/tl/tcp/conn.c

 LIST_INITIALIZE(conn_list);
+FIBRIL_MUTEX_INITIALIZE(conn_list_lock);

 static void tcp_conn_seg_process(tcp_conn_t *conn, tcp_segment_t *seg);
…
 static void tcp_conn_tw_timer_clear(tcp_conn_t *conn);

-/** Create new segment structure.
+/** Create new connection structure.
  *
  * @param lsock Local socket (will be deeply copied)
  * @param fsock Foreign socket (will be deeply copied)
- * @return New segment or NULL
+ * @return New connection or NULL
  */
 tcp_conn_t *tcp_conn_new(tcp_sock_t *lsock, tcp_sock_t *fsock)
…
         goto error;

+    fibril_mutex_initialize(&conn->lock);
+
+    /* One for the user, one for not being in closed state */
+    atomic_set(&conn->refcnt, 2);
+
     /* Allocate receive buffer */
-    fibril_mutex_initialize(&conn->rcv_buf_lock);
     fibril_condvar_initialize(&conn->rcv_buf_cv);
     conn->rcv_buf_size = RCV_BUF_SIZE;
…

     /** Allocate send buffer */
+    fibril_condvar_initialize(&conn->snd_buf_cv);
     conn->snd_buf_size = SND_BUF_SIZE;
     conn->snd_buf_used = 0;
…

     /* Connection state change signalling */
-    fibril_mutex_initialize(&conn->cstate_lock);
     fibril_condvar_initialize(&conn->cstate_cv);

     conn->cstate = st_listen;
     conn->reset = false;
+    conn->deleted = false;
     conn->ap = ap_passive;
     conn->fin_is_acked = false;
…
 }

+/** Destroy connection structure.
+ *
+ * Connection structure should be destroyed when the folowing condtitions
+ * are met:
+ * (1) user has deleted the connection
+ * (2) the connection has entered closed state
+ * (3) nobody is holding references to the connection
+ *
+ * This happens when @a conn->refcnt is zero as we count (1) and (2)
+ * as special references.
+ *
+ * @param conn Connection
+ */
+static void tcp_conn_free(tcp_conn_t *conn)
+{
+    log_msg(LVL_DEBUG, "%s: tcp_conn_free(%p)", conn->name, conn);
+    tcp_tqueue_fini(&conn->retransmit);
+
+    if (conn->rcv_buf != NULL)
+        free(conn->rcv_buf);
+    if (conn->snd_buf != NULL)
+        free(conn->snd_buf);
+    if (conn->tw_timer != NULL)
+        fibril_timer_destroy(conn->tw_timer);
+    free(conn);
+}
+
+/** Add reference to connection.
+ *
+ * Increase connection reference count by one.
+ *
+ * @param conn Connection
+ */
+void tcp_conn_addref(tcp_conn_t *conn)
+{
+    log_msg(LVL_DEBUG, "%s: tcp_conn_addref(%p)", conn->name, conn);
+    atomic_inc(&conn->refcnt);
+}
+
+/** Remove reference from connection.
+ *
+ * Decrease connection reference count by one.
+ *
+ * @param conn Connection
+ */
+void tcp_conn_delref(tcp_conn_t *conn)
+{
+    log_msg(LVL_DEBUG, "%s: tcp_conn_delref(%p)", conn->name, conn);
+
+    if (atomic_predec(&conn->refcnt) == 0)
+        tcp_conn_free(conn);
+}
+
+/** Delete connection.
+ *
+ * The caller promises not make no further references to @a conn.
+ * TCP will free @a conn eventually.
+ *
+ * @param conn Connection
+ */
+void tcp_conn_delete(tcp_conn_t *conn)
+{
+    log_msg(LVL_DEBUG, "%s: tcp_conn_delete(%p)", conn->name, conn);
+
+    assert(conn->deleted == false);
+    tcp_conn_delref(conn);
+}
+
 /** Enlist connection.
  *
…
 void tcp_conn_add(tcp_conn_t *conn)
 {
+    tcp_conn_addref(conn);
+    fibril_mutex_lock(&conn_list_lock);
     list_append(&conn->link, &conn_list);
+    fibril_mutex_unlock(&conn_list_lock);
 }

…
 void tcp_conn_remove(tcp_conn_t *conn)
 {
+    fibril_mutex_lock(&conn_list_lock);
     list_remove(&conn->link);
+    fibril_mutex_unlock(&conn_list_lock);
+    tcp_conn_delref(conn);
 }

 static void tcp_conn_state_set(tcp_conn_t *conn, tcp_cstate_t nstate)
 {
-    fibril_mutex_lock(&conn->cstate_lock);
+    tcp_cstate_t old_state;
+
+    old_state = conn->cstate;
     conn->cstate = nstate;
     fibril_condvar_broadcast(&conn->cstate_cv);
-    fibril_mutex_unlock(&conn->cstate_lock);
+
+    assert(old_state != st_closed);
+    if (nstate == st_closed) {
+        /* Drop one reference for now being in closed state */
+        tcp_conn_delref(conn);
+    }
 }

…
  * A connection is uniquely identified by a socket pair. Look up our
  * connection map and return connection structure based on socket pair.
+ * The connection reference count is bumped by one.
  *
  * @param sp Socket pair
  * @return Connection structure or NULL if not found.
  */
-tcp_conn_t *tcp_conn_find(tcp_sockpair_t *sp)
+tcp_conn_t *tcp_conn_find_ref(tcp_sockpair_t *sp)
 {
     log_msg(LVL_DEBUG, "tcp_conn_find(%p)", sp);
+
+    fibril_mutex_lock(&conn_list_lock);

     list_foreach(conn_list, link) {
…
             csp->local.addr.ipv4, csp->local.port);
         if (tcp_sockpair_match(sp, csp)) {
+            tcp_conn_addref(conn);
+            fibril_mutex_unlock(&conn_list_lock);
             return conn;
         }
     }

+    fibril_mutex_unlock(&conn_list_lock);
     return NULL;
 }
…

     fibril_condvar_broadcast(&conn->rcv_buf_cv);
+    fibril_condvar_broadcast(&conn->snd_buf_cv);
 }

…
     tcp_conn_trim_seg_to_wnd(conn, seg);

-    fibril_mutex_lock(&conn->rcv_buf_lock);
-
     /* Determine how many bytes to copy */
     text_size = tcp_segment_text_size(seg);
…
     /* Signal to the receive function that new data has arrived */
     fibril_condvar_broadcast(&conn->rcv_buf_cv);
-    fibril_mutex_unlock(&conn->rcv_buf_lock);

     log_msg(LVL_DEBUG, "Received %zu bytes of data.", xfer_size);
…

     /* Add FIN to the receive buffer */
-    fibril_mutex_lock(&conn->rcv_buf_lock);
     conn->rcv_buf_fin = true;
     fibril_condvar_broadcast(&conn->rcv_buf_cv);
-    fibril_mutex_unlock(&conn->rcv_buf_lock);

     tcp_segment_delete(seg);
…
     log_msg(LVL_DEBUG, "tw_timeout_func(%p)", conn);

+    fibril_mutex_lock(&conn->lock);
+
     if (conn->cstate == st_closed) {
         log_msg(LVL_DEBUG, "Connection already closed.");
+        fibril_mutex_unlock(&conn->lock);
+        tcp_conn_delref(conn);
         return;
     }
…
     tcp_conn_remove(conn);
     tcp_conn_state_set(conn, st_closed);
+
+    fibril_mutex_unlock(&conn->lock);
+    tcp_conn_delref(conn);
 }

…
 void tcp_conn_tw_timer_set(tcp_conn_t *conn)
 {
+    tcp_conn_addref(conn);
     fibril_timer_set(conn->tw_timer, TIME_WAIT_TIMEOUT, tw_timeout_func,
         (void *)conn);
…
 void tcp_conn_tw_timer_clear(tcp_conn_t *conn)
 {
-    fibril_timer_clear(conn->tw_timer);
+    if (fibril_timer_clear(conn->tw_timer) == fts_active)
+        tcp_conn_delref(conn);
 }

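Taken together, the additions above define a small reference-counting protocol for connections: a new connection starts with two references (one held by the user, one for not yet being closed), tcp_conn_find_ref() returns an extra reference that its caller must drop, and the user gives its own reference back with tcp_conn_delete(). A hypothetical caller, sketched only from the functions introduced in this hunk:

/* Hypothetical caller, not part of the changeset. */
static void demo_lookup(tcp_sockpair_t *sp)
{
    tcp_conn_t *conn = tcp_conn_find_ref(sp);    /* takes a reference */
    if (conn == NULL)
        return;

    fibril_mutex_lock(&conn->lock);
    /* ... examine or update the connection ... */
    fibril_mutex_unlock(&conn->lock);

    tcp_conn_delref(conn);    /* drop the lookup reference */
}

static void demo_user_done(tcp_conn_t *conn)
{
    /*
     * The user makes no further references; tcp_conn_free() runs once
     * the connection has also reached st_closed and all other
     * references are gone.
     */
    tcp_conn_delete(conn);
}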
uspace/srv/net/tl/tcp/conn.h

 extern tcp_conn_t *tcp_conn_new(tcp_sock_t *, tcp_sock_t *);
+extern void tcp_conn_delete(tcp_conn_t *);
 extern void tcp_conn_add(tcp_conn_t *);
 extern void tcp_conn_remove(tcp_conn_t *);
…
 extern void tcp_conn_fin_sent(tcp_conn_t *);
 extern void tcp_conn_ack_of_fin_rcvd(tcp_conn_t *);
-extern tcp_conn_t *tcp_conn_find(tcp_sockpair_t *);
+extern tcp_conn_t *tcp_conn_find_ref(tcp_sockpair_t *);
+extern void tcp_conn_addref(tcp_conn_t *);
+extern void tcp_conn_delref(tcp_conn_t *);
 extern bool tcp_conn_got_syn(tcp_conn_t *);
 extern void tcp_conn_segment_arrived(tcp_conn_t *, tcp_segment_t *);
uspace/srv/net/tl/tcp/sock.c

     socket = (tcp_sockdata_t *)sock_core->specific_data;
-    rc = tcp_uc_close(socket->conn);
-    if (rc != EOK) {
-        async_answer_0(callid, rc);
-        return;
-    }
-
-    /* Drain incoming data. This should really be done in the background. */
-    do {
-        trc = tcp_uc_receive(socket->conn, buffer, FRAGMENT_SIZE,
-            &data_len, &xflags);
-    } while (trc == TCP_EOK);
+
+    if (socket->conn != NULL) {
+        trc = tcp_uc_close(socket->conn);
+        if (trc != TCP_EOK && trc != TCP_ENOTEXIST) {
+            async_answer_0(callid, EBADF);
+            return;
+        }
+
+        /* Drain incoming data. This should really be done in the background. */
+        do {
+            trc = tcp_uc_receive(socket->conn, buffer,
+                FRAGMENT_SIZE, &data_len, &xflags);
+        } while (trc == TCP_EOK);
+
+        tcp_uc_delete(socket->conn);
+    }

     rc = socket_destroy(net_sess, socket_id, &client->sockets, &gsock,
uspace/srv/net/tl/tcp/tcp_type.h
     acpass_t ap;

+    /** Protects access to connection structure */
+    fibril_mutex_t lock;
+    /** Reference count */
+    atomic_t refcnt;
+
     /** Connection state */
     tcp_cstate_t cstate;
     /** True if connection was reset */
     bool reset;
-    /** Protects @c cstate */
-    fibril_mutex_t cstate_lock;
+    /** True if connection was deleted by user */
+    bool deleted;
     /** Signalled when @c cstate changes */
     fibril_condvar_t cstate_cv;
…
     /** Receive buffer contains FIN */
     bool rcv_buf_fin;
-    /** Receive buffer lock */
-    fibril_mutex_t rcv_buf_lock;
     /** Receive buffer CV. Broadcast when new data is inserted */
     fibril_condvar_t rcv_buf_cv;
…
     /** Send buffer contains FIN */
     bool snd_buf_fin;
+    /** Send buffer CV. Broadcast when space is made available in buffer */
+    fibril_condvar_t snd_buf_cv;

     /** Send unacknowledged */
uspace/srv/net/tl/tcp/tqueue.c
         /* We are sending out FIN */
         ctrl = CTL_FIN;
-        tcp_conn_fin_sent(conn);
     } else {
         ctrl = 0;
…
     if (send_fin)
         conn->snd_buf_fin = false;
+
+    fibril_condvar_broadcast(&conn->snd_buf_cv);
+
+    if (send_fin)
+        tcp_conn_fin_sent(conn);

     tcp_tqueue_seg(conn, seg);
…
     log_msg(LVL_DEBUG, "### %s: retransmit_timeout_func(%p)", conn->name, conn);

+    fibril_mutex_lock(&conn->lock);
+
     if (conn->cstate == st_closed) {
         log_msg(LVL_DEBUG, "Connection already closed.");
+        fibril_mutex_unlock(&conn->lock);
+        tcp_conn_delref(conn);
         return;
     }
…
     if (link == NULL) {
         log_msg(LVL_DEBUG, "Nothing to retransmit");
+        fibril_mutex_unlock(&conn->lock);
+        tcp_conn_delref(conn);
         return;
     }
…
     if (rt_seg == NULL) {
         log_msg(LVL_ERROR, "Memory allocation failed.");
+        fibril_mutex_unlock(&conn->lock);
+        tcp_conn_delref(conn);
         /* XXX Handle properly */
         return;
…
     /* Reset retransmission timer */
     tcp_tqueue_timer_set(tqe->conn);
+
+    fibril_mutex_unlock(&conn->lock);
+    tcp_conn_delref(conn);
 }

…
     log_msg(LVL_DEBUG, "### %s: tcp_tqueue_timer_set()", conn->name);

-    (void) retransmit_timeout_func;
+    /* Clear first to make sure we update refcnt correctly */
+    tcp_tqueue_timer_clear(conn);
+
+    tcp_conn_addref(conn);
     fibril_timer_set(conn->retransmit.timer, RETRANSMIT_TIMEOUT,
         retransmit_timeout_func, (void *) conn);
…
     log_msg(LVL_DEBUG, "### %s: tcp_tqueue_timer_clear()", conn->name);

-    fibril_timer_clear(conn->retransmit.timer);
+    if (fibril_timer_clear(conn->retransmit.timer) == fts_active)
+        tcp_conn_delref(conn);
 }

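The timer changes above follow a single rule: every armed timer holds a connection reference. The reference is taken just before fibril_timer_set() and is released either by the timeout handler itself or, when the timer is cleared while still pending (fibril_timer_clear() returning fts_active), by whoever cleared it. Condensed into one illustrative helper (not a drop-in replacement for the code above):

/* Illustrative condensation of the retransmit timer handling above. */
static void retransmit_timer_rearm(tcp_conn_t *conn)
{
    /* Clear first so re-arming keeps the reference count balanced. */
    if (fibril_timer_clear(conn->retransmit.timer) == fts_active)
        tcp_conn_delref(conn);

    tcp_conn_addref(conn);
    fibril_timer_set(conn->retransmit.timer, RETRANSMIT_TIMEOUT,
        retransmit_timeout_func, (void *) conn);
}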
uspace/srv/net/tl/tcp/ucall.c
     /* Wait for connection to be established or reset */
     log_msg(LVL_DEBUG, "tcp_uc_open: Wait for connection.");
-    fibril_mutex_lock(&nconn->cstate_lock);
+    fibril_mutex_lock(&nconn->lock);
     while (nconn->cstate == st_listen ||
         nconn->cstate == st_syn_sent ||
         nconn->cstate == st_syn_received) {
-        fibril_condvar_wait(&nconn->cstate_cv, &nconn->cstate_lock);
+        fibril_condvar_wait(&nconn->cstate_cv, &nconn->lock);
     }
…
         log_msg(LVL_DEBUG, "tcp_uc_open: Connection was reset.");
         assert(nconn->cstate == st_closed);
-        fibril_mutex_unlock(&nconn->cstate_lock);
+        fibril_mutex_unlock(&nconn->lock);
         return TCP_ERESET;
     }

-    fibril_mutex_unlock(&nconn->cstate_lock);
+    fibril_mutex_unlock(&nconn->lock);
     log_msg(LVL_DEBUG, "tcp_uc_open: Connection was established.");

…
     log_msg(LVL_DEBUG, "%s: tcp_uc_send()", conn->name);

-    if (conn->cstate == st_closed)
+    fibril_mutex_lock(&conn->lock);
+
+    if (conn->cstate == st_closed) {
+        fibril_mutex_unlock(&conn->lock);
         return TCP_ENOTEXIST;
+    }

     if (conn->cstate == st_listen) {
…
     }

-    if (conn->snd_buf_fin)
+
+    if (conn->snd_buf_fin) {
+        fibril_mutex_unlock(&conn->lock);
         return TCP_ECLOSING;
+    }

     while (size > 0) {
         buf_free = conn->snd_buf_size - conn->snd_buf_used;
-        while (buf_free == 0 && !conn->reset)
-            tcp_tqueue_new_data(conn);
-
-        if (conn->reset)
+        while (buf_free == 0 && !conn->reset) {
+            log_msg(LVL_DEBUG, "%s: buf_free == 0, waiting.",
+                conn->name);
+            fibril_condvar_wait(&conn->snd_buf_cv, &conn->lock);
+            buf_free = conn->snd_buf_size - conn->snd_buf_used;
+        }
+
+        if (conn->reset) {
+            fibril_mutex_unlock(&conn->lock);
             return TCP_ERESET;
+        }

         xfer_size = min(size, buf_free);
…
         conn->snd_buf_used += xfer_size;
         size -= xfer_size;
+
+        tcp_tqueue_new_data(conn);
     }

     tcp_tqueue_new_data(conn);
+    fibril_mutex_unlock(&conn->lock);

     return TCP_EOK;
…
     log_msg(LVL_DEBUG, "%s: tcp_uc_receive()", conn->name);

-    if (conn->cstate == st_closed)
+    fibril_mutex_lock(&conn->lock);
+
+    if (conn->cstate == st_closed) {
+        fibril_mutex_unlock(&conn->lock);
         return TCP_ENOTEXIST;
-
-    fibril_mutex_lock(&conn->rcv_buf_lock);
+    }

     /* Wait for data to become available */
     while (conn->rcv_buf_used == 0 && !conn->rcv_buf_fin && !conn->reset) {
         log_msg(LVL_DEBUG, "tcp_uc_receive() - wait for data");
-        fibril_condvar_wait(&conn->rcv_buf_cv, &conn->rcv_buf_lock);
+        fibril_condvar_wait(&conn->rcv_buf_cv, &conn->lock);
     }

     if (conn->rcv_buf_used == 0) {
-        fibril_mutex_unlock(&conn->rcv_buf_lock);
-
         *rcvd = 0;
         *xflags = 0;
…
         if (conn->rcv_buf_fin) {
             /* End of data, peer closed connection */
+            fibril_mutex_unlock(&conn->lock);
             return TCP_ECLOSING;
         } else {
             /* Connection was reset */
             assert(conn->reset);
+            fibril_mutex_unlock(&conn->lock);
             return TCP_ERESET;
         }
…
     conn->rcv_wnd += xfer_size;

-    fibril_mutex_unlock(&conn->rcv_buf_lock);
-
     /* TODO */
     *xflags = 0;
…
         conn->name, xfer_size);

+    fibril_mutex_unlock(&conn->lock);
+
     return TCP_EOK;
 }
…
     log_msg(LVL_DEBUG, "%s: tcp_uc_close()", conn->name);

-    if (conn->cstate == st_closed)
+    fibril_mutex_lock(&conn->lock);
+
+    if (conn->cstate == st_closed) {
+        fibril_mutex_unlock(&conn->lock);
         return TCP_ENOTEXIST;
-
-    if (conn->snd_buf_fin)
+    }
+
+    if (conn->snd_buf_fin) {
+        fibril_mutex_unlock(&conn->lock);
         return TCP_ECLOSING;
+    }

     conn->snd_buf_fin = true;
     tcp_tqueue_new_data(conn);

+    fibril_mutex_unlock(&conn->lock);
     return TCP_EOK;
 }
…
 }

+/** Delete connection user call.
+ *
+ * (Not in spec.) Inform TCP that the user is done with this connection
+ * and will not make any further calls/references to it. TCP can deallocate
+ * the connection from now on.
+ */
+void tcp_uc_delete(tcp_conn_t *conn)
+{
+    log_msg(LVL_DEBUG, "tcp_uc_delete()");
+    tcp_conn_delete(conn);
+}

 /*
…
         sp->local.addr.ipv4, sp->local.port);

-    conn = tcp_conn_find(sp);
-    if (conn != NULL && conn->cstate != st_closed) {
-        if (conn->ident.foreign.addr.ipv4 == TCP_IPV4_ANY)
-            conn->ident.foreign.addr.ipv4 = sp->foreign.addr.ipv4;
-        if (conn->ident.foreign.port == TCP_PORT_ANY)
-            conn->ident.foreign.port = sp->foreign.port;
-        if (conn->ident.local.addr.ipv4 == TCP_IPV4_ANY)
-            conn->ident.local.addr.ipv4 = sp->local.addr.ipv4;
-
-        tcp_conn_segment_arrived(conn, seg);
-    } else {
-        if (conn == NULL)
-            log_msg(LVL_WARN, "No connection found.");
-        else
-            log_msg(LVL_WARN, "Connection is closed.");
+    conn = tcp_conn_find_ref(sp);
+    if (conn == NULL) {
+        log_msg(LVL_WARN, "No connection found.");
         tcp_unexpected_segment(sp, seg);
-    }
+        return;
+    }
+
+    fibril_mutex_lock(&conn->lock);
+
+    if (conn->cstate == st_closed) {
+        log_msg(LVL_WARN, "Connection is closed.");
+        tcp_unexpected_segment(sp, seg);
+        fibril_mutex_unlock(&conn->lock);
+        tcp_conn_delref(conn);
+        return;
+    }
+
+    if (conn->ident.foreign.addr.ipv4 == TCP_IPV4_ANY)
+        conn->ident.foreign.addr.ipv4 = sp->foreign.addr.ipv4;
+    if (conn->ident.foreign.port == TCP_PORT_ANY)
+        conn->ident.foreign.port = sp->foreign.port;
+    if (conn->ident.local.addr.ipv4 == TCP_IPV4_ANY)
+        conn->ident.local.addr.ipv4 = sp->local.addr.ipv4;
+
+    tcp_conn_segment_arrived(conn, seg);
+
+    fibril_mutex_unlock(&conn->lock);
+    tcp_conn_delref(conn);
 }

uspace/srv/net/tl/tcp/ucall.h
 extern void tcp_uc_abort(tcp_conn_t *);
 extern void tcp_uc_status(tcp_conn_t *, tcp_conn_status_t *);
+extern void tcp_uc_delete(tcp_conn_t *);

 /*
uspace/srv/vfs/vfs.c
      * Allocate and initialize the Path Lookup Buffer.
      */
-    plb = as_get_mappable_page(PLB_SIZE);
-    if (!plb) {
-        printf(NAME ": Cannot allocate a mappable piece of address space\n");
-        return ENOMEM;
-    }
-
-    if (as_area_create(plb, PLB_SIZE, AS_AREA_READ | AS_AREA_WRITE |
-        AS_AREA_CACHEABLE) != plb) {
+    plb = as_area_create((void *) -1, PLB_SIZE,
+        AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE);
+    if (plb == (void *) -1) {
         printf(NAME ": Cannot create address space area\n");
         return ENOMEM;
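The vfs.c hunk relies on the same convention as the rest of the changeset: passing (void *) -1 as the base to as_area_create() asks the kernel to pick any suitable address, and the same value comes back when no area could be created. This reading is inferred from the hunk above; the snippet is a sketch, not the vfs code itself:

#include <as.h>

/* Illustrative: create an anonymous area at a kernel-chosen address. */
static void *create_buffer(size_t size)
{
    void *va = as_area_create((void *) -1, size,
        AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE);
    if (va == (void *) -1)
        return NULL;    /* creation failed */

    return va;
}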