Changes in / [b8d6783:f76696f] in mainline
- Location: uspace
- Files: 2 added, 4 edited
Legend (diffs below use unified notation):
- Unmodified lines have no prefix
- Added lines are prefixed with +
- Removed lines are prefixed with -
uspace/app/tester/Makefile
rb8d6783 → rf76696f

@@ -50,4 +50,5 @@
 	vfs/vfs1.c \
 	ipc/ping_pong.c \
+	ipc/starve.c \
 	loop/loop1.c \
 	mm/common.c \
uspace/app/tester/tester.c
rb8d6783 → rf76696f

@@ -60,4 +60,5 @@
 #include "vfs/vfs1.def"
 #include "ipc/ping_pong.def"
+#include "ipc/starve.def"
 #include "loop/loop1.def"
 #include "mm/malloc1.def"
uspace/app/tester/tester.h
rb8d6783 → rf76696f

@@ -93,4 +93,5 @@
 extern const char *test_vfs1(void);
 extern const char *test_ping_pong(void);
+extern const char *test_starve_ipc(void);
 extern const char *test_loop1(void);
 extern const char *test_malloc1(void);
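The three hunks above wire a new test, starve (entry point test_starve_ipc), into the tester in the usual places: the Makefile, the table of tests in tester.c, and the declarations in tester.h. The two added files (presumably ipc/starve.c and ipc/starve.def) are not expanded in this view. The following self-contained sketch illustrates the registration pattern these hunks imply; the exact test_t layout, the entry strings, and the dummy test body are assumptions for illustration, not part of this changeset.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

typedef const char *(*test_entry_t)(void);

typedef struct {
	const char *name;   /* name used to invoke the test */
	const char *desc;   /* one-line description */
	test_entry_t entry; /* returns NULL on success, error text otherwise */
	bool safe;          /* may run as part of a batch run */
} test_t;

/* In the real tree, this extern lives in tester.h (see hunk above). */
const char *test_starve_ipc(void);

static test_t tests[] = {
	/* In the real tree, this entry comes from #include "ipc/starve.def". */
	{ "starve_ipc", "IPC starvation test", &test_starve_ipc, true },
	{ NULL, NULL, NULL, false }
};

/* Dummy body standing in for the added ipc/starve.c. */
const char *test_starve_ipc(void)
{
	return NULL;  /* NULL signals success */
}

int main(void)
{
	for (test_t *test = tests; test->name != NULL; test++) {
		const char *err = test->entry();
		printf("%s: %s\n", test->name, (err == NULL) ? "OK" : err);
	}
	return 0;
}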
uspace/lib/c/generic/async.c
rb8d6783 → rf76696f

@@ -249,5 +249,5 @@
 static void to_event_initialize(to_event_t *to)
 {
-	struct timeval tv = { 0 };
+	struct timeval tv = { 0, 0 };
 
 	to->inlist = false;

@@ -1017,4 +1017,5 @@
 
 	suseconds_t timeout;
+	unsigned int flags = SYNCH_FLAGS_NONE;
 	if (!list_empty(&timeout_list)) {
 		awaiter_t *waiter = list_get_instance(

@@ -1027,17 +1028,30 @@
 			futex_up(&async_futex);
 			handle_expired_timeouts();
-			continue;
-		} else
+			/*
+			 * Notice that even if the event(s) already
+			 * expired (and thus the other fibril was
+			 * supposed to be running already),
+			 * we check for incoming IPC.
+			 *
+			 * Otherwise, a fibril that continuously
+			 * creates (almost) expired events could
+			 * prevent IPC retrieval from the kernel.
+			 */
+			timeout = 0;
+			flags = SYNCH_FLAGS_NON_BLOCKING;
+
+		} else {
 			timeout = tv_sub(&waiter->to_event.expires, &tv);
-	} else
+			futex_up(&async_futex);
+		}
+	} else {
+		futex_up(&async_futex);
 		timeout = SYNCH_NO_TIMEOUT;
-
-	futex_up(&async_futex);
+	}
 
 	atomic_inc(&threads_in_ipc_wait);
 
 	ipc_call_t call;
-	ipc_callid_t callid = ipc_wait_cycle(&call, timeout,
-	    SYNCH_FLAGS_NONE);
+	ipc_callid_t callid = ipc_wait_cycle(&call, timeout, flags);
 
 	atomic_dec(&threads_in_ipc_wait);

@@ -1298,8 +1312,4 @@
 
 	amsg_t *msg = (amsg_t *) amsgid;
-
-	/* TODO: Let it go through the event read at least once */
-	if (timeout < 0)
-		return ETIMEOUT;
 
 	futex_down(&async_futex);

@@ -1313,7 +1323,31 @@
 	}
 
+	/*
+	 * Negative timeout is converted to zero timeout to avoid
+	 * using tv_add with negative augmenter.
+	 */
+	if (timeout < 0)
+		timeout = 0;
+
 	gettimeofday(&msg->wdata.to_event.expires, NULL);
 	tv_add(&msg->wdata.to_event.expires, timeout);
 
+	/*
+	 * Current fibril is inserted as waiting regardless of the
+	 * "size" of the timeout.
+	 *
+	 * Checking for msg->done and immediately bailing out when
+	 * timeout == 0 would mean that the manager fibril would never
+	 * run (consider single threaded program).
+	 * Thus the IPC answer would be never retrieved from the kernel.
+	 *
+	 * Notice that the actual delay would be very small because we
+	 * - switch to manager fibril
+	 * - the manager sees expired timeout
+	 * - and thus adds us back to ready queue
+	 * - manager switches back to some ready fibril
+	 *   (prior it, it checks for incoming IPC).
+	 *
+	 */
 	msg->wdata.fid = fibril_get_id();
 	msg->wdata.active = false;
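The async.c hunks fix an IPC starvation scenario: previously, when the nearest registered timeout had already expired, the manager fibril would handle the expired timeouts and continue without ever calling ipc_wait_cycle(), so a fibril that kept creating (almost) expired events could prevent IPC retrieval from the kernel entirely. The fix still checks for incoming IPC in that case, but with a zero timeout and a non-blocking flag. The companion change in the waiting path clamps negative timeouts to zero instead of returning ETIMEOUT early, so the manager runs at least once and can pick up an already-arrived answer. The sketch below is a simplified, self-contained model of that decision logic; the SYNCH_* values and helper names are stand-ins for the HelenOS ones, and the BSD timeval macros timercmp()/timersub() replace the tv_* helpers used in the real code.

#include <stdbool.h>
#include <stdio.h>
#include <sys/time.h>

/* Stand-ins for the HelenOS synchronization constants. */
#define SYNCH_NO_TIMEOUT         (-1L)
#define SYNCH_FLAGS_NONE         0u
#define SYNCH_FLAGS_NON_BLOCKING 1u

/*
 * Decide how the manager should wait for IPC, given the expiry of the
 * nearest pending timeout (if any). Mirrors the first async.c hunk.
 */
static void compute_wait(bool have_timeout, const struct timeval *expires,
    long *timeout, unsigned int *flags)
{
	*flags = SYNCH_FLAGS_NONE;

	if (!have_timeout) {
		/* No pending timeouts: block indefinitely waiting for IPC. */
		*timeout = SYNCH_NO_TIMEOUT;
		return;
	}

	struct timeval now;
	gettimeofday(&now, NULL);

	if (!timercmp(&now, expires, <)) {
		/*
		 * Nearest event already expired: still poll for incoming
		 * IPC (zero timeout, non-blocking), so a fibril that keeps
		 * creating (almost) expired events cannot starve IPC
		 * retrieval from the kernel.
		 */
		*timeout = 0;
		*flags = SYNCH_FLAGS_NON_BLOCKING;
	} else {
		/* Sleep only until the nearest expiry. */
		struct timeval delta;
		timersub(expires, &now, &delta);
		*timeout = delta.tv_sec * 1000000L + delta.tv_usec;
	}
}

/*
 * Mirror of the waiting-path change: clamp a negative timeout to zero
 * instead of failing early, so the wait is still registered and the
 * manager gets one chance to retrieve the answer.
 */
static long clamp_timeout(long timeout)
{
	return (timeout < 0) ? 0 : timeout;
}

int main(void)
{
	struct timeval expires;
	gettimeofday(&expires, NULL);   /* an already-expired event */

	long timeout;
	unsigned int flags;
	compute_wait(true, &expires, &timeout, &flags);
	printf("timeout=%ld flags=%u clamp(-5)=%ld\n",
	    timeout, flags, clamp_timeout(-5L));
	return 0;
}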