Changeset 63e27ef in mainline for kernel/generic/src/synch
- Timestamp: 2017-06-19T21:47:42Z (8 years ago)
- Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: deacc58d
- Parents: 7354b5e
- Location: kernel/generic/src/synch
- Files: 5 edited
Legend: in the diffs below, lines prefixed with + were added, lines prefixed with - were removed, and unprefixed lines are unmodified context.
kernel/generic/src/synch/futex.c
r7354b5e → r63e27ef

@@ -61,4 +61,5 @@
  */

+#include <assert.h>
 #include <synch/futex.h>
 #include <synch/mutex.h>
@@ -241,6 +242,6 @@
 static void futex_add_ref(futex_t *futex)
 {
-	ASSERT(spinlock_locked(&futex_ht_lock));
-	ASSERT(0 < futex->refcount);
+	assert(spinlock_locked(&futex_ht_lock));
+	assert(0 < futex->refcount);
 	++futex->refcount;
 }
@@ -249,6 +250,6 @@
 static void futex_release_ref(futex_t *futex)
 {
-	ASSERT(spinlock_locked(&futex_ht_lock));
-	ASSERT(0 < futex->refcount);
+	assert(spinlock_locked(&futex_ht_lock));
+	assert(0 < futex->refcount);

 	--futex->refcount;
@@ -459,5 +460,5 @@
 	futex_t *futex;

-	ASSERT(keys == 1);
+	assert(keys == 1);

 	futex = hash_table_get_instance(item, futex_t, ht_link);
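Every hunk in this changeset applies the same mechanical substitution: the kernel-private ASSERT() macro is replaced by the standard assert() from the newly included <assert.h> (mutex.c also stops including <debug.h>, which apparently supplied the old macro). A minimal, self-contained sketch of the pattern follows; the ref_t type and the functions in it are hypothetical and serve only to illustrate the before/after shape:

/* Sketch of the ASSERT() -> assert() substitution (hypothetical example). */
#include <assert.h>   /* now provides assert(), replacing the old ASSERT() macro */

typedef struct {
	int refcount;
} ref_t;

static void ref_release(ref_t *r)
{
	/* Before this changeset: ASSERT(0 < r->refcount); */
	assert(0 < r->refcount);
	--r->refcount;
}

int main(void)
{
	ref_t r = { .refcount = 1 };
	ref_release(&r);
	return 0;
}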
kernel/generic/src/synch/mutex.c
r7354b5e → r63e27ef

@@ -36,7 +36,7 @@
  */

+#include <assert.h>
 #include <synch/mutex.h>
 #include <synch/semaphore.h>
-#include <debug.h>
 #include <arch.h>
 #include <stacktrace.h>
@@ -88,7 +88,7 @@
 		rc = _semaphore_down_timeout(&mtx->sem, usec, flags);
 	} else {
-		ASSERT((mtx->type == MUTEX_ACTIVE) || (!THREAD));
-		ASSERT(usec == SYNCH_NO_TIMEOUT);
-		ASSERT(!(flags & SYNCH_FLAGS_INTERRUPTIBLE));
+		assert((mtx->type == MUTEX_ACTIVE) || (!THREAD));
+		assert(usec == SYNCH_NO_TIMEOUT);
+		assert(!(flags & SYNCH_FLAGS_INTERRUPTIBLE));

 		unsigned int cnt = 0;
kernel/generic/src/synch/rcu.c
r7354b5e → r63e27ef

@@ -123,5 +123,6 @@
  *
  */

+#include <assert.h>
 #include <synch/rcu.h>
 #include <synch/condvar.h>
@@ -404,5 +405,5 @@
 	/* Stop and wait for reclaimers. */
 	for (unsigned int cpu_id = 0; cpu_id < config.cpu_active; ++cpu_id) {
-		ASSERT(cpus[cpu_id].rcu.reclaimer_thr != NULL);
+		assert(cpus[cpu_id].rcu.reclaimer_thr != NULL);

 		if (cpus[cpu_id].rcu.reclaimer_thr) {
@@ -487,5 +488,5 @@
 static void read_unlock_impl(size_t *pnesting_cnt)
 {
-	ASSERT(PREEMPTION_DISABLED || interrupts_disabled());
+	assert(PREEMPTION_DISABLED || interrupts_disabled());

 	if (0 == --(*pnesting_cnt)) {
@@ -509,5 +510,5 @@
 void _rcu_signal_read_unlock(void)
 {
-	ASSERT(PREEMPTION_DISABLED || interrupts_disabled());
+	assert(PREEMPTION_DISABLED || interrupts_disabled());

 	/*
@@ -531,5 +532,5 @@
 	 */
 	if (THREAD && local_atomic_exchange(&THREAD->rcu.was_preempted, false)) {
-		ASSERT(link_used(&THREAD->rcu.preempt_link));
+		assert(link_used(&THREAD->rcu.preempt_link));

 		rm_preempted_reader();
@@ -563,5 +564,5 @@
 {
 	/* Calling from a reader section will deadlock. */
-	ASSERT(!rcu_read_locked());
+	assert(!rcu_read_locked());

 	synch_item_t completion;
@@ -576,5 +577,5 @@
 {
 	synch_item_t *completion = member_to_inst(rcu_item, synch_item_t, rcu_item);
-	ASSERT(completion);
+	assert(completion);
 	waitq_wakeup(&completion->wq, WAKEUP_FIRST);
 }
@@ -615,5 +616,5 @@
 static void add_barrier_cb(void *arg)
 {
-	ASSERT(interrupts_disabled() || PREEMPTION_DISABLED);
+	assert(interrupts_disabled() || PREEMPTION_DISABLED);
 	atomic_inc(&rcu.barrier_wait_cnt);
 	rcu_call(&CPU->rcu.barrier_item, barrier_complete);
@@ -657,5 +658,5 @@
 	rcu_func_t func)
 {
-	ASSERT(rcu_item);
+	assert(rcu_item);

 	rcu_item->func = func;
@@ -689,5 +690,5 @@
 static bool cur_cbs_empty(void)
 {
-	ASSERT(THREAD && THREAD->wired);
+	assert(THREAD && THREAD->wired);
 	return NULL == CPU->rcu.cur_cbs;
 }
@@ -695,5 +696,5 @@
 static bool next_cbs_empty(void)
 {
-	ASSERT(THREAD && THREAD->wired);
+	assert(THREAD && THREAD->wired);
 	return NULL == CPU->rcu.next_cbs;
 }
@@ -702,5 +703,5 @@
 static bool arriving_cbs_empty(void)
 {
-	ASSERT(THREAD && THREAD->wired);
+	assert(THREAD && THREAD->wired);
 	/*
 	 * Accessing with interrupts enabled may at worst lead to
@@ -719,6 +720,6 @@
 static void reclaimer(void *arg)
 {
-	ASSERT(THREAD && THREAD->wired);
-	ASSERT(THREAD == CPU->rcu.reclaimer_thr);
+	assert(THREAD && THREAD->wired);
+	assert(THREAD == CPU->rcu.reclaimer_thr);

 	rcu_gp_t last_compl_gp = 0;
@@ -726,5 +727,5 @@

 	while (ok && wait_for_pending_cbs()) {
-		ASSERT(CPU->rcu.reclaimer_thr == THREAD);
+		assert(CPU->rcu.reclaimer_thr == THREAD);

 		exec_completed_cbs(last_compl_gp);
@@ -765,5 +766,5 @@
 	/* Both next_cbs and cur_cbs GP elapsed. */
 	if (CPU->rcu.next_cbs_gp <= last_completed_gp) {
-		ASSERT(CPU->rcu.cur_cbs_gp <= CPU->rcu.next_cbs_gp);
+		assert(CPU->rcu.cur_cbs_gp <= CPU->rcu.next_cbs_gp);

 		size_t exec_cnt = CPU->rcu.cur_cbs_cnt + CPU->rcu.next_cbs_cnt;
@@ -864,5 +865,5 @@
 	 */
 	if (CPU->rcu.next_cbs) {
-		ASSERT(CPU->rcu.parriving_cbs_tail != &CPU->rcu.arriving_cbs);
+		assert(CPU->rcu.parriving_cbs_tail != &CPU->rcu.arriving_cbs);

 		CPU->rcu.arriving_cbs = NULL;
@@ -913,5 +914,5 @@
 	}

-	ASSERT(CPU->rcu.cur_cbs_gp <= CPU->rcu.next_cbs_gp);
+	assert(CPU->rcu.cur_cbs_gp <= CPU->rcu.next_cbs_gp);

 	return expedite;
@@ -933,6 +934,6 @@
 	spinlock_lock(&rcu.gp_lock);

-	ASSERT(CPU->rcu.cur_cbs_gp <= CPU->rcu.next_cbs_gp);
-	ASSERT(CPU->rcu.cur_cbs_gp <= _rcu_cur_gp + 1);
+	assert(CPU->rcu.cur_cbs_gp <= CPU->rcu.next_cbs_gp);
+	assert(CPU->rcu.cur_cbs_gp <= _rcu_cur_gp + 1);

 	while (rcu.completed_gp < CPU->rcu.cur_cbs_gp) {
@@ -1029,5 +1030,5 @@
 static void sample_local_cpu(void *arg)
 {
-	ASSERT(interrupts_disabled());
+	assert(interrupts_disabled());
 	cpu_mask_t *reader_cpus = (cpu_mask_t *)arg;

@@ -1054,5 +1055,5 @@
 void rcu_after_thread_ran(void)
 {
-	ASSERT(interrupts_disabled());
+	assert(interrupts_disabled());

 	/*
@@ -1116,5 +1117,5 @@
 void rcu_before_thread_runs(void)
 {
-	ASSERT(!rcu_read_locked());
+	assert(!rcu_read_locked());

 	/* Load the thread's saved nesting count from before it was preempted. */
@@ -1129,5 +1130,5 @@
 void rcu_thread_exiting(void)
 {
-	ASSERT(THE->rcu_nesting == 0);
+	assert(THE->rcu_nesting == 0);

 	/*
@@ -1157,5 +1158,5 @@
 void _rcu_preempted_unlock(void)
 {
-	ASSERT(0 == THE->rcu_nesting || RCU_WAS_PREEMPTED == THE->rcu_nesting);
+	assert(0 == THE->rcu_nesting || RCU_WAS_PREEMPTED == THE->rcu_nesting);

 	size_t prev = local_atomic_exchange(&THE->rcu_nesting, 0);
@@ -1220,6 +1221,6 @@
 	}

-	ASSERT(CPU->rcu.cur_cbs_gp <= CPU->rcu.next_cbs_gp);
-	ASSERT(_rcu_cur_gp <= CPU->rcu.cur_cbs_gp);
+	assert(CPU->rcu.cur_cbs_gp <= CPU->rcu.next_cbs_gp);
+	assert(_rcu_cur_gp <= CPU->rcu.cur_cbs_gp);

 	/*
@@ -1262,5 +1263,5 @@
 static bool cv_wait_for_gp(rcu_gp_t wait_on_gp)
 {
-	ASSERT(spinlock_locked(&rcu.gp_lock));
+	assert(spinlock_locked(&rcu.gp_lock));

 	bool interrupted = false;
@@ -1284,5 +1285,5 @@

 	if (detector_idle) {
-		ASSERT(_rcu_cur_gp == rcu.completed_gp);
+		assert(_rcu_cur_gp == rcu.completed_gp);
 		condvar_signal(&rcu.req_gp_changed);
 	}
@@ -1323,5 +1324,5 @@
 static bool wait_for_detect_req(void)
 {
-	ASSERT(spinlock_locked(&rcu.gp_lock));
+	assert(spinlock_locked(&rcu.gp_lock));

 	bool interrupted = false;
@@ -1340,5 +1341,5 @@
 static void end_cur_gp(void)
 {
-	ASSERT(spinlock_locked(&rcu.gp_lock));
+	assert(spinlock_locked(&rcu.gp_lock));

 	rcu.completed_gp = _rcu_cur_gp;
@@ -1423,6 +1424,6 @@
 static void sample_local_cpu(void *arg)
 {
-	ASSERT(interrupts_disabled());
-	ASSERT(!CPU->rcu.is_delaying_gp);
+	assert(interrupts_disabled());
+	assert(!CPU->rcu.is_delaying_gp);

 	/* Cpu did not pass a quiescent state yet. */
@@ -1430,5 +1431,5 @@
 	/* Interrupted a reader in a reader critical section. */
 	if (0 < CPU->rcu.nesting_cnt) {
-		ASSERT(!CPU->idle);
+		assert(!CPU->idle);
 		/*
 		 * Note to notify the detector from rcu_read_unlock().
@@ -1492,5 +1493,5 @@
 void rcu_after_thread_ran(void)
 {
-	ASSERT(interrupts_disabled());
+	assert(interrupts_disabled());

 	/*
@@ -1559,6 +1560,6 @@
 void rcu_before_thread_runs(void)
 {
-	ASSERT(PREEMPTION_DISABLED || interrupts_disabled());
-	ASSERT(0 == CPU->rcu.nesting_cnt);
+	assert(PREEMPTION_DISABLED || interrupts_disabled());
+	assert(0 == CPU->rcu.nesting_cnt);

 	/* Load the thread's saved nesting count from before it was preempted. */
@@ -1590,7 +1591,7 @@
 void rcu_thread_exiting(void)
 {
-	ASSERT(THREAD != NULL);
-	ASSERT(THREAD->state == Exiting);
-	ASSERT(PREEMPTION_DISABLED || interrupts_disabled());
+	assert(THREAD != NULL);
+	assert(THREAD->state == Exiting);
+	assert(PREEMPTION_DISABLED || interrupts_disabled());

 	/*
@@ -1615,5 +1616,5 @@
 static void start_new_gp(void)
 {
-	ASSERT(spinlock_locked(&rcu.gp_lock));
+	assert(spinlock_locked(&rcu.gp_lock));

 	irq_spinlock_lock(&rcu.preempt_lock, true);
@@ -1734,5 +1735,5 @@
 static void upd_missed_gp_in_wait(rcu_gp_t completed_gp)
 {
-	ASSERT(CPU->rcu.cur_cbs_gp <= completed_gp);
+	assert(CPU->rcu.cur_cbs_gp <= completed_gp);

 	size_t delta = (size_t)(completed_gp - CPU->rcu.cur_cbs_gp);
@@ -1764,5 +1765,5 @@
 	irq_spinlock_lock(&rcu.preempt_lock, true);

-	ASSERT(link_used(&THREAD->rcu.preempt_link));
+	assert(link_used(&THREAD->rcu.preempt_link));

 	bool prev_empty = list_empty(&rcu.cur_preempted);
kernel/generic/src/synch/waitq.c
r7354b5e → r63e27ef

@@ -44,4 +44,5 @@
  */

+#include <assert.h>
 #include <synch/waitq.h>
 #include <synch/spinlock.h>
@@ -203,5 +204,5 @@
 	irq_spinlock_lock(&thread->lock, false);

-	ASSERT(thread->sleep_interruptible);
+	assert(thread->sleep_interruptible);

 	if ((thread->timeout_pending) &&
@@ -264,5 +265,5 @@
 int waitq_sleep_timeout(waitq_t *wq, uint32_t usec, unsigned int flags)
 {
-	ASSERT((!PREEMPTION_DISABLED) || (PARAM_NON_BLOCKING(flags, usec)));
+	assert((!PREEMPTION_DISABLED) || (PARAM_NON_BLOCKING(flags, usec)));

 	ipl_t ipl = waitq_sleep_prepare(wq);
@@ -496,5 +497,5 @@
 static void waitq_complete_wakeup(waitq_t *wq)
 {
-	ASSERT(interrupts_disabled());
+	assert(interrupts_disabled());

 	irq_spinlock_lock(&wq->lock, false);
@@ -520,6 +521,6 @@
 	size_t count = 0;

-	ASSERT(interrupts_disabled());
-	ASSERT(irq_spinlock_locked(&wq->lock));
+	assert(interrupts_disabled());
+	assert(irq_spinlock_locked(&wq->lock));

 loop:
kernel/generic/src/synch/workqueue.c
r7354b5e → r63e27ef

@@ -37,4 +37,5 @@
  */

+#include <assert.h>
 #include <synch/workqueue.h>
 #include <synch/spinlock.h>
@@ -189,5 +190,5 @@
 	if (workq) {
 		if (workq_init(workq, name)) {
-			ASSERT(!workq_corrupted(workq));
+			assert(!workq_corrupted(workq));
 			return workq;
 		}
@@ -202,5 +203,5 @@
 void workq_destroy(struct work_queue *workq)
 {
-	ASSERT(!workq_corrupted(workq));
+	assert(!workq_corrupted(workq));

 	irq_spinlock_lock(&workq->lock, true);
@@ -214,5 +215,5 @@
 		workq_stop(workq);
 	} else {
-		ASSERT(0 == running_workers);
+		assert(0 == running_workers);
 	}

@@ -264,5 +265,5 @@
 static bool add_worker(struct work_queue *workq)
 {
-	ASSERT(!workq_corrupted(workq));
+	assert(!workq_corrupted(workq));

 	thread_t *thread = thread_create(worker_thread, workq, TASK,
@@ -273,5 +274,5 @@

 		/* cur_worker_cnt proactively increased in signal_worker_logic() .*/
-		ASSERT(0 < workq->cur_worker_cnt);
+		assert(0 < workq->cur_worker_cnt);
 		--workq->cur_worker_cnt;

@@ -312,5 +313,5 @@

 		/* cur_worker_cnt proactively increased in signal_worker() .*/
-		ASSERT(0 < workq->cur_worker_cnt);
+		assert(0 < workq->cur_worker_cnt);
 		--workq->cur_worker_cnt;
 	}
@@ -334,5 +335,5 @@
 void workq_stop(struct work_queue *workq)
 {
-	ASSERT(!workq_corrupted(workq));
+	assert(!workq_corrupted(workq));

 	interrupt_workers(workq);
@@ -346,5 +347,5 @@

 	/* workq_stop() may only be called once. */
-	ASSERT(!workq->stopping);
+	assert(!workq->stopping);
 	workq->stopping = true;

@@ -358,5 +359,5 @@
static void wait_for_workers(struct work_queue *workq)
 {
-	ASSERT(!PREEMPTION_DISABLED);
+	assert(!PREEMPTION_DISABLED);

 	irq_spinlock_lock(&workq->lock, true);
@@ -375,5 +376,5 @@
 	}

-	ASSERT(list_empty(&workq->workers));
+	assert(list_empty(&workq->workers));

 	/* Wait for deferred add_worker_op(), signal_worker_op() to finish. */
@@ -473,5 +474,5 @@
 	work_func_t func, bool can_block)
 {
-	ASSERT(!workq_corrupted(workq));
+	assert(!workq_corrupted(workq));

 	bool success = true;
@@ -521,17 +522,17 @@
 static size_t active_workers_now(struct work_queue *workq)
 {
-	ASSERT(irq_spinlock_locked(&workq->lock));
+	assert(irq_spinlock_locked(&workq->lock));

 	/* Workers blocked are sleeping in the work function (ie not idle). */
-	ASSERT(workq->blocked_worker_cnt <= workq->cur_worker_cnt);
+	assert(workq->blocked_worker_cnt <= workq->cur_worker_cnt);
 	/* Idle workers are waiting for more work to arrive in condvar_wait. */
-	ASSERT(workq->idle_worker_cnt <= workq->cur_worker_cnt);
+	assert(workq->idle_worker_cnt <= workq->cur_worker_cnt);

 	/* Idle + blocked workers == sleeping worker threads. */
 	size_t sleeping_workers = workq->blocked_worker_cnt + workq->idle_worker_cnt;

-	ASSERT(sleeping_workers <= workq->cur_worker_cnt);
+	assert(sleeping_workers <= workq->cur_worker_cnt);
 	/* Workers pending activation are idle workers not yet given a time slice. */
-	ASSERT(workq->activate_pending <= workq->idle_worker_cnt);
+	assert(workq->activate_pending <= workq->idle_worker_cnt);

 	/*
@@ -550,5 +551,5 @@
 static size_t active_workers(struct work_queue *workq)
 {
-	ASSERT(irq_spinlock_locked(&workq->lock));
+	assert(irq_spinlock_locked(&workq->lock));

 	/*
@@ -573,10 +574,10 @@
 static void signal_worker_op(struct work_queue *workq)
 {
-	ASSERT(!workq_corrupted(workq));
+	assert(!workq_corrupted(workq));

 	condvar_signal(&workq->activate_worker);

 	irq_spinlock_lock(&workq->lock, true);
-	ASSERT(0 < workq->pending_op_cnt);
+	assert(0 < workq->pending_op_cnt);
 	--workq->pending_op_cnt;
 	irq_spinlock_unlock(&workq->lock, true);
@@ -593,6 +594,6 @@
 static signal_op_t signal_worker_logic(struct work_queue *workq, bool can_block)
 {
-	ASSERT(!workq_corrupted(workq));
-	ASSERT(irq_spinlock_locked(&workq->lock));
+	assert(!workq_corrupted(workq));
+	assert(irq_spinlock_locked(&workq->lock));

 	/* Only signal workers if really necessary. */
@@ -645,5 +646,5 @@
 	 */
 	if (need_worker && !can_block && 0 == active) {
-		ASSERT(0 == workq->idle_worker_cnt);
+		assert(0 == workq->idle_worker_cnt);

 		irq_spinlock_lock(&nonblock_adder.lock, true);
@@ -681,5 +682,5 @@
 	}

-	ASSERT(arg != NULL);
+	assert(arg != NULL);

 	struct work_queue *workq = arg;
@@ -697,5 +698,5 @@
 static bool dequeue_work(struct work_queue *workq, work_t **pwork_item)
 {
-	ASSERT(!workq_corrupted(workq));
+	assert(!workq_corrupted(workq));

 	irq_spinlock_lock(&workq->lock, true);
@@ -704,5 +705,5 @@
 	if (!workq->stopping && worker_unnecessary(workq)) {
 		/* There are too many workers for this load. Exit. */
-		ASSERT(0 < workq->cur_worker_cnt);
+		assert(0 < workq->cur_worker_cnt);
 		--workq->cur_worker_cnt;
 		list_remove(&THREAD->workq_link);
@@ -729,5 +730,5 @@

#ifdef CONFIG_DEBUG
-		ASSERT(!work_item_corrupted(*pwork_item));
+		assert(!work_item_corrupted(*pwork_item));
 		(*pwork_item)->cookie = 0;
#endif
@@ -738,5 +739,5 @@
 	} else {
 		/* Requested to stop and no more work queued. */
-		ASSERT(workq->stopping);
+		assert(workq->stopping);
 		--workq->cur_worker_cnt;
 		stop = true;
@@ -751,5 +752,5 @@
 static bool worker_unnecessary(struct work_queue *workq)
 {
-	ASSERT(irq_spinlock_locked(&workq->lock));
+	assert(irq_spinlock_locked(&workq->lock));

 	/* No work is pending. We don't need too many idle threads. */
@@ -775,11 +776,11 @@

 	/* Ignore lock ordering just here. */
-	ASSERT(irq_spinlock_locked(&workq->lock));
+	assert(irq_spinlock_locked(&workq->lock));

 	_condvar_wait_timeout_irq_spinlock(&workq->activate_worker,
 		&workq->lock, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE);

-	ASSERT(!workq_corrupted(workq));
-	ASSERT(irq_spinlock_locked(&workq->lock));
+	assert(!workq_corrupted(workq));
+	assert(irq_spinlock_locked(&workq->lock));

 	THREAD->workq_idling = false;
@@ -791,14 +792,14 @@
 void workq_before_thread_is_ready(thread_t *thread)
 {
-	ASSERT(thread);
-	ASSERT(irq_spinlock_locked(&thread->lock));
+	assert(thread);
+	assert(irq_spinlock_locked(&thread->lock));

 	/* Worker's work func() is about to wake up from sleeping. */
 	if (thread->workq && thread->workq_blocked) {
 		/* Must be blocked in user work func() and not be waiting for work. */
-		ASSERT(!thread->workq_idling);
-		ASSERT(thread->state == Sleeping);
-		ASSERT(THREAD != thread);
-		ASSERT(!workq_corrupted(thread->workq));
+		assert(!thread->workq_idling);
+		assert(thread->state == Sleeping);
+		assert(THREAD != thread);
+		assert(!workq_corrupted(thread->workq));

 		/* Protected by thread->lock */
@@ -814,11 +815,11 @@
 void workq_after_thread_ran(void)
 {
-	ASSERT(THREAD);
-	ASSERT(irq_spinlock_locked(&THREAD->lock));
+	assert(THREAD);
+	assert(irq_spinlock_locked(&THREAD->lock));

 	/* Worker's work func() is about to sleep/block. */
 	if (THREAD->workq && THREAD->state == Sleeping && !THREAD->workq_idling) {
-		ASSERT(!THREAD->workq_blocked);
-		ASSERT(!workq_corrupted(THREAD->workq));
+		assert(!THREAD->workq_blocked);
+		assert(!workq_corrupted(THREAD->workq));

 		THREAD->workq_blocked = true;
@@ -834,5 +835,5 @@

 	if (op) {
-		ASSERT(add_worker_noblock_op == op || signal_worker_op == op);
+		assert(add_worker_noblock_op == op || signal_worker_op == op);
 		op(THREAD->workq);
 	}
@@ -903,5 +904,5 @@
 			struct work_queue, nb_link);

-		ASSERT(!workq_corrupted(*pworkq));
+		assert(!workq_corrupted(*pworkq));

 		list_remove(&(*pworkq)->nb_link);