Changeset 1b20da0 in mainline for kernel/generic/include


Timestamp:     2018-02-28T17:52:03Z (7 years ago)
Author:        Jiří Zárevúcky <zarevucky.jiri@…>
Branches:      lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
Children:      3061bc1
Parents:       df6ded8
git-author:    Jiří Zárevúcky <zarevucky.jiri@…> (2018-02-28 17:26:03)
git-committer: Jiří Zárevúcky <zarevucky.jiri@…> (2018-02-28 17:52:03)

Message:

style: Remove trailing whitespace on non-empty lines, in certain file types.

Command used: tools/srepl '\([^[:space:]]\)\s\+$' '\1' -- *.c *.h *.py *.sh *.s *.S *.ag

Location: kernel/generic/include
Files:    22 edited

Legend (the viewer's color coding is rendered here as markers):

    OLD NEW   unmodified line (both old and new line numbers)
    OLD     - removed line (old line number only)
        NEW + added line (new line number only)

Removed lines differ from the added ones only in their trailing whitespace, which is invisible in this plain-text rendering.
  • kernel/generic/include/adt/avl.h

    rdf6ded8 → r1b20da0

     62  62  struct avltree_node
     63  63  {
     64      -         /** 
         64 +         /**
     65  65           * Pointer to the left descendant of this node.
     66  66           *

     70  70          struct avltree_node *lft;
     71  71  
     72      -         /** 
         72 +         /**
     73  73           * Pointer to the right descendant of this node.
     74  74           *

     82  82  
     83  83          /** Node's key. */
     84      -         avltree_key_t key; 
         84 +         avltree_key_t key;
     85  85  
     86  86          /**

     97  97          struct avltree_node *root;
     98  98  
     99      -         /** 
         99 +         /**
    100 100           * Base of the tree is a value that is smaller or equal than every value
    101 101           * in the tree (valid for positive keys otherwise ignore this atribute).
    102     -          * 
        102 +          *
    103 103           * The base is added to the current key when a new node is inserted into
    104 104           * the tree. The base is changed to the key of the node which is deleted
    105 105           * with avltree_delete_min().
    106 106           */
    107     -         avltree_key_t base; 
        107 +         avltree_key_t base;
    108 108  };
    109 109  
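
    The base field documented in the hunk above is subtle: inserted keys are
    relative, and the tree keeps them comparable by tracking a running base.
    A minimal sketch of that bookkeeping, assuming the avltree_create(),
    avltree_insert() and avltree_delete_min() interfaces from this header
    (the key values are illustrative only):

        /* Illustrative sketch of the 'base' semantics described above. */
        avltree_t t;
        avltree_create(&t);               /* base starts at 0 */

        avltree_node_t a = { .key = 10 };
        avltree_insert(&t, &a);           /* stored with key = base + 10 */

        avltree_node_t b = { .key = 5 };
        avltree_insert(&t, &b);           /* stored with key = base + 5 */

        /* Deletes the minimum node (b) and moves base up to its key (5),
         * so later relative keys remain comparable with existing nodes. */
        avltree_delete_min(&t);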
  • kernel/generic/include/adt/btree.h

    rdf6ded8 → r1b20da0

     55  55          btree_key_t key[BTREE_MAX_KEYS + 1];
     56  56  
     57      -         /** 
         57 +         /**
     58  58           * Pointers to values. Sorted according to the key array. Defined only in
     59  59           * leaf-level. There is room for storing value for the extra key.
  • kernel/generic/include/adt/cht.h

    rdf6ded8 → r1b20da0

     46  46  /** Concurrent hash table node link. */
     47  47  typedef struct cht_link {
     48      -         /* Must be placed first. 
     49      -          * 
     50      -          * The function pointer (rcu_link.func) is used to store the item's 
     51      -          * mixed memoized hash. If in use by RCU (ie waiting for deferred 
     52      -          * destruction) the hash will contain the value of 
         48 +         /* Must be placed first.
         49 +          *
         50 +          * The function pointer (rcu_link.func) is used to store the item's
         51 +          * mixed memoized hash. If in use by RCU (ie waiting for deferred
         52 +          * destruction) the hash will contain the value of
     53  53           * cht_t.op->remove_callback.
     54  54           */

     64  64  typedef struct cht_ops {
     65  65          /** Returns the hash of the item.
     66      -          * 
         66 +          *
     67  67           * Applicable also to items that were logically deleted from the table
     68  68           * but have yet to be physically removed by means of remove_callback().

     80  80  
     81  81  /** Groups hash table buckets with their count.
     82      -  * 
         82 +  *
     83  83   * It allows both the number of buckets as well as the bucket array
     84  84   * to be swapped atomically when resing the table.

    100 100          /** Resized table buckets that will replace b once resize is complete. */
    101 101          cht_buckets_t *new_b;
    102     -         /** Invalid memoized hash value. 
    103     -          * 
        102 +         /** Invalid memoized hash value.
        103 +          *
    104 104           * If cht_link.hash contains this value the item had been logically
    105 105           * removed and is waiting to be freed. Such hashes (and the associated
    106     -          * items) are disregarded and skipped or the actual hash must be 
        106 +          * items) are disregarded and skipped or the actual hash must be
    107 107           * determined via op->hash().
    108 108           */

    116 116          work_t resize_work;
    117 117          /** If positive the table should grow or shrink.
    118     -          * 
        118 +          *
    119 119           * If not 0 resize work had already been posted to the system work queue.
    120 120           */

    133 133  
    134 134  extern bool cht_create_simple(cht_t *h, cht_ops_t *op);
    135     -  extern bool cht_create(cht_t *h, size_t init_size, size_t min_size, 
        135 + extern bool cht_create(cht_t *h, size_t init_size, size_t min_size,
    136 136          size_t max_load, bool can_block, cht_ops_t *op);
    137 137  extern void cht_destroy(cht_t *h);
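
    The declarations in the last hunk show the table's lifecycle entry
    points. A minimal usage sketch, assuming the usual HelenOS cht_ops_t
    callback set (hash, key_hash, equal, key_equal, remove_callback); only
    the hash callback is spelled out here:

        typedef struct {
            cht_link_t link;   /* embedded link; its rcu_link memoizes the hash */
            int key;
        } item_t;

        static size_t item_hash(const cht_link_t *item)
        {
            /* hash_mix() from adt/hash.h spreads the skewed key bits. */
            return hash_mix(((const item_t *) item)->key);
        }

        static cht_ops_t ops = {
            .hash = item_hash,
            /* .key_hash, .equal, .key_equal, .remove_callback elided */
        };

        static cht_t table;

        if (cht_create_simple(&table, &ops)) {
            /* ... concurrent inserts and lookups ... */
            cht_destroy(&table);
        }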
  • kernel/generic/include/adt/fifo.h

    rdf6ded8 → r1b20da0

     72  72   *
     73  73   * FIFO is allocated dynamically.
     74      -  * This macro is suitable for creating larger FIFOs. 
         74 +  * This macro is suitable for creating larger FIFOs.
     75  75   *
     76  76   * @param name Name of FIFO.
  • kernel/generic/include/adt/hash.h

    rdf6ded8 → r1b20da0

     75  75  
     76  76  /** Produces a uniform hash affecting all output bits from the skewed input. */
     77      - static inline size_t hash_mix(size_t hash) 
         77 + static inline size_t hash_mix(size_t hash)
     78  78  {
     79  79  #ifdef __32_BITS__

     87  87  
     88  88  /** Use to create a hash from multiple values.
     89      -  * 
         89 +  *
     90  90   * Typical usage:
     91  91   * @code

    101 101  static inline size_t hash_combine(size_t seed, size_t hash)
    102 102  {
    103     -         /* 
        103 +         /*
    104 104           * todo: use Bob Jenkin's proper mixing hash pass:
    105 105           * http://burtleburtle.net/bob/c/lookup3.c
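
    The "Typical usage" @code block for hash_combine() is cut off by the
    hunk boundary above. For context, usage runs along these lines (the rec
    fields are hypothetical):

        /* Chain several values through hash_combine(), then flatten the
         * result with hash_mix() for uniformly distributed output bits. */
        size_t h = 0;
        h = hash_combine(h, (size_t) rec->id);
        h = hash_combine(h, (size_t) rec->timestamp);
        h = hash_mix(h);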
  • kernel/generic/include/adt/hash_table.h

    rdf6ded8 → r1b20da0

      2   2   * Copyright (c) 2006 Jakub Jermar
      3   3   * Copyright (c) 2012 Adam Hraska
      4      -  * 
          4 +  *
      5   5   * All rights reserved.
      6   6   *

     62  62  
     63  63          /** Hash table item removal callback.
     64      -          * 
         64 +          *
     65  65           * Must not invoke any mutating functions of the hash table.
     66  66           *
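
    The removal-callback contract quoted above ("must not invoke any
    mutating functions") is worth illustrating. A hedged sketch, with the
    ht_link_t type and the entry layout assumed for the example:

        typedef struct {
            ht_link_t link;   /* first member, so the cast below is valid */
            char *name;
        } entry_t;

        /* Called by the table after the item is unlinked. Freeing the item
         * is fine; re-entering the table via hash_table_insert() or
         * hash_table_remove() from here is not. */
        static void entry_removed(ht_link_t *item)
        {
            entry_t *e = (entry_t *) item;
            free(e->name);
            free(e);
        }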
  • kernel/generic/include/adt/list.h

    rdf6ded8 → r1b20da0

     66  66  
     67  67  /** Initializer for statically allocated list.
     68      -  * 
         68 +  *
     69  69   * @code
     70  70   * struct named_list {
     71  71   *     const char *name;
     72  72   *     list_t list;
     73      -  * } var = { 
     74      -  *     .name = "default name", 
     75      -  *     .list = LIST_INITIALIZER(name_list.list) 
         73 +  * } var = {
         74 +  *     .name = "default name",
         75 +  *     .list = LIST_INITIALIZER(name_list.list)
     76  76   * };
     77  77   * @endcode

    111 111   *     link_t item_link;
    112 112   * } item_t;
    113     -  * 
        113 +  *
    114 114   * //..
    115     -  * 
        115 +  *
    116 116   * // Print each list element's value and remove the element from the list.
    117 117   * list_foreach_safe(mylist, cur_link, next_link) {

    121 121   * }
    122 122   * @endcode
    123     -  * 
        123 +  *
    124 124   * @param list List to traverse.
    125 125   * @param iterator Iterator to the current element of the list.
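
    Combining the two doc examples above into one short sketch
    (LIST_INITIALIZE() and list_get_instance() are companion helpers
    assumed from this header; the value field is hypothetical):

        static LIST_INITIALIZE(mylist);   /* static list, set up via LIST_INITIALIZER */

        /* Safe traversal: next_link is read before cur_link is unlinked. */
        list_foreach_safe(mylist, cur_link, next_link) {
            item_t *item = list_get_instance(cur_link, item_t, item_link);
            printf("%d\n", item->value);
            list_remove(cur_link);
        }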
  • kernel/generic/include/arch.h

    rdf6ded8 → r1b20da0

     71  71  #ifdef RCU_PREEMPT_A
     72  72          size_t rcu_nesting;    /**< RCU nesting count and flag. */
     73      - #endif 
         73 + #endif
     74  74          struct thread *thread; /**< Current thread. */
     75  75          struct task *task;     /**< Current task. */
  • kernel/generic/include/cpu/cpu_mask.h

    rdf6ded8 → r1b20da0

     39  39  #include <lib/memfnc.h>
     40  40  
     41      - /** Iterates over all cpu id's whose bit is included in the cpu mask. 
     42      -  * 
         41 + /** Iterates over all cpu id's whose bit is included in the cpu mask.
         42 +  *
     43  43   * Example usage:
     44  44   * @code
     45  45   * DEFINE_CPU_MASK(cpu_mask);
     46  46   * cpu_mask_active(&cpu_mask);
     47      -  * 
         47 +  *
     48  48   * cpu_mask_for_each(cpu_mask, cpu_id) {
     49  49   *     printf("Cpu with logical id %u is active.\n", cpu_id);

     53  53  #define cpu_mask_for_each(mask, cpu_id) \
     54  54          for (unsigned int (cpu_id) = 0; (cpu_id) < config.cpu_count; ++(cpu_id)) \
     55      -                 if (cpu_mask_is_set(&(mask), (cpu_id))) 
         55 +                 if (cpu_mask_is_set(&(mask), (cpu_id)))
     56  56  
     57  57  /** Allocates a cpu_mask_t on stack. */

     74  74  extern bool cpu_mask_is_none(cpu_mask_t *);
     75  75  
     76      - #endif /* KERN_CPU_CPU_MASK_H_ */ 
         76 + #endif /* KERN_CPU_CPU_MASK_H_ */
     77  77  
     78  78  /** @}
  • kernel/generic/include/ddi/irq.h

    rdf6ded8 → r1b20da0

    126 126  
    127 127          /** Notification configuration structure. */
    128     -         ipc_notif_cfg_t notif_cfg; 
        128 +         ipc_notif_cfg_t notif_cfg;
    129 129  } irq_t;
    130 130  
  • kernel/generic/include/fpu_context.h

    rdf6ded8 → r1b20da0

      1   1  /*
      2      -  * Copyright (c) 2005 Jakub Vana 
          2 +  * Copyright (c) 2005 Jakub Vana
      3   3   * All rights reserved.
      4   4   *

     27  27   */
     28  28  
     29      - /** @addtogroup generic 
         29 + /** @addtogroup generic
     30  30   * @{
     31  31   */
  • kernel/generic/include/ipc/sysipc_ops.h

    rdf6ded8 → r1b20da0

      1   1  /*
      2      -  * Copyright (c) 2012 Jakub Jermar 
          2 +  * Copyright (c) 2012 Jakub Jermar
      3   3   * All rights reserved.
      4   4   *

     84  84          /**
     85  85           * This callback is called from request_preprocess().
     86      -          * 
         86 +          *
     87  87           * Context:             caller
     88  88           * Caller alive:        guaranteed
  • kernel/generic/include/ipc/sysipc_priv.h

    rdf6ded8 → r1b20da0

      1   1  /*
      2      -  * Copyright (c) 2012 Jakub Jermar 
          2 +  * Copyright (c) 2012 Jakub Jermar
      3   3   * All rights reserved.
      4   4   *
  • kernel/generic/include/mm/as.h

    rdf6ded8 → r1b20da0

     72  72  
     73  73  /** The page fault was resolved by as_page_fault(). */
     74      - #define AS_PF_OK     0 
         74 + #define AS_PF_OK     0
     75  75  
     76  76  /** The page fault was caused by memcpy_from_uspace() or memcpy_to_uspace(). */
  • kernel/generic/include/mm/page.h

    rdf6ded8 → r1b20da0

     42  42  
     43  43  #define P2SZ(pages) \
     44      -         ((pages) << PAGE_WIDTH) 
         44 +         ((pages) << PAGE_WIDTH)
     45  45  
     46  46  /** Operations to manipulate page mappings. */
  • kernel/generic/include/mm/tlb.h

    rdf6ded8 → r1b20da0

     73  73  extern void tlb_shootdown_ipi_recv(void);
     74  74  #else
     75      - #define tlb_shootdown_start(w, x, y, z) interrupts_disable()   
         75 + #define tlb_shootdown_start(w, x, y, z) interrupts_disable()
     76  76  #define tlb_shootdown_finalize(i)       (interrupts_restore(i));
     77  77  #define tlb_shootdown_ipi_recv()
  • kernel/generic/include/proc/thread.h

    rdf6ded8 → r1b20da0

    190 190          struct work_queue *workq;
    191 191          /** Links work queue threads. Protected by workq->lock. */
    192     -         link_t workq_link; 
        192 +         link_t workq_link;
    193 193          /** True if the worker was blocked and is not running. Use thread->lock. */
    194 194          bool workq_blocked;
  • kernel/generic/include/security/perm.h

    rdf6ded8 → r1b20da0

     40  40   * holder to perform certain security sensitive tasks.
     41  41   *
     42      -  * Each task can have arbitrary combination of the permissions 
         42 +  * Each task can have arbitrary combination of the permissions
     43  43   * defined in this file. Therefore, they are required to be powers
     44  44   * of two.
  • kernel/generic/include/synch/rcu.h

    rdf6ded8 → r1b20da0

     41  41  
     42  42  
     43      - /** Use to assign a pointer to newly initialized data to a rcu reader 
         43 + /** Use to assign a pointer to newly initialized data to a rcu reader
     44  44   * accessible pointer.
     45      -  * 
         45 +  *
     46  46   * Example:
     47  47   * @code

     50  50   *     int grade;
     51  51   * } exam_t;
     52      -  * 
         52 +  *
     53  53   * exam_t *exam_list;
     54  54   * // ..
     55      -  * 
         55 +  *
     56  56   * // Insert at the beginning of the list.
     57  57   * exam_t *my_exam = malloc(sizeof(exam_t), 0);

     59  59   * my_exam->next = exam_list;
     60  60   * rcu_assign(exam_list, my_exam);
     61      -  * 
         61 +  *
     62  62   * // Changes properly propagate. Every reader either sees
     63  63   * // the old version of exam_list or the new version with

     65  65   * rcu_synchronize();
     66  66   * // Now we can be sure every reader sees my_exam.
     67      -  * 
         67 +  *
     68  68   * @endcode
     69  69   */

     75  75  
     76  76  /** Use to access RCU protected data in a reader section.
     77      -  * 
         77 +  *
     78  78   * Example:
     79  79   * @code
     80  80   * exam_t *exam_list;
     81  81   * // ...
     82      -  * 
         82 +  *
     83  83   * rcu_read_lock();
     84  84   * exam_t *first_exam = rcu_access(exam_list);
     85      -  * // We can now safely use first_exam, it won't change 
         85 +  * // We can now safely use first_exam, it won't change
     86  86   * // under us while we're using it.
     87  87   *

    131 131  void _rcu_preempted_unlock(void);
    132 132  
    133     - /** Delimits the start of an RCU reader critical section. 
    134     -  * 
        133 + /** Delimits the start of an RCU reader critical section.
        134 +  *
    135 135   * Reader sections may be nested and are preemptible. You must not
    136 136   * however block/sleep within reader sections.

    165 165          assert(PREEMPTION_DISABLED || interrupts_disabled());
    166 166  
    167     -         /* 
    168     -          * A new GP was started since the last time we passed a QS. 
        167 +         /*
        168 +          * A new GP was started since the last time we passed a QS.
    169 169           * Notify the detector we have reached a new QS.
    170 170           */
    171 171          if (CPU->rcu.last_seen_gp != _rcu_cur_gp) {
    172 172                  rcu_gp_t cur_gp = ACCESS_ONCE(_rcu_cur_gp);
    173     -                 /* 
    174     -                  * Contain memory accesses within a reader critical section. 
        173 +                 /*
        174 +                  * Contain memory accesses within a reader critical section.
    175 175                   * If we are in rcu_lock() it also makes changes prior to the
    176 176                   * start of the GP visible in the reader section.

    180 180                   * Acknowledge we passed a QS since the beginning of rcu.cur_gp.
    181 181                   * Cache coherency will lazily transport the value to the
    182     -                  * detector while it sleeps in gp_sleep(). 
    183     -                  * 
        182 +                  * detector while it sleeps in gp_sleep().
        183 +                  *
    184 184                   * Note that there is a theoretical possibility that we
    185     -                  * overwrite a more recent/greater last_seen_gp here with 
        185 +                  * overwrite a more recent/greater last_seen_gp here with
    186 186                   * an older/smaller value. If this cpu is interrupted here
    187     -                  * while in rcu_lock() reader sections in the interrupt handler 
    188     -                  * will update last_seen_gp to the same value as is currently 
    189     -                  * in local cur_gp. However, if the cpu continues processing 
    190     -                  * interrupts and the detector starts a new GP immediately, 
    191     -                  * local interrupt handlers may update last_seen_gp again (ie 
    192     -                  * properly ack the new GP) with a value greater than local cur_gp. 
    193     -                  * Resetting last_seen_gp to a previous value here is however 
    194     -                  * benign and we only have to remember that this reader may end up 
        187 +                  * while in rcu_lock() reader sections in the interrupt handler
        188 +                  * will update last_seen_gp to the same value as is currently
        189 +                  * in local cur_gp. However, if the cpu continues processing
        190 +                  * interrupts and the detector starts a new GP immediately,
        191 +                  * local interrupt handlers may update last_seen_gp again (ie
        192 +                  * properly ack the new GP) with a value greater than local cur_gp.
        193 +                  * Resetting last_seen_gp to a previous value here is however
        194 +                  * benign and we only have to remember that this reader may end up
    195 195                   * in cur_preempted even after the GP ends. That is why we
    196     -                  * append next_preempted to cur_preempted rather than overwriting 
        196 +                  * append next_preempted to cur_preempted rather than overwriting
    197 197                   * it as if cur_preempted were empty.
    198 198                   */

    201 201  }
    202 202  
    203     - /** Delimits the start of an RCU reader critical section. 
    204     -  * 
        203 + /** Delimits the start of an RCU reader critical section.
        204 +  *
    205 205   * Reader sections may be nested and are preemptable. You must not
    206 206   * however block/sleep within reader sections.

    229 229                  _rcu_record_qs();
    230 230  
    231     -                 /* 
    232     -                  * The thread was preempted while in a critical section or 
    233     -                  * the detector is eagerly waiting for this cpu's reader to finish. 
        231 +                 /*
        232 +                  * The thread was preempted while in a critical section or
        233 +                  * the detector is eagerly waiting for this cpu's reader to finish.
    234 234                   */
    235 235                  if (CPU->rcu.signal_unlock) {
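
    For symmetry with the rcu_assign() example quoted above, a reader-side
    sketch (the traversal details are illustrative, not from the header):

        rcu_read_lock();

        /* Every reader sees either the old or the new head, never a torn
         * pointer. Reader sections are preemptible but must not sleep. */
        for (exam_t *e = rcu_access(exam_list); e != NULL; e = e->next) {
            /* use e->grade */
        }

        rcu_read_unlock();

        /* Updater side: once rcu_synchronize() returns, no reader still
         * holds a reference to the old head, so it can be freed. */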
  • kernel/generic/include/synch/rcu_types.h

    rdf6ded8 → r1b20da0

     72  72  
     73  73          /** True if we should signal the detector that we exited a reader section.
     74      -          * 
         74 +          *
     75  75           * Equal to (THREAD->rcu.was_preempted || CPU->rcu.is_delaying_gp).
     76  76           */
     77  77          bool signal_unlock;
     78  78  
     79      -         /** The number of times an RCU reader section is nested on this cpu. 
     80      -          * 
     81      -          * If positive, it is definitely executing reader code. If zero, 
         79 +         /** The number of times an RCU reader section is nested on this cpu.
         80 +          *
         81 +          * If positive, it is definitely executing reader code. If zero,
     82  82           * the thread might already be executing reader code thanks to
     83  83           * cpu instruction reordering.

     92  92          /** Number of callbacks in cur_cbs. */
     93  93          size_t cur_cbs_cnt;
     94      -         /** Callbacks to invoke once the next grace period ends, ie next_cbs_gp. 
         94 +         /** Callbacks to invoke once the next grace period ends, ie next_cbs_gp.
     95  95           * Accessed by the local reclaimer only.
     96  96           */

    102 102          /** Tail of arriving_cbs list. Disable interrupts to access. */
    103 103          rcu_item_t **parriving_cbs_tail;
    104     -         /** Number of callbacks currently in arriving_cbs. 
        104 +         /** Number of callbacks currently in arriving_cbs.
    105 105           * Disable interrupts to access.
    106 106           */

    110 110          rcu_gp_t cur_cbs_gp;
    111 111          /** At the end of this grace period callbacks in next_cbs will be invoked.
    112     -          * 
    113     -          * Should be the next grace period but it allows the reclaimer to 
        112 +          *
        113 +          * Should be the next grace period but it allows the reclaimer to
    114 114           * notice if it missed a grace period end announcement. In that
    115 115           * case it can execute next_cbs without waiting for another GP.
    116     -          * 
        116 +          *
    117 117           * Invariant: next_cbs_gp >= cur_cbs_gp
    118 118           */

    143 143  /** RCU related per-thread data. */
    144 144  typedef struct rcu_thread_data {
    145     -         /** 
    146     -          * Nesting count of the thread's RCU read sections when the thread 
        145 +         /**
        146 +          * Nesting count of the thread's RCU read sections when the thread
    147 147           * is not running.
    148 148           */

    151 151  #ifdef RCU_PREEMPT_PODZIMEK
    152 152  
    153     -         /** True if the thread was preempted in a reader section. 
        153 +         /** True if the thread was preempted in a reader section.
    154 154           *
    155 155           * The thread is placed into rcu.cur_preempted or rcu.next_preempted
    156     -          * and must remove itself in rcu_read_unlock(). 
    157     -          * 
        156 +          * and must remove itself in rcu_read_unlock().
        157 +          *
    158 158           * Access with interrupts disabled.
    159 159           */
  • kernel/generic/include/synch/workqueue.h

    rdf6ded8 → r1b20da0

     53  53          /* Magic number for integrity checks. */
     54  54          uint32_t cookie;
     55      - #endif 
         55 + #endif
     56  56  } work_t;
     57  57  
  • kernel/generic/include/userspace.h

    rdf6ded8 → r1b20da0

     40  40  
     41  41  /** Switch to user-space (CPU user priviledge level) */
     42      - extern void userspace(uspace_arg_t *uarg) __attribute__ ((noreturn)); 
         42 + extern void userspace(uspace_arg_t *uarg) __attribute__ ((noreturn));
     43  43  
     44  44  #endif