Changeset a35b458 in mainline for kernel/arch/amd64/include


Ignore:
Timestamp:
2018-03-02T20:10:49Z (8 years ago)
Author:
Jiří Zárevúcky <zarevucky.jiri@…>
Branches:
lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
Children:
f1380b7
Parents:
3061bc1
git-author:
Jiří Zárevúcky <zarevucky.jiri@…> (2018-02-28 17:38:31)
git-committer:
Jiří Zárevúcky <zarevucky.jiri@…> (2018-03-02 20:10:49)
Message:

style: Remove trailing whitespace on _all_ lines, including empty ones, for particular file types.

Command used: tools/srepl '\s\+$' '' -- *.c *.h *.py *.sh *.s *.S *.ag

Currently, whitespace on empty lines is very inconsistent.
There are two basic choices: either remove the whitespace, or keep empty lines
indented to the level of surrounding code. The former is AFAICT more common,
and also much easier to do automatically.

Alternatively, we could write a script for automatic indentation, and use that
instead. However, if such a script exists, it's possible to use the indented
style locally, by having the editor apply the relevant conversions on load/save,
without affecting the remote repository. IMO, it makes more sense to adopt
the simpler rule.

Location:
kernel/arch/amd64/include/arch
Files:
5 edited

Legend:

Unmodified
Added
Removed
  • kernel/arch/amd64/include/arch/asm.h

    r3061bc1 ra35b458  
    5353{
    5454        uintptr_t v;
    55        
     55
    5656        asm volatile (
    5757                "andq %%rsp, %[v]\n"
     
    5959                : "0" (~((uint64_t) STACK_SIZE - 1))
    6060        );
    61        
     61
    6262        return v;
    6363}
     
    9191        if (port < (ioport8_t *) IO_SPACE_BOUNDARY) {
    9292                uint8_t val;
    93                
     93
    9494                asm volatile (
    9595                        "inb %w[port], %b[val]\n"
     
    9797                        : [port] "d" (port)
    9898                );
    99                
     99
    100100                return val;
    101101        } else
     
    115115        if (port < (ioport16_t *) IO_SPACE_BOUNDARY) {
    116116                uint16_t val;
    117                
     117
    118118                asm volatile (
    119119                        "inw %w[port], %w[val]\n"
     
    121121                        : [port] "d" (port)
    122122                );
    123                
     123
    124124                return val;
    125125        } else
     
    139139        if (port < (ioport32_t *) IO_SPACE_BOUNDARY) {
    140140                uint32_t val;
    141                
     141
    142142                asm volatile (
    143143                        "inl %w[port], %[val]\n"
     
    145145                        : [port] "d" (port)
    146146                );
    147                
     147
    148148                return val;
    149149        } else
     
    252252{
    253253        ipl_t ipl = interrupts_read();
    254        
     254
    255255        asm volatile ("sti\n");
    256        
     256
    257257        return ipl;
    258258}
     
    268268{
    269269        ipl_t ipl = interrupts_read();
    270        
     270
    271271        asm volatile ("cli\n");
    272        
     272
    273273        return ipl;
    274274}
     
    310310{
    311311        uint32_t ax, dx;
    312        
     312
    313313        asm volatile (
    314314                "rdmsr\n"
     
    316316                : "c" (msr)
    317317        );
    318        
     318
    319319        return ((uint64_t) dx << 32) | ax;
    320320}
  • kernel/arch/amd64/include/arch/atomic.h

    r3061bc1 ra35b458  
    7474{
    7575        atomic_count_t r = 1;
    76        
     76
    7777        asm volatile (
    7878                "lock xaddq %[r], %[count]\n"
     
    8080                  [r] "+r" (r)
    8181        );
    82        
     82
    8383        return r;
    8484}
     
    8787{
    8888        atomic_count_t r = -1;
    89        
     89
    9090        asm volatile (
    9191                "lock xaddq %[r], %[count]\n"
     
    9393                  [r] "+r" (r)
    9494        );
    95        
     95
    9696        return r;
    9797}
     
    103103{
    104104        atomic_count_t v = 1;
    105        
     105
    106106        asm volatile (
    107107                "xchgq %[v], %[count]\n"
     
    109109                  [count] "+m" (val->count)
    110110        );
    111        
     111
    112112        return v;
    113113}
     
    117117{
    118118        atomic_count_t tmp;
    119        
     119
    120120        preemption_disable();
    121121        asm volatile (
     
    125125                "       testq %[tmp], %[tmp]\n"
    126126                "       jnz 0b\n"       /* lightweight looping on locked spinlock */
    127                
     127
    128128                "       incq %[tmp]\n"  /* now use the atomic operation */
    129129                "       xchgq %[count], %[tmp]\n"
     
    133133                  [tmp] "=&r" (tmp)
    134134        );
    135        
     135
    136136        /*
    137137         * Prevent critical section code from bleeding out this way up.
  • kernel/arch/amd64/include/arch/cpu.h

    r3061bc1 ra35b458  
    8989        int stepping;
    9090        tss_t *tss;
    91        
     91
    9292        unsigned int id; /** CPU's local, ie physical, APIC ID. */
    93        
     93
    9494        size_t iomapver_copy;  /** Copy of TASK's I/O Permission bitmap generation count. */
    9595} cpu_arch_t;
  • kernel/arch/amd64/include/arch/cycle.h

    r3061bc1 ra35b458  
    4242        uint32_t lower;
    4343        uint32_t upper;
    44        
     44
    4545        asm volatile (
    4646                "rdtsc\n"
     
    4848                  "=d" (upper)
    4949        );
    50        
     50
    5151        return ((uint64_t) lower) | (((uint64_t) upper) << 32);
    5252}
  • kernel/arch/amd64/include/arch/mm/page.h

    r3061bc1 ra35b458  
    210210{
    211211        pte_t *p = &pt[i];
    212        
     212
    213213        return ((!p->page_cache_disable) << PAGE_CACHEABLE_SHIFT |
    214214            (!p->present) << PAGE_PRESENT_SHIFT |
     
    223223{
    224224        pte_t *p = &pt[i];
    225        
     225
    226226        p->addr_12_31 = (a >> 12) & UINT32_C(0xfffff);
    227227        p->addr_32_51 = a >> 32;
     
    231231{
    232232        pte_t *p = &pt[i];
    233        
     233
    234234        p->page_cache_disable = !(flags & PAGE_CACHEABLE);
    235235        p->present = !(flags & PAGE_NOT_PRESENT);
     
    238238        p->no_execute = (flags & PAGE_EXEC) == 0;
    239239        p->global = (flags & PAGE_GLOBAL) != 0;
    240        
     240
    241241        /*
    242242         * Ensure that there is at least one bit set even if the present bit is cleared.
Note: See TracChangeset for help on using the changeset viewer.