source: scripts/patches/linux-2.6-seg-5.patch@0ae794b

Branches containing this file: clfs-1.2, clfs-2.1, clfs-3.0.0-systemd, clfs-3.0.0-sysvinit, systemd, sysvinit
Last change on this file since 0ae794b was 7f65c0e, checked in by Jim Gifford <clfs@…>, 19 years ago

r625@server (orig r623): jim | 2005-10-31 12:43:24 -0800
Final Move

  • Property mode set to 100644
File size: 3.9 KB
  • linux/arch/i386/kernel/process.c

    @@ -597,8 +597,8 @@ struct task_struct fastcall * __switch_to
              * Save away %fs and %gs. No need to save %es and %ds, as
              * those are always kernel segments while inside the kernel.
              */
    -        asm volatile("movl %%fs,%0":"=m" (*(int *)&prev->fs));
    -        asm volatile("movl %%gs,%0":"=m" (*(int *)&prev->gs));
    +        asm volatile("mov %%fs,%0":"=m" (prev->fs));
    +        asm volatile("mov %%gs,%0":"=m" (prev->gs));
    
             /*
              * Restore %fs and %gs if needed.
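Throughout the patch, explicit "movl" forms that store a segment register through an (int *) cast are replaced with plain "mov" into the field itself, letting the assembler pick the operand size (a segment-register store to memory is 16-bit); newer binutils reject the forced 32-bit "movl" spelling here, which appears to be what this patch works around. As a rough illustration only, here is a minimal user-space sketch of the new pattern; it is not kernel code, the variable name is mine, and it builds only on x86/x86-64 with GCC or Clang:

    /*
     * Minimal user-space sketch of the pattern the patch switches to
     * (illustrative only, not kernel code; x86/x86-64 with GCC or Clang).
     * A plain "mov" lets the assembler emit the 16-bit store a segment
     * register needs, instead of forcing 32 bits via "movl" and a cast.
     */
    #include <stdio.h>
    
    int main(void)
    {
            unsigned short fs_sel;
    
            /* "=m": the %fs selector is stored straight into fs_sel. */
            asm volatile("mov %%fs,%0" : "=m" (fs_sel));
    
            printf("fs selector: %#x\n", fs_sel);
            return 0;
    }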
  • linux/arch/i386/kernel/vm86.c

    @@ -294,8 +294,8 @@ static void do_sys_vm86(struct kernel_vm
      */
             info->regs32->eax = 0;
             tsk->thread.saved_esp0 = tsk->thread.esp0;
    -        asm volatile("movl %%fs,%0":"=m" (tsk->thread.saved_fs));
    -        asm volatile("movl %%gs,%0":"=m" (tsk->thread.saved_gs));
    +        asm volatile("mov %%fs,%0":"=m" (tsk->thread.saved_fs));
    +        asm volatile("mov %%gs,%0":"=m" (tsk->thread.saved_gs));
    
             tss = &per_cpu(init_tss, get_cpu());
             tsk->thread.esp0 = (unsigned long) &info->VM86_TSS_ESP0;
  • linux/arch/x86_64/kernel/process.c

    @@ -391,10 +391,10 @@ int copy_thread(int nr, unsigned long cl
             p->thread.fs = me->thread.fs;
             p->thread.gs = me->thread.gs;
    
    -        asm("movl %%gs,%0" : "=m" (p->thread.gsindex));
    -        asm("movl %%fs,%0" : "=m" (p->thread.fsindex));
    -        asm("movl %%es,%0" : "=m" (p->thread.es));
    -        asm("movl %%ds,%0" : "=m" (p->thread.ds));
    +        asm("mov %%gs,%0" : "=m" (p->thread.gsindex));
    +        asm("mov %%fs,%0" : "=m" (p->thread.fsindex));
    +        asm("mov %%es,%0" : "=m" (p->thread.es));
    +        asm("mov %%ds,%0" : "=m" (p->thread.ds));
    
             if (unlikely(me->thread.io_bitmap_ptr != NULL)) {
                     p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
    @@ -457,11 +457,11 @@ struct task_struct *__switch_to(struct t
              * Switch DS and ES.
              * This won't pick up thread selector changes, but I guess that is ok.
              */
    -        asm volatile("movl %%es,%0" : "=m" (prev->es));
    +        asm volatile("mov %%es,%0" : "=m" (prev->es));
             if (unlikely(next->es | prev->es))
                     loadsegment(es, next->es);
    
    -        asm volatile ("movl %%ds,%0" : "=m" (prev->ds));
    +        asm volatile ("mov %%ds,%0" : "=m" (prev->ds));
             if (unlikely(next->ds | prev->ds))
                     loadsegment(ds, next->ds);
    
    @@ -472,7 +472,7 @@ struct task_struct *__switch_to(struct t
              */
             {
                     unsigned fsindex;
    -                asm volatile("movl %%fs,%0" : "=g" (fsindex));
    +                asm volatile("movl %%fs,%0" : "=r" (fsindex));
                     /* segment register != 0 always requires a reload.
                        also reload when it has changed.
                        when prev process used 64bit base always reload
    @@ -493,7 +493,7 @@ struct task_struct *__switch_to(struct t
             }
             {
                     unsigned gsindex;
    -                asm volatile("movl %%gs,%0" : "=g" (gsindex));
    +                asm volatile("movl %%gs,%0" : "=r" (gsindex));
                     if (unlikely(gsindex | next->gsindex | prev->gs)) {
                             load_gs_index(next->gsindex);
                             if (gsindex)
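In the two hunks above the instruction stays "movl" but the constraint changes from "=g" to "=r". A plausible reading (my assumption; the patch carries no changelog text): moving a segment register into a 32-bit general register is a valid movl form, but "=g" also lets the compiler pick a memory operand, exactly the case newer assemblers reject, so "=r" pins the operand to a register. A small stand-alone sketch, again not kernel code and x86-only:

    /*
     * Sketch of the "=r" variant (illustrative only; x86/x86-64, GCC or Clang).
     * With a general-register destination the explicit 32-bit "movl" form is
     * legal; the 16-bit selector lands in the low bits of the register.
     */
    #include <stdio.h>
    
    int main(void)
    {
            unsigned fsindex;
    
            /* "=r" forces a register operand, matching the patched kernel code. */
            asm volatile("movl %%fs,%0" : "=r" (fsindex));
    
            printf("fs index: %#x\n", fsindex);
            return 0;
    }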
  • linux/include/asm-i386/system.h

    @@ -81,7 +81,7 @@ static inline unsigned long _get_base(ch
     #define loadsegment(seg,value)                  \
             asm volatile("\n"                       \
                     "1:\t"                          \
    -                "movl %0,%%" #seg "\n"          \
    +                "mov %0,%%" #seg "\n"           \
                     "2:\n"                          \
                     ".section .fixup,\"ax\"\n"      \
                     "3:\t"                          \
    @@ -93,13 +93,13 @@ static inline unsigned long _get_base(ch
                     ".align 4\n\t"                  \
                     ".long 1b,3b\n"                 \
                     ".previous"                     \
    -                : :"m" (*(unsigned int *)&(value)))
    +                : :"m" (value))
    
     /*
      * Save a segment register away
      */
     #define savesegment(seg, value) \
    -        asm volatile("movl %%" #seg ",%0":"=m" (*(int *)&(value)))
    +        asm volatile("mov %%" #seg ",%0":"=m" (value))
    
     /*
      * Clear and set 'TS' bit respectively
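For reference, the savesegment() pattern above can be exercised outside the kernel. The sketch below is my own reduced copy of it, not the kernel header, and builds only on x86/x86-64 with GCC or Clang; loadsegment() is left out because loading an arbitrary selector from user space would fault.

    /*
     * User-space sketch of a savesegment-style macro: #seg pastes the
     * register name into the asm template, and "=m" stores the 16-bit
     * selector directly into the variable, with no casts.
     */
    #include <stdio.h>
    
    #define my_savesegment(seg, value) \
            asm volatile("mov %%" #seg ",%0" : "=m" (value))
    
    int main(void)
    {
            unsigned short cs_sel, ss_sel;
    
            my_savesegment(cs, cs_sel);     /* read %cs */
            my_savesegment(ss, ss_sel);     /* read %ss */
    
            printf("cs=%#x ss=%#x\n", cs_sel, ss_sel);
            return 0;
    }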