source: scripts/patches/linux-2.6-seg-5.patch @ 5548691
Last change on this file since 5548691 was 7f65c0e, checked in 19 years ago.
File size: 3.9 KB
--- linux/arch/i386/kernel/process.c
+++ linux/arch/i386/kernel/process.c
@@ -597,8 +597,8 @@ struct task_struct fastcall * __switch_t
 	 * Save away %fs and %gs. No need to save %es and %ds, as
 	 * those are always kernel segments while inside the kernel.
 	 */
-	asm volatile("movl %%fs,%0":"=m" (*(int *)&prev->fs));
-	asm volatile("movl %%gs,%0":"=m" (*(int *)&prev->gs));
+	asm volatile("mov %%fs,%0":"=m" (prev->fs));
+	asm volatile("mov %%gs,%0":"=m" (prev->gs));
 
 	/*
 	 * Restore %fs and %gs if needed.
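This hunk, like the rest of the patch, adapts the kernel's segment-register saves to the stricter operand checking in binutils 2.16 and later: there is no encoding for storing a segment register into a 32-bit memory operand (only mov %sreg, r/m16 exists), so the explicit movl suffix and the (int *) casts have to go, letting the assembler size the store from the destination. A minimal standalone sketch of the patched style, assuming gcc on x86; the struct and function names below are illustrative, not taken from the kernel:

/* Illustrative only: a thread-state field and a saver in the patched
 * style.  "mov %%fs,%0" with a plain memory operand lets the assembler
 * emit the architecturally valid 16-bit store; the old form
 * "movl %%fs,%0" with "=m" (*(int *)&field) is rejected by newer
 * binutils because no 32-bit memory-store form of "mov %sreg" exists.
 */
struct thread_fields {
	unsigned long fs;	/* selector field, as in i386 thread_struct */
};

static inline void save_fs(struct thread_fields *t)
{
	asm volatile("mov %%fs,%0" : "=m" (t->fs));
}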
--- linux/arch/i386/kernel/vm86.c
+++ linux/arch/i386/kernel/vm86.c
@@ -294,8 +294,8 @@ static void do_sys_vm86(struct kernel_vm
 	 */
 	info->regs32->eax = 0;
 	tsk->thread.saved_esp0 = tsk->thread.esp0;
-	asm volatile("movl %%fs,%0":"=m" (tsk->thread.saved_fs));
-	asm volatile("movl %%gs,%0":"=m" (tsk->thread.saved_gs));
+	asm volatile("mov %%fs,%0":"=m" (tsk->thread.saved_fs));
+	asm volatile("mov %%gs,%0":"=m" (tsk->thread.saved_gs));
 
 	tss = &per_cpu(init_tss, get_cpu());
 	tsk->thread.esp0 = (unsigned long) &info->VM86_TSS_ESP0;
--- linux/arch/x86_64/kernel/process.c
+++ linux/arch/x86_64/kernel/process.c
@@ -391,10 +391,10 @@ int copy_thread(int nr, unsigned long cl
 	p->thread.fs = me->thread.fs;
 	p->thread.gs = me->thread.gs;
 
-	asm("movl %%gs,%0" : "=m" (p->thread.gsindex));
-	asm("movl %%fs,%0" : "=m" (p->thread.fsindex));
-	asm("movl %%es,%0" : "=m" (p->thread.es));
-	asm("movl %%ds,%0" : "=m" (p->thread.ds));
+	asm("mov %%gs,%0" : "=m" (p->thread.gsindex));
+	asm("mov %%fs,%0" : "=m" (p->thread.fsindex));
+	asm("mov %%es,%0" : "=m" (p->thread.es));
+	asm("mov %%ds,%0" : "=m" (p->thread.ds));
 
 	if (unlikely(me->thread.io_bitmap_ptr != NULL)) {
 		p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
@@ -457,11 +457,11 @@ struct task_struct *__switch_to(struct t
 	 * Switch DS and ES.
 	 * This won't pick up thread selector changes, but I guess that is ok.
 	 */
-	asm volatile("movl %%es,%0" : "=m" (prev->es));
+	asm volatile("mov %%es,%0" : "=m" (prev->es));
 	if (unlikely(next->es | prev->es))
 		loadsegment(es, next->es);
 
-	asm volatile ("movl %%ds,%0" : "=m" (prev->ds));
+	asm volatile ("mov %%ds,%0" : "=m" (prev->ds));
 	if (unlikely(next->ds | prev->ds))
 		loadsegment(ds, next->ds);
 
@@ -472,7 +472,7 @@ struct task_struct *__switch_to(struct t
 	 */
 	{
 		unsigned fsindex;
-		asm volatile("movl %%fs,%0" : "=g" (fsindex));
+		asm volatile("movl %%fs,%0" : "=r" (fsindex));
 		/* segment register != 0 always requires a reload.
 		   also reload when it has changed.
 		   when prev process used 64bit base always reload
@@ -493,7 +493,7 @@ struct task_struct *__switch_to(struct t
 	}
 	{
 		unsigned gsindex;
-		asm volatile("movl %%gs,%0" : "=g" (gsindex));
+		asm volatile("movl %%gs,%0" : "=r" (gsindex));
 		if (unlikely(gsindex | next->gsindex | prev->gs)) {
 			load_gs_index(next->gsindex);
 			if (gsindex)
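The last two hunks differ from the rest of the patch: the destination is a local variable, so movl itself stays once the output is pinned to a register. With "=g" the compiler may choose a memory slot, reintroducing the rejected movl-to-memory form; "=r" forces a general-purpose register, where the 32-bit move is always encodable and zero-extends the 16-bit selector. A hypothetical standalone reader in the patched style, not kernel code:

/* Sketch only: "=r" guarantees the inline asm sees a register
 * operand, so the "movl" size suffix remains legal under strict
 * binutils operand checking. */
static inline unsigned read_fs_selector(void)
{
	unsigned fsindex;

	asm volatile("movl %%fs,%0" : "=r" (fsindex));
	return fsindex;
}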
--- linux/include/asm-i386/system.h
+++ linux/include/asm-i386/system.h
@@ -81,7 +81,7 @@ static inline unsigned long _get_base(ch
 #define loadsegment(seg,value)			\
 	asm volatile("\n"			\
 		"1:\t"				\
-		"movl %0,%%" #seg "\n"		\
+		"mov %0,%%" #seg "\n"		\
 		"2:\n"				\
 		".section .fixup,\"ax\"\n"	\
 		"3:\t"				\
@@ -93,13 +93,13 @@ static inline unsigned long _get_base(ch
 		".align 4\n\t"			\
 		".long 1b,3b\n"			\
 		".previous"			\
-		: :"m" (*(unsigned int *)&(value)))
+		: :"m" (value))
 
 /*
  * Save a segment register away
  */
 #define savesegment(seg, value) \
-	asm volatile("movl %%" #seg ",%0":"=m" (*(int *)&(value)))
+	asm volatile("mov %%" #seg ",%0":"=m" (value))
 
 /*
  * Clear and set 'TS' bit respectively
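For reference, a hedged usage sketch of the two macros after the patch; it assumes the patched asm-i386/system.h in a 2.6-era kernel context. The .fixup body elided from the hunk above ordinarily recovers from a faulting segment load by loading a null selector instead of oopsing, but that is an assumption here since those lines are not shown:

/* Hypothetical caller, for illustration only. */
static void save_and_restore_gs(void)
{
	unsigned long gs_sel;

	savesegment(gs, gs_sel);	/* expands to: mov %gs, gs_sel */
	loadsegment(gs, gs_sel);	/* reload; a faulting load is fixed up */
}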