
linux-2.4.36: Commit

2.4.36-stable kernel tree


Commit MetaInfo

Revision: 6a76456407be8aa48f4a9e09a31bbe0119a4d065 (tree)
Date: 2005-06-10 03:49:59
Author: H. J. Lu <hjl@luco...>
Committer: Marcelo Tosatti

Log Message

[PATCH] newer i386/x86_64 assemblers prohibit instructions for moving between a segment register and a 32-bit location

The new i386/x86_64 assemblers no longer accept instructions for moving
between a segment register and a 32-bit memory location, i.e.,

movl (%eax),%ds
movl %ds,(%eax)
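
For illustration, the same rejection surfaces when gcc emits such an
instruction from inline assembly. A minimal standalone sketch, assuming
gcc plus a new binutils; the function name is hypothetical:

/* This fails to assemble with new binutils: "movl" pairs a
 * segment register with a 32-bit memory operand. */
void save_ds(unsigned int *where)
{
	asm volatile("movl %%ds,%0" : "=m" (*where));
}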

To generate instructions for moving between a segment register and a
16-bit memory location without the 16-bit operand-size prefix, 0x66,

mov (%eax),%ds
mov %ds,(%eax)

should be used; it works with both new and old assemblers. Assemblers
from 2.16.90.0.1 onward will also support

movw (%eax),%ds
movw %ds,(%eax)
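
Translated to the kernel's macro style, the fix looks like this — a
minimal sketch, assuming a 16-bit storage slot (matching the
saved_fs/saved_gs type change in the apm.c hunk below; the example
function is hypothetical):

/* Suffix-less "mov" stores exactly 16 bits, so the destination
 * should be an unsigned short; both old and new assemblers accept
 * this form without emitting a 0x66 prefix. */
#define savesegment(seg, where) \
	asm volatile("mov %%" #seg ",%0" : "=m" (where))

void save_fs_example(void)
{
	unsigned short saved_fs;
	savesegment(fs, saved_fs);
	(void)saved_fs;
}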

without the 0x66 prefix. I am enclosing patches for the 2.4 and 2.6
kernels here. The resulting kernel binaries should be unchanged, with
both old and new assemblers, provided gcc never generates a memory
access for

unsigned gsindex;
asm volatile("movl %%gs,%0" : "=g" (gsindex));

If gcc does generate a memory access for the code above, the upper bits
of gsindex are undefined, and the new assembler rejects the resulting
instruction.
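
The x86_64 hunks below therefore switch those reads from "=g" to "=r",
which pins the operand to a register — a minimal sketch of the
resulting pattern (the function name is hypothetical):

unsigned read_gsindex(void)
{
	unsigned gsindex;

	/* "=r" forces a register operand, so this is the seg -> reg32
	 * form, which old and new assemblers both accept; "=g" would
	 * let gcc pick a memory operand, which the new assembler
	 * rejects. */
	asm volatile("movl %%gs,%0" : "=r" (gsindex));
	return gsindex;
}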

Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

Change Summary

Diff

--- a/arch/i386/kernel/apm.c
+++ b/arch/i386/kernel/apm.c
@@ -327,7 +327,7 @@ extern int (*console_blank_hook)(int);
  * Save a segment register away
  */
 #define savesegment(seg, where) \
-	__asm__ __volatile__("movl %%" #seg ",%0" : "=m" (where))
+	__asm__ __volatile__("mov %%" #seg ",%0" : "=m" (where))
 
 /*
  * Maximum number of events stored
@@ -553,7 +553,7 @@ static inline void apm_restore_cpus(unsigned long mask)
 
 #ifdef APM_ZERO_SEGS
 #	define APM_DECL_SEGS \
-	unsigned int saved_fs; unsigned int saved_gs;
+	unsigned short saved_fs; unsigned short saved_gs;
 #	define APM_DO_SAVE_SEGS \
 	savesegment(fs, saved_fs); savesegment(gs, saved_gs)
 #	define APM_DO_ZERO_SEGS \
--- a/arch/i386/kernel/process.c
+++ b/arch/i386/kernel/process.c
@@ -544,7 +544,7 @@ void release_thread(struct task_struct *dead_task)
  * Save a segment.
  */
 #define savesegment(seg,value) \
-	asm volatile("movl %%" #seg ",%0":"=m" (*(int *)&(value)))
+	asm volatile("mov %%" #seg ",%0":"=m" (value))
 
 int copy_thread(int nr, unsigned long clone_flags, unsigned long esp,
 	unsigned long unused,
@@ -661,8 +661,8 @@ void fastcall __switch_to(struct task_struct *prev_p, struct task_struct *next_p
 	 * Save away %fs and %gs. No need to save %es and %ds, as
 	 * those are always kernel segments while inside the kernel.
 	 */
-	asm volatile("movl %%fs,%0":"=m" (*(int *)&prev->fs));
-	asm volatile("movl %%gs,%0":"=m" (*(int *)&prev->gs));
+	asm volatile("mov %%fs,%0":"=m" (prev->fs));
+	asm volatile("mov %%gs,%0":"=m" (prev->gs));
 
 	/*
 	 * Restore %fs and %gs.
--- a/arch/x86_64/kernel/process.c
+++ b/arch/x86_64/kernel/process.c
@@ -526,10 +526,10 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long rsp,
 	p->thread.fs = me->thread.fs;
 	p->thread.gs = me->thread.gs;
 
-	asm("movl %%gs,%0" : "=m" (p->thread.gsindex));
-	asm("movl %%fs,%0" : "=m" (p->thread.fsindex));
-	asm("movl %%es,%0" : "=m" (p->thread.es));
-	asm("movl %%ds,%0" : "=m" (p->thread.ds));
+	asm("mov %%gs,%0" : "=m" (p->thread.gsindex));
+	asm("mov %%fs,%0" : "=m" (p->thread.fsindex));
+	asm("mov %%es,%0" : "=m" (p->thread.es));
+	asm("mov %%ds,%0" : "=m" (p->thread.ds));
 
 	unlazy_fpu(current);
 	p->thread.i387 = current->thread.i387;
@@ -574,11 +574,11 @@ struct task_struct *__switch_to(struct task_struct *prev_p, struct task_struct *
 	/*
 	 * Switch DS and ES.
 	 */
-	asm volatile("movl %%es,%0" : "=m" (prev->es));
+	asm volatile("mov %%es,%0" : "=m" (prev->es));
 	if (unlikely(next->es | prev->es))
 		loadsegment(es, next->es);
 
-	asm volatile ("movl %%ds,%0" : "=m" (prev->ds));
+	asm volatile ("mov %%ds,%0" : "=m" (prev->ds));
 	if (unlikely(next->ds | prev->ds))
 		loadsegment(ds, next->ds);
 
@@ -587,7 +587,7 @@ struct task_struct *__switch_to(struct task_struct *prev_p, struct task_struct *
 	 */
 	{
 		unsigned fsindex;
-		asm volatile("movl %%fs,%0" : "=g" (fsindex));
+		asm volatile("movl %%fs,%0" : "=r" (fsindex));
 		/* segment register != 0 always requires a reload.
 		   also reload when it has changed.
 		   when prev process used 64bit base always reload
@@ -608,7 +608,7 @@ struct task_struct *__switch_to(struct task_struct *prev_p, struct task_struct *
 	}
 	{
 		unsigned gsindex;
-		asm volatile("movl %%gs,%0" : "=g" (gsindex));
+		asm volatile("movl %%gs,%0" : "=r" (gsindex));
 		if (unlikely((gsindex | next->gsindex) || prev->gs)) {
 			load_gs_index(next->gsindex);
 			if (gsindex)
--- a/include/asm-i386/system.h
+++ b/include/asm-i386/system.h
@@ -84,7 +84,7 @@ static inline unsigned long _get_base(char * addr)
 #define loadsegment(seg,value) \
 	asm volatile("\n" \
 		"1:\t" \
-		"movl %0,%%" #seg "\n" \
+		"mov %0,%%" #seg "\n" \
 		"2:\n" \
 		".section .fixup,\"ax\"\n" \
 		"3:\t" \
@@ -96,7 +96,7 @@ static inline unsigned long _get_base(char * addr)
9696 ".align 4\n\t" \
9797 ".long 1b,3b\n" \
9898 ".previous" \
99- : :"m" (*(unsigned int *)&(value)))
99+ : :"m" (value))
100100
101101 /*
102102 * Clear and set 'TS' bit respectively
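
As a quick user-space check of the patched save forms, the following
builds with both old and new binutils — a minimal sketch, assuming gcc
on x86 Linux; the printed selector values are whatever the kernel set
up for the process:

#include <stdio.h>

int main(void)
{
	unsigned short ds, es, fs, gs;

	/* The same suffix-less seg -> mem16 stores the patch uses. */
	asm volatile("mov %%ds,%0" : "=m" (ds));
	asm volatile("mov %%es,%0" : "=m" (es));
	asm volatile("mov %%fs,%0" : "=m" (fs));
	asm volatile("mov %%gs,%0" : "=m" (gs));
	printf("ds=%#hx es=%#hx fs=%#hx gs=%#hx\n", ds, es, fs, gs);
	return 0;
}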