/*	$FabBSD$	*/
/*	$OpenBSD: locore.s,v 1.124 2008/07/28 19:08:46 miod Exp $	*/
/*	$NetBSD: locore.s,v 1.145 1996/05/03 19:41:19 christos Exp $	*/

/*-
 * Copyright (c) 1993, 1994, 1995 Charles M. Hannum.  All rights reserved.
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)locore.s	7.3 (Berkeley) 5/13/91
 */

#include "npx.h"
#include "assym.h"
#include "apm.h"
#include "lapic.h"
#include "ioapic.h"
#include "pctr.h"
#include "ksyms.h"
#include "acpi.h"

#include <sys/errno.h>
#include <sys/syscall.h>

#include <machine/cputypes.h>
#include <machine/param.h>
#include <machine/pte.h>
#include <machine/segments.h>
#include <machine/specialreg.h>
#include <machine/trap.h>

#include <dev/isa/isareg.h>

#if NLAPIC > 0
#include <machine/i82489reg.h>
#endif

/*
 * override user-land alignment before including asm.h
 */
#define	ALIGN_DATA	.align	4
#define	ALIGN_TEXT	.align	4,0x90	/* 4-byte boundaries, NOP-filled */
#define	SUPERALIGN_TEXT	.align	16,0x90	/* 16-byte boundaries better for 486 */
#define	_ALIGN_TEXT	ALIGN_TEXT
#include <machine/asm.h>

#define	CPL	_C_LABEL(lapic_tpr)

#define	GET_CURPCB(reg)			\
	movl	CPUVAR(CURPCB), reg

#define	SET_CURPCB(reg)			\
	movl	reg, CPUVAR(CURPCB)

#define	CHECK_ASTPENDING(treg)				\
	movl	CPUVAR(CURPROC),treg		;	\
	cmpl	$0, treg			;	\
	je	1f				;	\
	cmpl	$0,P_MD_ASTPENDING(treg)	;	\
	1:

#define	CLEAR_ASTPENDING(cpreg)		\
	movl	$0,P_MD_ASTPENDING(cpreg)

/*
 * These are used on interrupt or trap entry or exit.
 */
#define	INTRENTRY \
	pushl	%eax		; \
	pushl	%ecx		; \
	pushl	%edx		; \
	pushl	%ebx		; \
	pushl	%ebp		; \
	pushl	%esi		; \
	pushl	%edi		; \
	pushl	%ds		; \
	pushl	%es		; \
	pushl	%gs		; \
	movl	$GSEL(GDATA_SEL, SEL_KPL),%eax	; \
	movw	%ax,%ds		; \
	movw	%ax,%es		; \
	movw	%ax,%gs		; \
	pushl	%fs		; \
	movl	$GSEL(GCPU_SEL, SEL_KPL),%eax	; \
	movw	%ax,%fs

#define	INTRFASTEXIT \
	popl	%fs		; \
	popl	%gs		; \
	popl	%es		; \
	popl	%ds		; \
	popl	%edi		; \
	popl	%esi		; \
	popl	%ebp		; \
	popl	%ebx		; \
	popl	%edx		; \
	popl	%ecx		; \
	popl	%eax		; \
	sti			; \
	addl	$8,%esp		; \
	iret

/*
 * PTmap is recursive pagemap at top of virtual address space.
 * Within PTmap, the page directory can be found (third indirection).
 */
	.globl	_C_LABEL(PTmap), _C_LABEL(PTD), _C_LABEL(PTDpde)
	.set	_C_LABEL(PTmap), (PDSLOT_PTE << PDSHIFT)
	.set	_C_LABEL(PTD), (_C_LABEL(PTmap) + PDSLOT_PTE * NBPG)
	.set	_C_LABEL(PTDpde), (_C_LABEL(PTD) + PDSLOT_PTE * 4)	# XXX 4 == sizeof pde
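/*
 * Illustrative sketch (C, not assembled into this file): with the
 * recursive PDE installed, the page table entry for any virtual address
 * can be located through the PTmap window.  The helper name is
 * hypothetical; the constants are the ones .set above.
 *
 *	typedef u_int32_t pt_entry_t;
 *
 *	pt_entry_t *
 *	vtopte(vaddr_t va)
 *	{
 *		return (pt_entry_t *)PTmap + (va >> PGSHIFT);
 *	}
 *
 * Indexing PTmap by the virtual page number walks the page directory
 * through itself, which is why PTD and PTDpde fall out at the addresses
 * computed above.
 */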
/*
 * APTmap, APTD is the alternate recursive pagemap.
 * It's used when modifying another process's page tables.
 */
	.globl	_C_LABEL(APTmap), _C_LABEL(APTD), _C_LABEL(APTDpde)
	.set	_C_LABEL(APTmap), (PDSLOT_APTE << PDSHIFT)
	.set	_C_LABEL(APTD), (_C_LABEL(APTmap) + PDSLOT_APTE * NBPG)
	# XXX 4 == sizeof pde
	.set	_C_LABEL(APTDpde), (_C_LABEL(PTD) + PDSLOT_APTE * 4)

/*
 * Initialization
 */
	.data

	.globl	_C_LABEL(cpu), _C_LABEL(cpu_id), _C_LABEL(cpu_vendor)
	.globl	_C_LABEL(cpu_brandstr)
	.globl	_C_LABEL(cpuid_level)
	.globl	_C_LABEL(cpu_miscinfo)
	.globl	_C_LABEL(cpu_feature), _C_LABEL(cpu_ecxfeature)
	.globl	_C_LABEL(cpu_cache_eax), _C_LABEL(cpu_cache_ebx)
	.globl	_C_LABEL(cpu_cache_ecx), _C_LABEL(cpu_cache_edx)
	.globl	_C_LABEL(cold), _C_LABEL(cnvmem), _C_LABEL(extmem)
	.globl	_C_LABEL(esym)
	.globl	_C_LABEL(boothowto), _C_LABEL(bootdev), _C_LABEL(atdevbase)
	.globl	_C_LABEL(proc0paddr), _C_LABEL(PTDpaddr), _C_LABEL(PTDsize)
	.globl	_C_LABEL(gdt)
	.globl	_C_LABEL(bootapiver), _C_LABEL(bootargc), _C_LABEL(bootargv)
	.globl	_C_LABEL(lapic_tpr)

#if NLAPIC > 0
#ifdef __ELF__
	.align	NBPG
#else
	.align	12
#endif
	.globl	_C_LABEL(local_apic), _C_LABEL(lapic_id)
_C_LABEL(local_apic):
	.space	LAPIC_ID
_C_LABEL(lapic_id):
	.long	0x00000000
	.space	LAPIC_TPRI-(LAPIC_ID+4)
_C_LABEL(lapic_tpr):
	.space	LAPIC_PPRI-LAPIC_TPRI
_C_LABEL(lapic_ppr):
	.space	LAPIC_ISR-LAPIC_PPRI
_C_LABEL(lapic_isr):
	.space	NBPG-LAPIC_ISR
#else
_C_LABEL(lapic_tpr):
	.long	0
#endif

_C_LABEL(cpu):		.long	0	# are we 386, 386sx, 486, 586 or 686
_C_LABEL(cpu_id):	.long	0	# saved from 'cpuid' instruction
_C_LABEL(cpu_miscinfo):	.long	0	# misc info (apic/brand id) from 'cpuid'
_C_LABEL(cpu_feature):	.long	0	# feature flags from 'cpuid' instruction
_C_LABEL(cpu_ecxfeature): .long	0	# extended feature flags from 'cpuid'
_C_LABEL(cpuid_level):	.long	-1	# max. lvl accepted by 'cpuid' insn
_C_LABEL(cpu_cache_eax):.long	0
_C_LABEL(cpu_cache_ebx):.long	0
_C_LABEL(cpu_cache_ecx):.long	0
_C_LABEL(cpu_cache_edx):.long	0
_C_LABEL(cpu_vendor):	.space	16	# vendor string returned by 'cpuid' instruction
_C_LABEL(cpu_brandstr):	.space	48	# brand string returned by 'cpuid'
_C_LABEL(cold):		.long	1	# cold till we are not
_C_LABEL(esym):		.long	0	# ptr to end of syms
_C_LABEL(cnvmem):	.long	0	# conventional memory size
_C_LABEL(extmem):	.long	0	# extended memory size
_C_LABEL(atdevbase):	.long	0	# location of start of iomem in virtual
_C_LABEL(bootapiver):	.long	0	# /boot API version
_C_LABEL(bootargc):	.long	0	# /boot argc
_C_LABEL(bootargv):	.long	0	# /boot argv
_C_LABEL(bootdev):	.long	0	# device we booted from
_C_LABEL(proc0paddr):	.long	0
_C_LABEL(PTDpaddr):	.long	0	# paddr of PTD, for libkvm
_C_LABEL(PTDsize):	.long	NBPG	# size of PTD, for libkvm

	.space	512
tmpstk:

#define	RELOC(x)	((x) - KERNBASE)

	.text
	.globl	start
	.globl	_C_LABEL(kernel_text)
	_C_LABEL(kernel_text) = KERNTEXTOFF
start:	movw	$0x1234,0x472			# warm boot

/*
 * Load parameters from stack (howto, bootdev, unit, bootapiver, esym).
 * note: (%esp) is return address of boot
 * (If we want to hold onto /boot, it's physical %esp up to _end.)
 */
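/*
 * Sketch of the /boot argument frame implied by the offsets used just
 * below (illustrative only; the struct name is hypothetical, and only
 * the slots this code actually reads are shown -- 20(%esp) and 24(%esp)
 * are not touched here):
 *
 *	struct bootframe {
 *		void	*bf_ret;		0(%esp), return into /boot
 *		int	 bf_howto;		4(%esp)  -> boothowto
 *		dev_t	 bf_bootdev;		8(%esp)  -> bootdev
 *		int	 bf_bootapiver;		12(%esp) -> bootapiver
 *		char	*bf_esym;		16(%esp) -> esym (+KERNBASE)
 *		...
 *		int	 bf_argc;		28(%esp) -> bootargc
 *		char   **bf_argv;		32(%esp) -> bootargv
 *	};
 */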
	movl	4(%esp),%eax
	movl	%eax,RELOC(_C_LABEL(boothowto))
	movl	8(%esp),%eax
	movl	%eax,RELOC(_C_LABEL(bootdev))
	movl	16(%esp),%eax
	testl	%eax,%eax
	jz	1f
	addl	$KERNBASE,%eax
1:	movl	%eax,RELOC(_C_LABEL(esym))

	movl	12(%esp),%eax
	movl	%eax,RELOC(_C_LABEL(bootapiver))
	movl	28(%esp), %eax
	movl	%eax, RELOC(_C_LABEL(bootargc))
	movl	32(%esp), %eax
	movl	%eax, RELOC(_C_LABEL(bootargv))

	/* First, reset the PSL. */
	pushl	$PSL_MBO
	popfl

	/* Clear segment registers; null until proc0 setup */
	xorl	%eax,%eax
	movw	%ax,%fs
	movw	%ax,%gs

	/* Find out our CPU type. */

try386:	/* Try to toggle alignment check flag; does not exist on 386. */
	pushfl
	popl	%eax
	movl	%eax,%ecx
	orl	$PSL_AC,%eax
	pushl	%eax
	popfl
	pushfl
	popl	%eax
	xorl	%ecx,%eax
	andl	$PSL_AC,%eax
	pushl	%ecx
	popfl
	testl	%eax,%eax
	jnz	try486

	/*
	 * Try the test of a NexGen CPU -- ZF will not change on a DIV
	 * instruction on a NexGen, it will on an i386.  Documented in
	 * Nx586 Processor Recognition Application Note, NexGen, Inc.
	 */
	movl	$0x5555,%eax
	xorl	%edx,%edx
	movl	$2,%ecx
	divl	%ecx
	jnz	is386

isnx586:
	/*
	 * Don't try cpuid, as Nx586s reportedly don't support the
	 * PSL_ID bit.
	 */
	movl	$CPU_NX586,RELOC(_C_LABEL(cpu))
	jmp	2f

is386:
	movl	$CPU_386,RELOC(_C_LABEL(cpu))
	jmp	2f

try486:	/* Try to toggle identification flag; does not exist on early 486s. */
	pushfl
	popl	%eax
	movl	%eax,%ecx
	xorl	$PSL_ID,%eax
	pushl	%eax
	popfl
	pushfl
	popl	%eax
	xorl	%ecx,%eax
	andl	$PSL_ID,%eax
	pushl	%ecx
	popfl
	testl	%eax,%eax
	jnz	try586

is486:	movl	$CPU_486,RELOC(_C_LABEL(cpu))

	/*
	 * Check Cyrix CPU
	 * Cyrix CPUs do not change the undefined flags following
	 * execution of the divide instruction which divides 5 by 2.
	 *
	 * Note: CPUID is enabled on M2, so it passes another way.
	 */
	pushfl
	movl	$0x5555, %eax
	xorl	%edx, %edx
	movl	$2, %ecx
	clc
	divl	%ecx
	jnc	trycyrix486
	popfl
	jmp	2f
trycyrix486:
	movl	$CPU_6x86,RELOC(_C_LABEL(cpu))	# set CPU type

	/*
	 * Check for Cyrix 486 CPU by seeing if the flags change during a
	 * divide.  This is documented in the Cx486SLC/e SMM Programmer's
	 * Guide.
	 */
	xorl	%edx,%edx
	cmpl	%edx,%edx		# set flags to known state
	pushfl
	popl	%ecx			# store flags in ecx
	movl	$-1,%eax
	movl	$4,%ebx
	divl	%ebx			# do a long division
	pushfl
	popl	%eax
	xorl	%ecx,%eax		# are the flags different?
	testl	$0x8d5,%eax		# only check C|PF|AF|Z|N|V
	jne	2f			# yes; must not be Cyrix CPU
	movl	$CPU_486DLC,RELOC(_C_LABEL(cpu))	# set CPU type

#ifndef CYRIX_CACHE_WORKS
	/* Disable caching of the ISA hole only. */
	invd
	movb	$CCR0,%al		# Configuration Register index (CCR0)
	outb	%al,$0x22
	inb	$0x23,%al
	orb	$(CCR0_NC1|CCR0_BARB),%al
	movb	%al,%ah
	movb	$CCR0,%al
	outb	%al,$0x22
	movb	%ah,%al
	outb	%al,$0x23
	invd
#else /* CYRIX_CACHE_WORKS */
	/* Set cache parameters */
	invd				# Start with guaranteed clean cache
	movb	$CCR0,%al		# Configuration Register index (CCR0)
	outb	%al,$0x22
	inb	$0x23,%al
	andb	$~CCR0_NC0,%al
#ifndef CYRIX_CACHE_REALLY_WORKS
	orb	$(CCR0_NC1|CCR0_BARB),%al
#else
	orb	$CCR0_NC1,%al
#endif
	movb	%al,%ah
	movb	$CCR0,%al
	outb	%al,$0x22
	movb	%ah,%al
	outb	%al,$0x23

	/* clear non-cacheable region 1 */
	movb	$(NCR1+2),%al
	outb	%al,$0x22
	movb	$NCR_SIZE_0K,%al
	outb	%al,$0x23

	/* clear non-cacheable region 2 */
	movb	$(NCR2+2),%al
	outb	%al,$0x22
	movb	$NCR_SIZE_0K,%al
	outb	%al,$0x23

	/* clear non-cacheable region 3 */
	movb	$(NCR3+2),%al
	outb	%al,$0x22
	movb	$NCR_SIZE_0K,%al
	outb	%al,$0x23

	/* clear non-cacheable region 4 */
	movb	$(NCR4+2),%al
	outb	%al,$0x22
	movb	$NCR_SIZE_0K,%al
	outb	%al,$0x23

	/* enable caching in CR0 */
	movl	%cr0,%eax
	andl	$~(CR0_CD|CR0_NW),%eax
	movl	%eax,%cr0
	invd
#endif /* CYRIX_CACHE_WORKS */

	jmp	2f
try586:	/* Use the `cpuid' instruction. */
	xorl	%eax,%eax
	cpuid
	movl	%eax,RELOC(_C_LABEL(cpuid_level))
	movl	%ebx,RELOC(_C_LABEL(cpu_vendor))	# store vendor string
	movl	%edx,RELOC(_C_LABEL(cpu_vendor))+4
	movl	%ecx,RELOC(_C_LABEL(cpu_vendor))+8
	movl	$0, RELOC(_C_LABEL(cpu_vendor))+12

	movl	$1,%eax
	cpuid
	movl	%eax,RELOC(_C_LABEL(cpu_id))	# store cpu_id and features
	movl	%ebx,RELOC(_C_LABEL(cpu_miscinfo))
	movl	%edx,RELOC(_C_LABEL(cpu_feature))
	movl	%ecx,RELOC(_C_LABEL(cpu_ecxfeature))

	movl	RELOC(_C_LABEL(cpuid_level)),%eax
	cmp	$2,%eax
	jl	1f

	movl	$2,%eax
	cpuid
/*
	cmp	$1,%al
	jne	1f
*/

	movl	%eax,RELOC(_C_LABEL(cpu_cache_eax))
	movl	%ebx,RELOC(_C_LABEL(cpu_cache_ebx))
	movl	%ecx,RELOC(_C_LABEL(cpu_cache_ecx))
	movl	%edx,RELOC(_C_LABEL(cpu_cache_edx))

1:
	/* Check if brand identification string is supported */
	movl	$0x80000000,%eax
	cpuid
	cmpl	$0x80000000,%eax
	jbe	2f
	movl	$0x80000002,%eax
	cpuid
	movl	%eax,RELOC(_C_LABEL(cpu_brandstr))
	movl	%ebx,RELOC(_C_LABEL(cpu_brandstr))+4
	movl	%ecx,RELOC(_C_LABEL(cpu_brandstr))+8
	movl	%edx,RELOC(_C_LABEL(cpu_brandstr))+12
	movl	$0x80000003,%eax
	cpuid
	movl	%eax,RELOC(_C_LABEL(cpu_brandstr))+16
	movl	%ebx,RELOC(_C_LABEL(cpu_brandstr))+20
	movl	%ecx,RELOC(_C_LABEL(cpu_brandstr))+24
	movl	%edx,RELOC(_C_LABEL(cpu_brandstr))+28
	movl	$0x80000004,%eax
	cpuid
	movl	%eax,RELOC(_C_LABEL(cpu_brandstr))+32
	movl	%ebx,RELOC(_C_LABEL(cpu_brandstr))+36
	movl	%ecx,RELOC(_C_LABEL(cpu_brandstr))+40
	andl	$0x00ffffff,%edx	/* Shouldn't be necessary */
	movl	%edx,RELOC(_C_LABEL(cpu_brandstr))+44

2:
	/*
	 * Finished with old stack; load new %esp now instead of later so we
	 * can trace this code without having to worry about the trace trap
	 * clobbering the memory test or the zeroing of the bss+bootstrap page
	 * tables.
	 *
	 * The boot program should check:
	 *	text+data <= &stack_variable - more_space_for_stack
	 *	text+data+bss+pad+space_for_page_tables <= end_of_memory
	 * Oops, the gdt is in the carcass of the boot program so clearing
	 * the rest of memory is still not possible.
	 */
	movl	$RELOC(tmpstk),%esp	# bootstrap stack end location

/*
 * Virtual address space of kernel:
 *
 * text | data | bss | [syms] | proc0 stack | page dir | Sysmap
 *                            0             1          2      3
 */
#define	PROC0STACK	((0)		* NBPG)
#define	PROC0PDIR	((  UPAGES)	* NBPG)
#define	SYSMAP		((1+UPAGES)	* NBPG)
#define	TABLESIZE	((1+UPAGES)	* NBPG)	/* + _C_LABEL(nkpde) * NBPG */
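/*
 * Overview of the bootstrap layout computed below, as C-like pseudocode
 * (illustrative; `table_base', `table_end' and `kernel_end' are
 * hypothetical names, and addresses are physical at this point):
 *
 *	kernel_end = esym ? esym - KERNBASE : end;
 *	table_base = round_page(kernel_end);
 *	table_end  = table_base + TABLESIZE + nkpde * NBPG;
 *	bzero(kernel_end, table_end - kernel_end);
 *
 * nkpde is first clamped to [NKPTP_MIN, NKPTP_MAX], as done right below.
 */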
/* Find end of kernel image. */
	movl	$RELOC(_C_LABEL(end)),%edi
#if (defined(DDB) || NKSYMS > 0) && !defined(SYMTAB_SPACE)
	/* Save the symbols (if loaded). */
	movl	RELOC(_C_LABEL(esym)),%eax
	testl	%eax,%eax
	jz	1f
	subl	$KERNBASE,%eax
	movl	%eax,%edi
1:
#endif

	/* Calculate where to start the bootstrap tables. */
	movl	%edi,%esi			# edi = esym ? esym : end
	addl	$PGOFSET, %esi			# page align up
	andl	$~PGOFSET, %esi

	/*
	 * Calculate the size of the kernel page table directory, and
	 * how many entries it will have.
	 */
	movl	RELOC(_C_LABEL(nkpde)),%ecx	# get nkpde
	cmpl	$NKPTP_MIN,%ecx			# larger than min?
	jge	1f
	movl	$NKPTP_MIN,%ecx			# set at min
	jmp	2f
1:	cmpl	$NKPTP_MAX,%ecx			# larger than max?
	jle	2f
	movl	$NKPTP_MAX,%ecx
2:	movl	%ecx,RELOC(_C_LABEL(nkpde))	# and store it back

	/* Clear memory for bootstrap tables. */
	shll	$PGSHIFT,%ecx
	addl	$TABLESIZE,%ecx
	addl	%esi,%ecx			# end of tables
	subl	%edi,%ecx			# size of tables
	shrl	$2,%ecx
	xorl	%eax, %eax
	cld
	rep
	stosl

/*
 * fillkpt
 *	eax = pte (page frame | control | status)
 *	ebx = page table address
 *	ecx = number of pages to map
 */
#define	fillkpt	\
1:	movl	%eax,(%ebx)	; \
	addl	$NBPG,%eax	; /* increment physical address */ \
	addl	$4,%ebx		; /* next pte */ \
	loop	1b		;

/*
 * Build initial page tables.
 */
	/* Calculate end of text segment, rounded to a page. */
	leal	(RELOC(_C_LABEL(etext))+PGOFSET),%edx
	andl	$~PGOFSET,%edx

	/* Skip over the first 2MB. */
	movl	$RELOC(KERNTEXTOFF),%eax
	movl	%eax,%ecx
	shrl	$PGSHIFT,%ecx
	leal	(SYSMAP)(%esi,%ecx,4),%ebx

	/* Map the kernel text read-only. */
	movl	%edx,%ecx
	subl	%eax,%ecx
	shrl	$PGSHIFT,%ecx
#ifdef DDB
	orl	$(PG_V|PG_KW),%eax
#else
	orl	$(PG_V|PG_KR),%eax
#endif
	fillkpt

	/* Map the data, BSS, and bootstrap tables read-write. */
	leal	(PG_V|PG_KW)(%edx),%eax
	movl	RELOC(_C_LABEL(nkpde)),%ecx
	shll	$PGSHIFT,%ecx
	addl	$TABLESIZE,%ecx
	addl	%esi,%ecx			# end of tables
	subl	%edx,%ecx			# subtract end of text
	shrl	$PGSHIFT,%ecx
	fillkpt

	/* Map ISA I/O memory. */
	movl	$(IOM_BEGIN|PG_V|PG_KW/*|PG_N*/),%eax	# having these bits set,
	movl	$(IOM_SIZE>>PGSHIFT),%ecx		# for this many pte's,
	fillkpt

/*
 * Construct a page table directory.
 */
	movl	RELOC(_C_LABEL(nkpde)),%ecx		# count of pde's,
	leal	(PROC0PDIR+0*4)(%esi),%ebx		# where temp maps!
	leal	(SYSMAP+PG_V|PG_KW)(%esi),%eax		# pte for KPT in proc 0
	fillkpt

/*
 * Map kernel PDEs: this is the real mapping used
 * after the temp mapping outlives its usefulness.
 */
	movl	RELOC(_C_LABEL(nkpde)),%ecx		# count of pde's,
	leal	(PROC0PDIR+PDSLOT_KERN*4)(%esi),%ebx	# map them high
	leal	(SYSMAP+PG_V|PG_KW)(%esi),%eax		# pte for KPT in proc 0
	fillkpt

	/* Install a PDE recursively mapping page directory as a page table! */
	leal	(PROC0PDIR+PG_V|PG_KW)(%esi),%eax	# pte for ptd
	movl	%eax,(PROC0PDIR+PDSLOT_PTE*4)(%esi)	# recursive PD slot

	/* Save phys. addr of PTD, for libkvm. */
	leal	(PROC0PDIR)(%esi),%eax		# phys address of ptd in proc 0
	movl	%eax,RELOC(_C_LABEL(PTDpaddr))

	/* Load base of page directory and enable mapping. */
	movl	%eax,%cr3		# load ptd addr into mmu
	movl	%cr0,%eax		# get control word
					# enable paging & NPX emulation
	orl	$(CR0_PE|CR0_PG|CR0_NE|CR0_TS|CR0_EM|CR0_MP),%eax
	movl	%eax,%cr0		# and let's page NOW!

	pushl	$begin			# jump to high mem
	ret

begin:
	/* Now running relocated at KERNBASE.  Remove double mapping. */
	movl	_C_LABEL(nkpde),%ecx		# for this many pde's,
	leal	(PROC0PDIR+0*4)(%esi),%ebx	# which is where temp maps!
	addl	$(KERNBASE), %ebx		# now use relocated address
1:	movl	$0,(%ebx)
	addl	$4,%ebx				# next pde
	loop	1b
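/*
 * Illustrative recap (C, not assembled): each fillkpt expansion above
 * behaves like the loop below, with %ebx as `pte', %ecx as `n', and
 * %eax preloaded with the first frame address OR'ed with the control
 * bits.  Adding NBPG leaves the low control bits intact because NBPG
 * is page-aligned.
 *
 *	pt_entry_t ent = pa | bits;
 *
 *	while (n-- > 0) {
 *		*pte++ = ent;
 *		ent += NBPG;
 *	}
 */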
	/* Relocate atdevbase. */
	movl	_C_LABEL(nkpde),%edx
	shll	$PGSHIFT,%edx
	addl	$(TABLESIZE+KERNBASE),%edx
	addl	%esi,%edx
	movl	%edx,_C_LABEL(atdevbase)

	/* Set up bootstrap stack. */
	leal	(PROC0STACK+KERNBASE)(%esi),%eax
	movl	%eax,_C_LABEL(proc0paddr)
	leal	(USPACE-FRAMESIZE)(%eax),%esp
	leal	(PROC0PDIR)(%esi),%ebx	# phys address of ptd in proc 0
	movl	%ebx,PCB_CR3(%eax)	# pcb->pcb_cr3
	xorl	%ebp,%ebp		# mark end of frames

	movl	_C_LABEL(nkpde),%eax
	shll	$PGSHIFT,%eax
	addl	$TABLESIZE,%eax
	addl	%esi,%eax		# skip past stack and page tables
	pushl	%eax
	call	_C_LABEL(init386)	# wire 386 chip for unix operation
	addl	$4,%esp

	call	_C_LABEL(main)
	/* NOTREACHED */

NENTRY(proc_trampoline)
#ifdef MULTIPROCESSOR
	call	_C_LABEL(proc_trampoline_mp)
#endif
	movl	$IPL_NONE,CPL
	pushl	%ebx
	call	*%esi
	addl	$4,%esp
	INTRFASTEXIT
	/* NOTREACHED */

/*****************************************************************************/

/*
 * Signal trampoline; copied to top of user stack.
 */
NENTRY(sigcode)
	movl	SIGF_FPSTATE(%esp),%esi	# FPU state area if need saving
	testl	%esi,%esi
	jz	1f
	fnsave	(%esi)
1:	call	*SIGF_HANDLER(%esp)
	testl	%esi,%esi
	jz	2f
	frstor	(%esi)
	jmp	2f

	.globl	_C_LABEL(sigcode_xmm)
_C_LABEL(sigcode_xmm):
	movl	SIGF_FPSTATE(%esp),%esi	# FPU state area if need saving
	testl	%esi,%esi
	jz	1f
	fxsave	(%esi)
	fninit
1:	call	*SIGF_HANDLER(%esp)
	testl	%esi,%esi
	jz	2f
	fxrstor	(%esi)

2:	leal	SIGF_SC(%esp),%eax	# scp (the call may have clobbered the
					# copy at SIGF_SCP(%esp))
	pushl	%eax
	pushl	%eax			# junk to fake return address
	movl	$SYS_sigreturn,%eax
	int	$0x80			# enter kernel with args on stack
	movl	$SYS_exit,%eax
	int	$0x80			# exit if sigreturn fails
	.globl	_C_LABEL(esigcode)
_C_LABEL(esigcode):

/*****************************************************************************/

/*
 * The following primitives are used to fill and copy regions of memory.
 */

/* Frame pointer reserve on stack. */
#ifdef DDB
#define FPADD 4
#else
#define FPADD 0
#endif

/*
 * kcopy(caddr_t from, caddr_t to, size_t len);
 * Copy len bytes, abort on fault.
 */
ENTRY(kcopy)
#ifdef DDB
	pushl	%ebp
	movl	%esp,%ebp
#endif
	pushl	%esi
	pushl	%edi
	GET_CURPCB(%eax)		# load curpcb into eax and set on-fault
	pushl	PCB_ONFAULT(%eax)
	movl	$_C_LABEL(copy_fault), PCB_ONFAULT(%eax)

	movl	16+FPADD(%esp),%esi
	movl	20+FPADD(%esp),%edi
	movl	24+FPADD(%esp),%ecx
	movl	%edi,%eax
	subl	%esi,%eax
	cmpl	%ecx,%eax		# overlapping?
	jb	1f
	cld				# nope, copy forward
	shrl	$2,%ecx			# copy by 32-bit words
	rep
	movsl
	movl	24+FPADD(%esp),%ecx
	andl	$3,%ecx			# any bytes left?
	rep
	movsb

	GET_CURPCB(%edx)		# XXX save curpcb?
	popl	PCB_ONFAULT(%edx)
	popl	%edi
	popl	%esi
	xorl	%eax,%eax
#ifdef DDB
	leave
#endif
	ret

	ALIGN_TEXT
1:	addl	%ecx,%edi		# copy backward
	addl	%ecx,%esi
	std
	andl	$3,%ecx			# any fractional bytes?
	decl	%edi
	decl	%esi
	rep
	movsb
	movl	24+FPADD(%esp),%ecx	# copy remainder by 32-bit words
	shrl	$2,%ecx
	subl	$3,%esi
	subl	$3,%edi
	rep
	movsl
	cld

	GET_CURPCB(%edx)
	popl	PCB_ONFAULT(%edx)
	popl	%edi
	popl	%esi
	xorl	%eax,%eax
#ifdef DDB
	leave
#endif
	ret
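/*
 * Illustrative sketch (C, not assembled): the overlap test used by
 * kcopy above and bcopy below.  Computing dst - src as an unsigned
 * number handles both directions with one compare: the difference is
 * smaller than len only when the regions overlap with dst above src,
 * which is the one case that requires a backward copy.  The helper
 * names are hypothetical stand-ins for the rep movs sequences.
 *
 *	if ((size_t)((char *)dst - (char *)src) < len)
 *		copy_backward(dst, src, len);
 *	else
 *		copy_forward(dst, src, len);
 */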
/*
 * bcopy(caddr_t from, caddr_t to, size_t len);
 * Copy len bytes.
 */
ALTENTRY(ovbcopy)
ENTRY(bcopy)
	pushl	%esi
	pushl	%edi
	movl	12(%esp),%esi
	movl	16(%esp),%edi
	movl	20(%esp),%ecx
	movl	%edi,%eax
	subl	%esi,%eax
	cmpl	%ecx,%eax		# overlapping?
	jb	1f
	cld				# nope, copy forward
	shrl	$2,%ecx			# copy by 32-bit words
	rep
	movsl
	movl	20(%esp),%ecx
	andl	$3,%ecx			# any bytes left?
	rep
	movsb
	popl	%edi
	popl	%esi
	ret

	ALIGN_TEXT
1:	addl	%ecx,%edi		# copy backward
	addl	%ecx,%esi
	std
	andl	$3,%ecx			# any fractional bytes?
	decl	%edi
	decl	%esi
	rep
	movsb
	movl	20(%esp),%ecx		# copy remainder by 32-bit words
	shrl	$2,%ecx
	subl	$3,%esi
	subl	$3,%edi
	rep
	movsl
	popl	%edi
	popl	%esi
	cld
	ret

/*
 * Emulate memcpy() by swapping the first two arguments and calling bcopy()
 */
ENTRY(memcpy)
	movl	4(%esp),%ecx
	xchg	8(%esp),%ecx
	movl	%ecx,4(%esp)
	jmp	_C_LABEL(bcopy)

/*****************************************************************************/

/*
 * The following primitives are used to copy data in and out of the user's
 * address space.
 */

/*
 * copyout(caddr_t from, caddr_t to, size_t len);
 * Copy len bytes into the user's address space.
 */
ENTRY(copyout)
#ifdef DDB
	pushl	%ebp
	movl	%esp,%ebp
#endif
	pushl	%esi
	pushl	%edi
	pushl	$0

	movl	16+FPADD(%esp),%esi
	movl	20+FPADD(%esp),%edi
	movl	24+FPADD(%esp),%eax

	/*
	 * We check that the end of the destination buffer is not past the end
	 * of the user's address space.  If it's not, then we only need to
	 * check that each page is writable.  The 486 will do this for us; the
	 * 386 will not.  (We assume that pages in user space that are not
	 * writable by the user are not writable by the kernel either.)
	 */
	movl	%edi,%edx
	addl	%eax,%edx
	jc	_C_LABEL(copy_fault)
	cmpl	$VM_MAXUSER_ADDRESS,%edx
	ja	_C_LABEL(copy_fault)

3:	GET_CURPCB(%edx)
	movl	$_C_LABEL(copy_fault),PCB_ONFAULT(%edx)

	/* bcopy(%esi, %edi, %eax); */
	cld
	movl	%eax,%ecx
	shrl	$2,%ecx
	rep
	movsl
	movl	%eax,%ecx
	andl	$3,%ecx
	rep
	movsb

	popl	PCB_ONFAULT(%edx)
	popl	%edi
	popl	%esi
	xorl	%eax,%eax
#ifdef DDB
	leave
#endif
	ret

/*
 * copyin(caddr_t from, caddr_t to, size_t len);
 * Copy len bytes from the user's address space.
 */
ENTRY(copyin)
#ifdef DDB
	pushl	%ebp
	movl	%esp,%ebp
#endif
	pushl	%esi
	pushl	%edi
	GET_CURPCB(%eax)
	pushl	$0
	movl	$_C_LABEL(copy_fault),PCB_ONFAULT(%eax)

	movl	16+FPADD(%esp),%esi
	movl	20+FPADD(%esp),%edi
	movl	24+FPADD(%esp),%eax

	/*
	 * We check that the end of the destination buffer is not past the end
	 * of the user's address space.  If it's not, then we only need to
	 * check that each page is readable, and the CPU will do that for us.
	 */
	movl	%esi,%edx
	addl	%eax,%edx
	jc	_C_LABEL(copy_fault)
	cmpl	$VM_MAXUSER_ADDRESS,%edx
	ja	_C_LABEL(copy_fault)

3:	/* bcopy(%esi, %edi, %eax); */
	cld
	movl	%eax,%ecx
	shrl	$2,%ecx
	rep
	movsl
	movb	%al,%cl
	andb	$3,%cl
	rep
	movsb

	GET_CURPCB(%edx)
	popl	PCB_ONFAULT(%edx)
	popl	%edi
	popl	%esi
	xorl	%eax,%eax
#ifdef DDB
	leave
#endif
	ret

ENTRY(copy_fault)
	GET_CURPCB(%edx)
	popl	PCB_ONFAULT(%edx)
	popl	%edi
	popl	%esi
	movl	$EFAULT,%eax
#ifdef DDB
	leave
#endif
	ret
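/*
 * Illustrative sketch (C, not assembled): the range check performed by
 * copyout and copyin above.  The addition is done first so that a
 * length large enough to wrap around the 32-bit address space is
 * caught by the carry (the jc) before the limit compare.
 *
 *	vaddr_t end = (vaddr_t)uaddr + len;
 *
 *	if (end < (vaddr_t)uaddr || end > VM_MAXUSER_ADDRESS)
 *		return EFAULT;
 */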
/*
 * copyoutstr(caddr_t from, caddr_t to, size_t maxlen, size_t *lencopied);
 * Copy a NUL-terminated string, at most maxlen characters long, into the
 * user's address space.  Return the number of characters copied (including
 * the NUL) in *lencopied.  If the string is too long, return ENAMETOOLONG;
 * else return 0 or EFAULT.
 */
ENTRY(copyoutstr)
#ifdef DDB
	pushl	%ebp
	movl	%esp,%ebp
#endif
	pushl	%esi
	pushl	%edi

	movl	12+FPADD(%esp),%esi	# esi = from
	movl	16+FPADD(%esp),%edi	# edi = to
	movl	20+FPADD(%esp),%edx	# edx = maxlen

5:	GET_CURPCB(%eax)
	movl	$_C_LABEL(copystr_fault),PCB_ONFAULT(%eax)

	/*
	 * Get min(%edx, VM_MAXUSER_ADDRESS-%edi).
	 */
	movl	$VM_MAXUSER_ADDRESS,%eax
	subl	%edi,%eax
	jbe	_C_LABEL(copystr_fault)	# die if CF == 1 || ZF == 1
					# i.e. make sure that %edi
					# is below VM_MAXUSER_ADDRESS
	cmpl	%edx,%eax
	jae	1f
	movl	%eax,%edx
	movl	%eax,20+FPADD(%esp)

1:	incl	%edx
	cld

1:	decl	%edx
	jz	2f
	lodsb
	stosb
	testb	%al,%al
	jnz	1b

	/* Success -- 0 byte reached. */
	decl	%edx
	xorl	%eax,%eax
	jmp	copystr_return

2:	/* edx is zero -- return EFAULT or ENAMETOOLONG. */
	cmpl	$VM_MAXUSER_ADDRESS,%edi
	jae	_C_LABEL(copystr_fault)
	movl	$ENAMETOOLONG,%eax
	jmp	copystr_return

/*
 * copyinstr(caddr_t from, caddr_t to, size_t maxlen, size_t *lencopied);
 * Copy a NUL-terminated string, at most maxlen characters long, from the
 * user's address space.  Return the number of characters copied (including
 * the NUL) in *lencopied.  If the string is too long, return ENAMETOOLONG;
 * else return 0 or EFAULT.
 */
ENTRY(copyinstr)
#ifdef DDB
	pushl	%ebp
	movl	%esp,%ebp
#endif
	pushl	%esi
	pushl	%edi
	GET_CURPCB(%ecx)
	movl	$_C_LABEL(copystr_fault),PCB_ONFAULT(%ecx)

	movl	12+FPADD(%esp),%esi	# %esi = from
	movl	16+FPADD(%esp),%edi	# %edi = to
	movl	20+FPADD(%esp),%edx	# %edx = maxlen

	/*
	 * Get min(%edx, VM_MAXUSER_ADDRESS-%esi).
	 */
	movl	$VM_MAXUSER_ADDRESS,%eax
	subl	%esi,%eax
	jbe	_C_LABEL(copystr_fault)	# error if CF == 1 || ZF == 1
					# i.e. make sure that %esi
					# is below VM_MAXUSER_ADDRESS
	cmpl	%edx,%eax
	jae	1f
	movl	%eax,%edx
	movl	%eax,20+FPADD(%esp)

1:	incl	%edx
	cld

1:	decl	%edx
	jz	2f
	lodsb
	stosb
	testb	%al,%al
	jnz	1b

	/* Success -- 0 byte reached. */
	decl	%edx
	xorl	%eax,%eax
	jmp	copystr_return

2:	/* edx is zero -- return EFAULT or ENAMETOOLONG. */
	cmpl	$VM_MAXUSER_ADDRESS,%esi
	jae	_C_LABEL(copystr_fault)
	movl	$ENAMETOOLONG,%eax
	jmp	copystr_return

ENTRY(copystr_fault)
	movl	$EFAULT,%eax

copystr_return:
	/* Set *lencopied and return %eax. */
	GET_CURPCB(%ecx)
	movl	$0,PCB_ONFAULT(%ecx)
	movl	20+FPADD(%esp),%ecx
	subl	%edx,%ecx
	movl	24+FPADD(%esp),%edx
	testl	%edx,%edx
	jz	8f
	movl	%ecx,(%edx)

8:	popl	%edi
	popl	%esi
#ifdef DDB
	leave
#endif
	ret

/*
 * copystr(caddr_t from, caddr_t to, size_t maxlen, size_t *lencopied);
 * Copy a NUL-terminated string, at most maxlen characters long.  Return the
 * number of characters copied (including the NUL) in *lencopied.  If the
 * string is too long, return ENAMETOOLONG; else return 0.
 */
ENTRY(copystr)
#ifdef DDB
	pushl	%ebp
	movl	%esp,%ebp
#endif
	pushl	%esi
	pushl	%edi

	movl	12+FPADD(%esp),%esi	# esi = from
	movl	16+FPADD(%esp),%edi	# edi = to
	movl	20+FPADD(%esp),%edx	# edx = maxlen
	incl	%edx
	cld

1:	decl	%edx
	jz	4f
	lodsb
	stosb
	testb	%al,%al
	jnz	1b

	/* Success -- 0 byte reached. */
	decl	%edx
	xorl	%eax,%eax
	jmp	6f

4:	/* edx is zero -- return ENAMETOOLONG. */
	movl	$ENAMETOOLONG,%eax

6:	/* Set *lencopied and return %eax. */
	movl	20+FPADD(%esp),%ecx
	subl	%edx,%ecx
	movl	24+FPADD(%esp),%edx
	testl	%edx,%edx
	jz	7f
	movl	%ecx,(%edx)

7:	popl	%edi
	popl	%esi
#ifdef DDB
	leave
#endif
	ret
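/*
 * Reference sketch of copystr's contract in C (illustrative; the entry
 * point above is what the kernel actually uses, and the in/out variants
 * add the user-space bounds checks and fault recovery seen above):
 *
 *	int
 *	copystr(const char *from, char *to, size_t maxlen, size_t *lencopied)
 *	{
 *		size_t i;
 *
 *		for (i = 0; i < maxlen; i++) {
 *			if ((to[i] = from[i]) == '\0') {
 *				if (lencopied != NULL)
 *					*lencopied = i + 1;
 *				return 0;
 *			}
 *		}
 *		if (lencopied != NULL)
 *			*lencopied = maxlen;
 *		return ENAMETOOLONG;
 *	}
 */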
/*****************************************************************************/

/*
 * The following is i386-specific nonsense.
 */

/*
 * void lgdt(struct region_descriptor *rdp);
 * Change the global descriptor table.
 */
NENTRY(lgdt)
	/* Reload the descriptor table. */
	movl	4(%esp),%eax
	lgdt	(%eax)
	/* Flush the prefetch q. */
	jmp	1f
	nop
1:	/* Reload "stale" selectors. */
	movl	$GSEL(GDATA_SEL, SEL_KPL),%eax
	movw	%ax,%ds
	movw	%ax,%es
	movw	%ax,%ss
	movl	$GSEL(GCPU_SEL, SEL_KPL),%eax
	movw	%ax,%fs
	/* Reload code selector by doing intersegment return. */
	popl	%eax
	pushl	$GSEL(GCODE_SEL, SEL_KPL)
	pushl	%eax
	lret

ENTRY(setjmp)
	movl	4(%esp),%eax
	movl	%ebx,(%eax)		# save ebx
	movl	%esp,4(%eax)		# save esp
	movl	%ebp,8(%eax)		# save ebp
	movl	%esi,12(%eax)		# save esi
	movl	%edi,16(%eax)		# save edi
	movl	(%esp),%edx		# get rta
	movl	%edx,20(%eax)		# save eip
	xorl	%eax,%eax		# return (0);
	ret

ENTRY(longjmp)
	movl	4(%esp),%eax
	movl	(%eax),%ebx		# restore ebx
	movl	4(%eax),%esp		# restore esp
	movl	8(%eax),%ebp		# restore ebp
	movl	12(%eax),%esi		# restore esi
	movl	16(%eax),%edi		# restore edi
	movl	20(%eax),%edx		# get rta
	movl	%edx,(%esp)		# put in return frame
	xorl	%eax,%eax		# return (1);
	incl	%eax
	ret
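/*
 * Usage sketch (C, illustrative): these are the in-kernel setjmp and
 * longjmp, saving only the callee-saved registers, %esp, and the return
 * address into a six-word buffer.  The buffer type and variable names
 * below are hypothetical.
 *
 *	u_long jb[6];
 *
 *	if (setjmp(jb) == 0) {
 *		...		first pass: setjmp returned 0
 *		longjmp(jb);	unwinds; setjmp "returns" again, with 1
 *	} else {
 *		...		recovery path
 *	}
 */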
/*****************************************************************************/

#ifdef DIAGNOSTIC
NENTRY(switch_error1)
	pushl	%edi
	pushl	$1f
	call	_C_LABEL(panic)
	/* NOTREACHED */
1:	.asciz	"cpu_switch1 %p"

NENTRY(switch_error2)
	pushl	%edi
	pushl	$1f
	call	_C_LABEL(panic)
	/* NOTREACHED */
1:	.asciz	"cpu_switch2 %p"
#endif /* DIAGNOSTIC */

/*
 * cpu_switchto(struct proc *old, struct proc *new)
 * Switch from the "old" proc to the "new" proc.  If "old" is NULL, we
 * don't need to bother saving old context.
 */
ENTRY(cpu_switchto)
	pushl	%ebx
	pushl	%esi
	pushl	%edi

	movl	16(%esp), %esi
	movl	20(%esp), %edi

#ifdef DIAGNOSTIC
	xorl	%eax, %eax
	cmpl	%eax,P_WCHAN(%edi)	# Waiting for something?
	jne	_C_LABEL(switch_error1)	# Yes; shouldn't be queued.
	cmpb	$SRUN,P_STAT(%edi)	# In run state?
	jne	_C_LABEL(switch_error2)	# No; shouldn't be queued.
#endif /* DIAGNOSTIC */

	/* If old process exited, don't bother. */
	testl	%esi,%esi
	jz	switch_exited

	/*
	 * Save old context.
	 *
	 * Registers:
	 *   %eax, %ecx - scratch
	 *   %esi - old process, then old pcb
	 *   %edi - new process
	 */

	pushl	%esi
	call	_C_LABEL(pmap_deactivate)
	addl	$4,%esp

	movl	P_ADDR(%esi),%esi

	/* Save stack pointers. */
	movl	%esp,PCB_ESP(%esi)
	movl	%ebp,PCB_EBP(%esi)

switch_exited:
	/*
	 * Third phase: restore saved context.
	 *
	 * Registers:
	 *   %eax, %ecx, %edx - scratch
	 *   %esi - new pcb
	 *   %edi - new process
	 */

	/* No interrupts while loading new state. */
	cli

	/* Record new process. */
	movl	CPUVAR(SELF), %ebx
	movl	%edi, CPUVAR(CURPROC)
	movb	$SONPROC, P_STAT(%edi)
	movl	%ebx, P_CPU(%edi)

	movl	P_ADDR(%edi),%esi

	/* Restore stack pointers. */
	movl	PCB_ESP(%esi),%esp
	movl	PCB_EBP(%esi),%ebp

	/*
	 * Activate the address space.  We're curproc, so %cr3 will
	 * be reloaded, but we're not yet curpcb, so the LDT won't
	 * be reloaded, although the PCB copy of the selector will
	 * be refreshed from the pmap.
	 */
	pushl	%edi
	call	_C_LABEL(pmap_activate)
	addl	$4,%esp

	/* Load TSS info. */
	movl	CPUVAR(GDT),%eax
	movl	P_MD_TSS_SEL(%edi),%edx

	/* Switch TSS. */
	andl	$~0x0200,4-SEL_KPL(%eax,%edx,1)
	ltr	%dx

#ifdef USER_LDT
	/*
	 * Switch LDT.
	 *
	 * XXX
	 * Always do this, because the LDT could have been swapped into a
	 * different selector after a process exited.  (See gdt_compact().)
	 */
	movl	PCB_LDT_SEL(%esi),%edx
	lldt	%dx
#endif /* USER_LDT */

	/* Restore cr0 (including FPU state). */
	movl	PCB_CR0(%esi),%ecx
#ifdef MULTIPROCESSOR
	/*
	 * If our floating point registers are on a different CPU,
	 * clear CR0_TS so we'll trap rather than reuse bogus state.
	 */
	movl	CPUVAR(SELF), %ebx
	cmpl	PCB_FPCPU(%esi),%ebx
	jz	1f
	orl	$CR0_TS,%ecx
1:
#endif
	movl	%ecx,%cr0

	/* Record new pcb. */
	SET_CURPCB(%esi)

	/* Interrupts are okay again. */
	sti

	popl	%edi
	popl	%esi
	popl	%ebx
	ret

ENTRY(cpu_idle_enter)
	movl	_C_LABEL(cpu_idle_enter_fcn),%eax
	cmpl	$0,%eax
	je	1f
	jmpl	*%eax
1:
	ret

ENTRY(cpu_idle_cycle)
	movl	_C_LABEL(cpu_idle_cycle_fcn),%eax
	cmpl	$0,%eax
	je	1f
	call	*%eax
	ret
1:
	sti
	hlt
	ret

ENTRY(cpu_idle_leave)
	movl	_C_LABEL(cpu_idle_leave_fcn),%eax
	cmpl	$0,%eax
	je	1f
	jmpl	*%eax
1:
	ret

/*
 * savectx(struct pcb *pcb);
 * Update pcb, saving current processor state.
 */
ENTRY(savectx)
	movl	4(%esp),%edx		# edx = p->p_addr

	/* Save stack pointers. */
	movl	%esp,PCB_ESP(%edx)
	movl	%ebp,PCB_EBP(%edx)

	movl	PCB_FLAGS(%edx),%ecx
	orl	$PCB_SAVECTX,%ecx
	movl	%ecx,PCB_FLAGS(%edx)

	ret
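/*
 * Sketch (C, illustrative) of the hook pattern behind cpu_idle_cycle
 * above: an optional function pointer, normally filled in by
 * CPU-specific setup code, with a sti/hlt fallback.
 *
 *	void (*cpu_idle_cycle_fcn)(void);
 *
 *	void
 *	cpu_idle_cycle(void)
 *	{
 *		if (cpu_idle_cycle_fcn != NULL)
 *			(*cpu_idle_cycle_fcn)();
 *		else
 *			__asm volatile("sti; hlt");
 *	}
 */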
/*****************************************************************************/

/*
 * Trap and fault vector routines
 *
 * On exit from the kernel to user mode, we always need to check for ASTs.
 * In addition, we need to do this atomically; otherwise an interrupt may
 * occur which causes an AST, but it won't get processed until the next
 * kernel entry (possibly the next clock tick).  Thus, we disable interrupts
 * before checking, and only enable them again on the final `iret' or before
 * calling the AST handler.
 *
 * XXX - debugger traps are now interrupt gates so at least bdb doesn't lose
 * control.  The sti's give the standard losing behaviour for ddb and kgdb.
 */
#define	IDTVEC(name)	ALIGN_TEXT; .globl X/**/name; X/**/name:

#define	TRAP(a)		pushl $(a) ; jmp _C_LABEL(alltraps)
#define	ZTRAP(a)	pushl $0 ; TRAP(a)
#define	BPTTRAP(a)	testb $(PSL_I>>8),13(%esp) ; jz 1f ; sti ; 1: ; \
			TRAP(a)

	.text
IDTVEC(div)
	ZTRAP(T_DIVIDE)
IDTVEC(dbg)
	subl	$4,%esp
	pushl	%eax
	movl	%dr6,%eax
	movl	%eax,4(%esp)
	andb	$~0xf,%al
	movl	%eax,%dr6
	popl	%eax
	BPTTRAP(T_TRCTRAP)
IDTVEC(nmi)
	ZTRAP(T_NMI)
IDTVEC(bpt)
	pushl	$0
	BPTTRAP(T_BPTFLT)
IDTVEC(ofl)
	ZTRAP(T_OFLOW)
IDTVEC(bnd)
	ZTRAP(T_BOUND)
IDTVEC(ill)
	ZTRAP(T_PRIVINFLT)
IDTVEC(dna)
#if NNPX > 0
	pushl	$0			# dummy error code
	pushl	$T_DNA
	INTRENTRY
#ifdef MULTIPROCESSOR
	pushl	CPUVAR(SELF)
#else
	pushl	$_C_LABEL(cpu_info_primary)
#endif
	call	*_C_LABEL(npxdna_func)
	addl	$4,%esp
	testl	%eax,%eax
	jz	calltrap
	INTRFASTEXIT
#else
	ZTRAP(T_DNA)
#endif
IDTVEC(dble)
	TRAP(T_DOUBLEFLT)
IDTVEC(fpusegm)
	ZTRAP(T_FPOPFLT)
IDTVEC(tss)
	TRAP(T_TSSFLT)
IDTVEC(missing)
	TRAP(T_SEGNPFLT)
IDTVEC(stk)
	TRAP(T_STKFLT)
IDTVEC(prot)
	TRAP(T_PROTFLT)
IDTVEC(f00f_redirect)
	pushl	$T_PAGEFLT
	INTRENTRY
	testb	$PGEX_U,TF_ERR(%esp)
	jnz	calltrap
	movl	%cr2,%eax
	subl	_C_LABEL(idt),%eax
	cmpl	$(6*8),%eax
	jne	calltrap
	movb	$T_PRIVINFLT,TF_TRAPNO(%esp)
	jmp	calltrap
IDTVEC(page)
	TRAP(T_PAGEFLT)
IDTVEC(rsvd)
	ZTRAP(T_RESERVED)
IDTVEC(mchk)
	ZTRAP(T_MACHK)
IDTVEC(simd)
	ZTRAP(T_XFTRAP)
IDTVEC(intrspurious)
	/*
	 * The Pentium Pro local APIC may erroneously call this vector for a
	 * default IR7.  Just ignore it.
	 *
	 * (The local APIC does this when CPL is raised while it's on the
	 * way to delivering an interrupt.. presumably enough has been set
	 * up that it's inconvenient to abort delivery completely..)
	 */
	iret
IDTVEC(fpu)
#if NNPX > 0
	/*
	 * Handle like an interrupt so that we can call npxintr to clear the
	 * error.  It would be better to handle npx interrupts as traps but
	 * this is difficult for nested interrupts.
	 */
	pushl	$0			# dummy error code
	pushl	$T_ASTFLT
	INTRENTRY
	pushl	CPL			# if_ppl in intrframe
	pushl	%esp			# push address of intrframe
	incl	_C_LABEL(uvmexp)+V_TRAP
	call	_C_LABEL(npxintr)
	addl	$8,%esp			# pop address and if_ppl
	INTRFASTEXIT
#else
	ZTRAP(T_ARITHTRAP)
#endif
IDTVEC(align)
	ZTRAP(T_ALIGNFLT)
	/* 18 - 31 reserved for future exp */

/*
 * If an error is detected during trap, syscall, or interrupt exit, trap()
 * will change %eip to point to one of these labels.  We clean up the stack,
 * if necessary, and resume as if we were handling a general protection
 * fault.  This will cause the process to get a SIGBUS.
 */
NENTRY(resume_iret)
	ZTRAP(T_PROTFLT)
NENTRY(resume_pop_ds)
	pushl	%es
	movl	$GSEL(GDATA_SEL, SEL_KPL),%eax
	movw	%ax,%es
NENTRY(resume_pop_es)
	pushl	%gs
	movl	$GSEL(GDATA_SEL, SEL_KPL),%eax
	movw	%ax,%gs
NENTRY(resume_pop_gs)
	pushl	%fs
	movl	$GSEL(GCPU_SEL, SEL_KPL),%eax
	movw	%ax,%fs
NENTRY(resume_pop_fs)
	movl	$T_PROTFLT,TF_TRAPNO(%esp)
	jmp	calltrap

NENTRY(alltraps)
	INTRENTRY
calltrap:
#ifdef DIAGNOSTIC
	movl	CPL,%ebx
#endif /* DIAGNOSTIC */
	call	_C_LABEL(trap)
2:	/* Check for ASTs on exit to user mode. */
	cli
	CHECK_ASTPENDING(%ecx)
	je	1f
	testb	$SEL_RPL,TF_CS(%esp)
#ifdef VM86
	jnz	5f
	testl	$PSL_VM,TF_EFLAGS(%esp)
#endif
	jz	1f
5:	CLEAR_ASTPENDING(%ecx)
	sti
	movl	$T_ASTFLT,TF_TRAPNO(%esp)
	call	_C_LABEL(trap)
	jmp	2b
#ifndef DIAGNOSTIC
1:	INTRFASTEXIT
#else
1:	cmpl	CPL,%ebx
	jne	3f
	INTRFASTEXIT
3:	sti
	pushl	$4f
	call	_C_LABEL(printf)
	addl	$4,%esp
#if defined(DDB) && 0
	int	$3
#endif /* DDB */
	movl	%ebx,CPL
	jmp	2b
4:	.asciz	"WARNING: SPL NOT LOWERED ON TRAP EXIT\n"
#endif /* DIAGNOSTIC */

/*
 * Old call gate entry for syscall
 */
IDTVEC(osyscall)
	/* Set eflags in trap frame. */
	pushfl
	popl	8(%esp)
	/* Turn off trace flag and nested task. */
	pushfl
	andb	$~((PSL_T|PSL_NT)>>8),1(%esp)
	popfl
	pushl	$7			# size of instruction for restart
	jmp	syscall1
IDTVEC(osyscall_end)

/*
 * Trap gate entry for syscall
 */
IDTVEC(syscall)
	pushl	$2			# size of instruction for restart
syscall1:
	pushl	$T_ASTFLT		# trap # for doing ASTs
	INTRENTRY
	call	_C_LABEL(syscall)
2:	/* Check for ASTs on exit to user mode. */
	cli
	CHECK_ASTPENDING(%ecx)
	je	1f
	/* Always returning to user mode here. */
	CLEAR_ASTPENDING(%ecx)
	sti
	/* Pushed T_ASTFLT into tf_trapno on entry. */
	call	_C_LABEL(trap)
	jmp	2b
1:	INTRFASTEXIT
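/*
 * Sketch (C-like pseudocode, illustrative and simplified) of the AST
 * check performed on every return to user mode above; the helper names
 * are descriptive only, and the trap path additionally verifies that
 * it really is returning to user (or VM86) mode:
 *
 *	for (;;) {
 *		disable_intr();
 *		if (!astpending(curproc))
 *			break;			then INTRFASTEXIT/iret
 *		clear_astpending(curproc);
 *		enable_intr();
 *		frame->tf_trapno = T_ASTFLT;
 *		trap(frame);
 *	}
 *
 * Interrupts are disabled before the test so an AST posted by an
 * interrupt cannot slip in between the check and the iret.
 */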
#include <i386/isa/vector.s>
#include <i386/isa/icu.s>

/*
 * bzero (void *b, size_t len)
 *	write len zero bytes to the string b.
 */
ENTRY(bzero)
	pushl	%edi
	movl	8(%esp),%edi
	movl	12(%esp),%edx

	cld				/* set fill direction forward */
	xorl	%eax,%eax		/* set fill data to 0 */

	/*
	 * if the string is too short, it's really not worth the overhead
	 * of aligning to word boundaries, etc.  So we jump to a plain
	 * unaligned set.
	 */
	cmpl	$16,%edx
	jb	7f

	movl	%edi,%ecx		/* compute misalignment */
	negl	%ecx
	andl	$3,%ecx
	subl	%ecx,%edx
	rep				/* zero until word aligned */
	stosb

	cmpl	$CPUCLASS_486,_C_LABEL(cpu_class)
	jne	8f

	movl	%edx,%ecx
	shrl	$6,%ecx
	jz	8f
	andl	$63,%edx
1:	movl	%eax,(%edi)
	movl	%eax,4(%edi)
	movl	%eax,8(%edi)
	movl	%eax,12(%edi)
	movl	%eax,16(%edi)
	movl	%eax,20(%edi)
	movl	%eax,24(%edi)
	movl	%eax,28(%edi)
	movl	%eax,32(%edi)
	movl	%eax,36(%edi)
	movl	%eax,40(%edi)
	movl	%eax,44(%edi)
	movl	%eax,48(%edi)
	movl	%eax,52(%edi)
	movl	%eax,56(%edi)
	movl	%eax,60(%edi)
	addl	$64,%edi
	decl	%ecx
	jnz	1b

8:	movl	%edx,%ecx		/* zero by words */
	shrl	$2,%ecx
	andl	$3,%edx
	rep
	stosl

7:	movl	%edx,%ecx		/* zero remainder bytes */
	rep
	stosb

	popl	%edi
	ret

#if !defined(SMALL_KERNEL)
ENTRY(sse2_pagezero)
	pushl	%ebx
	movl	8(%esp),%ecx
	movl	%ecx,%eax
	addl	$4096,%eax
	xor	%ebx,%ebx
1:
	movnti	%ebx,(%ecx)
	addl	$4,%ecx
	cmpl	%ecx,%eax
	jne	1b
	sfence
	popl	%ebx
	ret

ENTRY(i686_pagezero)
	pushl	%edi
	pushl	%ebx

	movl	12(%esp), %edi
	movl	$1024, %ecx
	cld

	ALIGN_TEXT
1:
	xorl	%eax, %eax
	repe
	scasl
	jnz	2f

	popl	%ebx
	popl	%edi
	ret

	ALIGN_TEXT
2:
	incl	%ecx
	subl	$4, %edi

	movl	%ecx, %edx
	cmpl	$16, %ecx

	jge	3f

	movl	%edi, %ebx
	andl	$0x3f, %ebx
	shrl	%ebx
	shrl	%ebx
	movl	$16, %ecx
	subl	%ebx, %ecx

3:
	subl	%ecx, %edx
	rep
	stosl

	movl	%edx, %ecx
	testl	%edx, %edx
	jnz	1b

	popl	%ebx
	popl	%edi
	ret
#endif

#if NACPI > 0
/*
 * The ACPI global lock is a dword in the FACS table.  Per the ACPI
 * specification, bit 0 is the "pending" flag and bit 1 the "owned" flag.
 */
ENTRY(acpi_acquire_global_lock)
	movl	4(%esp), %ecx
1:
	movl	(%ecx), %eax
	movl	%eax, %edx
	andl	$~1, %edx		# clear pending
	btsl	$1, %edx		# set owned; CF = old owned bit
	adcl	$0, %edx		# if it was owned, set pending
	lock
	cmpxchgl %edx, (%ecx)		# commit; retry if lock word changed
	jnz	1b

	andl	$3, %edx		# both bits set means somebody else
	cmpl	$3, %edx		# owns it and we only registered as
	sbb	%eax, %eax		# pending; nonzero return = acquired
	ret

ENTRY(acpi_release_global_lock)
	movl	4(%esp), %ecx
1:
	movl	(%ecx), %eax
	movl	%eax, %edx
	andl	$~3, %edx		# clear owned and pending
	lock
	cmpxchgl %edx, (%ecx)
	jnz	1b

	andl	$1, %eax		# return old pending bit; if set, a
	ret				# waiter must be signalled
#endif

#if NLAPIC > 0
#include <i386/i386/apicvec.s>
#endif

#include <i386/i386/mutex.S>