linux arm irq (2)

2 interrupt handling

============================================================================================================

Author: Yangkai Wang
wang_[email protected]
Coding in 2021/05/10
Please credit the author and the source when reposting.

linux version 3.4.39
s5p6818 soc


Cortex-A53 octa-core CPU
Interrupt Controller: GIC-400

  • Setting up the SVC stack of the idle process (start_kernel)
/* arch/arm/kernel/head-common.S */

...
/*
 * The following fragment of code is executed with the MMU on in MMU mode,
 * and uses absolute addresses; this is not position independent.
 *
 *  r0  = cp#15 control register
 *  r1  = machine ID
 *  r2  = atags/dtb pointer
 *  r9  = processor ID
 */
    __INIT
__mmap_switched:
    adr    r3, __mmap_switched_data

    ldmia    r3!, {r4, r5, r6, r7}
    cmp    r4, r5                @ Copy data segment if needed
1:    cmpne    r5, r6
    ldrne    fp, [r4], #4
    strne    fp, [r5], #4
    bne    1b

    mov    fp, #0                @ Clear BSS (and zero fp)
1:    cmp    r6, r7
    strcc    fp, [r6],#4
    bcc    1b

 ARM(    ldmia    r3, {r4, r5, r6, r7, sp})
 THUMB(    ldmia    r3, {r4, r5, r6, r7}    )
 THUMB(    ldr    sp, [r3, #16]        )
    str    r9, [r4]            @ Save processor ID
    str    r1, [r5]            @ Save machine type
    str    r2, [r6]            @ Save atags pointer
    bic    r4, r0, #CR_A            @ Clear 'A' bit
    stmia    r7, {r0, r4}            @ Save control register values
    b    start_kernel
ENDPROC(__mmap_switched)
...


    .align    2
    .type    __mmap_switched_data, %object
__mmap_switched_data:
    .long    __data_loc            @ r4
    .long    _sdata                @ r5
    .long    __bss_start            @ r6
    .long    _end                @ r7
    .long    processor_id            @ r4
    .long    __machine_arch_type        @ r5
    .long    __atags_pointer            @ r6
    .long    cr_alignment            @ r7
    .long    init_thread_union + THREAD_START_SP @ sp
    .size    __mmap_switched_data, . - __mmap_switched_data

__mmap_switched:
adr r3, __mmap_switched_data
...
ARM( ldmia r3, {r4, r5, r6, r7, sp})
b start_kernel

/* arch/arm/kernel/init_task.c */
...
/*
 * Initial thread structure.
 *
 * We need to make sure that this is 8192-byte aligned due to the
 * way process stacks are handled. This is done by making sure
 * the linker maps this in the .text segment right after head.S,
 * and making head.S ensure the proper alignment.
 *
 * The things we do for performance..
 */
union thread_union init_thread_union __init_task_data =
    { INIT_THREAD_INFO(init_task) };
...
/* include/linux/init_task.h */
...
/* Attach to the init_task data structure for proper alignment */
#define __init_task_data __attribute__((__section__(".data..init_task")))
...

So the idle process's SVC stack is set to init_thread_union + THREAD_START_SP. How the stacks of the other SMP cores are set up will be analyzed later.

  • linux kernel arm exception stack init
  • sets up the CPU (boot core) exception stacks
void __init start_kernel(void)
    |
    setup_arch(&command_line);
        |
        setup_processor();
            |
            cpu_init();
/* arch/arm/kernel/setup.c */

...
struct stack {
    u32 irq[3];
    u32 abt[3];
    u32 und[3];
} ____cacheline_aligned;

static struct stack stacks[NR_CPUS];
...

/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init sets up the per-CPU stacks.
 */
void cpu_init(void)
{
    unsigned int cpu = smp_processor_id();
    struct stack *stk = &stacks[cpu];

    if (cpu >= NR_CPUS) {
        printk(KERN_CRIT "CPU%u: bad primary CPU number\n", cpu);
        BUG();
    }

    cpu_proc_init();

    /*
     * Define the placement constraint for the inline asm directive below.
     * In Thumb-2, msr with an immediate value is not allowed.
     */
#ifdef CONFIG_THUMB2_KERNEL
#define PLC    "r"
#else
#define PLC    "I"
#endif

    /*
     * setup stacks for re-entrant exception handlers
     */
    __asm__ (
    "msr    cpsr_c, %1\n\t"
    "add    r14, %0, %2\n\t"
    "mov    sp, r14\n\t"
    "msr    cpsr_c, %3\n\t"
    "add    r14, %0, %4\n\t"
    "mov    sp, r14\n\t"
    "msr    cpsr_c, %5\n\t"
    "add    r14, %0, %6\n\t"
    "mov    sp, r14\n\t"
    "msr    cpsr_c, %7"
        :
        : "r" (stk),
          PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
          "I" (offsetof(struct stack, irq[0])),
          PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
          "I" (offsetof(struct stack, abt[0])),
          PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
          "I" (offsetof(struct stack, und[0])),
          PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
        : "r14");
}

Inline assembly embedded in ARM C code has the form: asm(assembly statements : output operand list : input operand list : clobber list)
The assembly statements are a sequence of instructions separated by ";", "\n" or "\n\t".
Operands in the instructions can refer to C variables through placeholders; at most 10 placeholders are allowed, named %0, %1, %2, …, %9.
Operands referenced through placeholders are always treated as long (4 bytes).

The input and output operand lists have the same format: one or more operands separated by commas, each operand written as:
[symbolic name used in the assembly] "constraint characters" (C variable or immediate value passed in)

Common constraint characters:
I: immediate constant
m: memory address (the address of a variable)
r: general-purpose register (r0-r15, used to pass values)
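As a standalone illustration of this syntax (a minimal user-space sketch, not kernel code; the names add_val and result are made up), a GCC extended-asm example for ARM using the "r" and "I" constraints could look like this:

/* build with an ARM gcc, e.g. arm-linux-gnueabi-gcc -O2 demo_asm.c */
#include <stdio.h>

int main(void)
{
    unsigned long add_val = 5;
    unsigned long result;

    __asm__ volatile(
        "add    %0, %1, %2\n\t"      /* result = add_val + 16 */
        : "=r" (result)              /* output operand, any core register */
        : "r" (add_val),             /* input operand, kept in a register */
          "I" (16)                   /* input operand, ARM immediate      */
        : "cc");                     /* clobber list: condition flags     */

    printf("%lu\n", result);         /* prints 21 */
    return 0;
}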

/* ARM® Architecture Reference Manual (ARMv7-A and ARMv7-R edition)
Application Level Programmers’ Model
Instruction Details*/

A8.6.102 MRS
A8.6.103 MSR (immediate)
A8.6.104 MSR (register)

Move to Special Register from ARM core register moves selected bits of a general-purpose register to the 
APSR.

Encoding T1 ARMv6T2, ARMv7
MSR<c> <spec_reg>,<Rn>
...
<spec_reg> Is one of:
• APSR_<bits>
• CPSR_<fields>.

In the ARM architecture, only the MSR instruction can write the processor status registers (CPSR/SPSR).

PS:
MRS
Move to Register from Special Register moves the value from the APSR into a general-purpose register.

Only the MRS instruction can read the processor status registers (CPSR/SPSR).

"msr    cpsr_c, %1\n\t"    /* 将(PSR_F_BIT | PSR_I_BIT | IRQ_MODE) 赋值给CPSR, 进入IRQ mode */
"add    r14, %0, %2\n\t"    /*  stk的地址值 + ((size_t) &((struct stack *)0)->irq[0]) 的值赋值给r14寄存器 */
"mov    sp, r14\n\t"    /* the value of r14 赋值给IRQ mode 的SP register */

依次设置IRQ,ABT,UND,模式的stack;  最后"msr    cpsr_c, %7", 切回SVC mode;
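A minimal sketch (user-space C, not kernel code) of how those "I" operands are computed: offsetof() expands to ((size_t)&((struct stack *)0)->member), so the three per-mode stacks start 12 bytes apart inside struct stack:

#include <stdio.h>
#include <stddef.h>

struct stack {
    unsigned int irq[3];
    unsigned int abt[3];
    unsigned int und[3];
};

int main(void)
{
    /* offsetof(type, member) is ((size_t)&((type *)0)->member) */
    printf("irq[0] at offset %zu\n", offsetof(struct stack, irq[0]));  /* 0  */
    printf("abt[0] at offset %zu\n", offsetof(struct stack, abt[0]));  /* 12 */
    printf("und[0] at offset %zu\n", offsetof(struct stack, und[0]));  /* 24 */
    return 0;
}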
  • early_trap_init(vectors)
void __init start_kernel(void)
    |
    setup_arch(&command_line);
        |
        paging_init(mdesc);
            |
            map_lowmem();
                |
                /* Map all the lowmem memory banks. */
            |
            devicemaps_init(mdesc);
                |
                /*
                 * Allocate the vector page early.
                 */
                vectors = early_alloc(PAGE_SIZE);

                early_trap_init(vectors);
                ...

                /*
                 * Create a mapping for the machine vectors at the high-vectors
                 * location (0xffff0000).  If we aren't using high-vectors, also
                 * create a mapping at the low-vectors virtual address.
                 */
                map.pfn = __phys_to_pfn(virt_to_phys(vectors));
                map.virtual = 0xffff0000;
                map.length = PAGE_SIZE;
                map.type = MT_HIGH_VECTORS;
                create_mapping(&map, false);

                if (!vectors_high()) {
                    map.virtual = 0;
                    map.type = MT_LOW_VECTORS;
                    create_mapping(&map, false);
                }
                ...

/* arch/arm/kernel/traps.c */

...
void __init early_trap_init(void *vectors_base)
{
    unsigned long vectors = (unsigned long)vectors_base;
    extern char __stubs_start[], __stubs_end[];
    extern char __vectors_start[], __vectors_end[];
    extern char __kuser_helper_start[], __kuser_helper_end[];
    int kuser_sz = __kuser_helper_end - __kuser_helper_start;

    vectors_page = vectors_base;

    /*
     * Copy the vectors, stubs and kuser helpers (in entry-armv.S)
     * into the vector page, mapped at 0xffff0000, and ensure these
     * are visible to the instruction stream.
     */
    memcpy((void *)vectors, __vectors_start, __vectors_end - __vectors_start);
    memcpy((void *)vectors + 0x200, __stubs_start, __stubs_end - __stubs_start);
    memcpy((void *)vectors + 0x1000 - kuser_sz, __kuser_helper_start, kuser_sz);

    /*
     * Do processor specific fixups for the kuser helpers
     */
    kuser_get_tls_init(vectors);

    /*
     * Copy signal return handlers into the vector page, and
     * set sigreturn to be a pointer to these.
     */
    memcpy((void *)(vectors + KERN_SIGRETURN_CODE - CONFIG_VECTORS_BASE),
           sigreturn_codes, sizeof(sigreturn_codes));
    memcpy((void *)(vectors + KERN_RESTART_CODE - CONFIG_VECTORS_BASE),
           syscall_restart_code, sizeof(syscall_restart_code));

    flush_icache_range(vectors, vectors + PAGE_SIZE);
    modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
}
...

call early_alloc(PAGE_SIZE)
|
memblock_alloc()
This allocates one page (4 KB) of memory.

memcpy((void *)vectors, __vectors_start, __vectors_end - __vectors_start);
memcpy((void *)vectors + 0x200, __stubs_start, __stubs_end - __stubs_start);
memcpy((void *)vectors + 0x1000 - kuser_sz, __kuser_helper_start, kuser_sz);

These three regions are copied into the page, laid out as follows:

----------------------------------------

__vectors_start
...
__vectors_end
...

__stubs_start
...
__stubs_end
...

__kuser_helper_start
...
__kuser_helper_end
...

----------------------------------------

The page is then mapped to the virtual address 0xffff0000.

reference:
/* ARM® Architecture Reference Manual (ARMv7-A and ARMv7-R edition)
Part B System Level Architecture
The System Level Programmers’ Model
B1.6 Exceptions
B1.6.1 Exception vectors and the exception base address */

If the Security Extensions are not implemented there is a single exception base address. This is controlled 
by the SCTLR.V bit:
V == 0  Exception base address = 0x00000000. This setting is referred to as normal vectors, or as low 
vectors.
V == 1  Exception base address = 0xFFFF0000. This setting is referred to as high vectors, or Hivecs.

32-bit ARM Linux does not configure Exception base address = 0x00000000, because 0x00000000 lies in the 0-3 GB user-space range; to avoid carving a hole out of user space, the whole contiguous 0-3 GB address range is left to applications.

  • exception entry
/* ARM® Architecture Reference Manual (ARMv7-A and ARMv7-R edition)
Part B System Level Architecture
The System Level Programmers’ Model
B1.6 Exceptions */

B1.6.3 Exception entry
On taking an exception:
1. The value of the CPSR is saved in the SPSR for the exception mode that is handling the exception. 
2. The value of (PC + exception-dependent offset) is saved in the LR for the exception mode that is handling the exception, see Table B1-4.
3. The CPSR and PC are updated with information for the exception handler:
    • The CPSR is updated with new context information. This includes:
        — Setting CPSR.M to the processor mode in which the exception is to be handled. 
        — Disabling appropriate classes of interrupt, to prevent uncontrolled nesting of exception handlers. For more information, see Table B1-6 on page B1-36, Table B1-7 on page B1-37, and Table B1-8 on page B1-37.
        — Setting the instruction set state to the instruction set chosen for exception entry, see Instruction set state on exception entry on page B1-35.
        — Setting the endianness to the value chosen for exception entry, see CPSR.E bit value on exception entry on page B1-38.
        — Clearing the IT[7:0] bits to 0.
        For more information, see CPSR M field and A, I, and F mask bit values on exception entry on page B1-36.
    • The appropriate exception vector is loaded to the PC, see Exception vectors and the exception base address on page B1-30.
4. Execution continues from the address held in the PC.
...

B1.6.4 Exception return
...

  • exception vectors
/* arch/arm/kernel/entry-armv.S */

...
    .equ    stubs_offset, __vectors_start + 0x200 - __stubs_start

    .globl    __vectors_start
__vectors_start:
 ARM(    swi    SYS_ERROR0    )
 THUMB(    svc    #0        )
 THUMB(    nop            )
    W(b)    vector_und + stubs_offset
    W(ldr)    pc, .LCvswi + stubs_offset
    W(b)    vector_pabt + stubs_offset
    W(b)    vector_dabt + stubs_offset
    W(b)    vector_addrexcptn + stubs_offset
    W(b)    vector_irq + stubs_offset
    W(b)    vector_fiq + stubs_offset

    .globl    __vectors_end
__vectors_end:
...

When an interrupt request occurs in SVC or USR processor mode:

  1. CPSR is saved in the SPSR for the IRQ exception mode.
  2. The value of PC is saved in the LR for the IRQ exception mode.
  3. The CPSR is updated
    — Setting CPSR.M to the IRQ exception mode.
    — Disabling IRQ interrupt.
    — Setting the instruction set state to the instruction set chosen for exception entry.
    — Setting the endianness to the value chosen for exception entry.
    — Clearing the IT[7:0] bits to 0.
    For more information, see CPSR M field and A, I, and F mask bit values on exception entry on page B1-36.
    (On exception entry, the CPSR.I bit is always set to 1, to disable IRQs)
    (On IRQ exception entry, CPSR.A is set to 1 and CPSR.F is unchanged)

    • The appropriate exception vector is loaded to the PC.

  4. Execution continues from the address held in the PC.

W(b) vector_irq + stubs_offset

.equ stubs_offset, __vectors_start + 0x200 - __stubs_start

/* arch/arm/kernel/entry-armv.S */

    .align    2
    @ handler addresses follow this label
1:
    .endm

    .globl    __stubs_start
__stubs_start:
/*
 * Interrupt dispatcher
 */
    vector_stub    irq, IRQ_MODE, 4

    .long    __irq_usr            @  0  (USR_26 / USR_32)
    .long    __irq_invalid            @  1  (FIQ_26 / FIQ_32)
    .long    __irq_invalid            @  2  (IRQ_26 / IRQ_32)
    .long    __irq_svc            @  3  (SVC_26 / SVC_32)
    .long    __irq_invalid            @  4
    .long    __irq_invalid            @  5
    .long    __irq_invalid            @  6
    .long    __irq_invalid            @  7
    .long    __irq_invalid            @  8
    .long    __irq_invalid            @  9
    .long    __irq_invalid            @  a
    .long    __irq_invalid            @  b
    .long    __irq_invalid            @  c
    .long    __irq_invalid            @  d
    .long    __irq_invalid            @  e
    .long    __irq_invalid            @  f

/*
 * Data abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
    vector_stub    dabt, ABT_MODE, 8

    .long    __dabt_usr            @  0  (USR_26 / USR_32)
    .long    __dabt_invalid            @  1  (FIQ_26 / FIQ_32)
    .long    __dabt_invalid            @  2  (IRQ_26 / IRQ_32)
    .long    __dabt_svc            @  3  (SVC_26 / SVC_32)
    .long    __dabt_invalid            @  4
    .long    __dabt_invalid            @  5
    .long    __dabt_invalid            @  6
    .long    __dabt_invalid            @  7
    .long    __dabt_invalid            @  8
    .long    __dabt_invalid            @  9
    .long    __dabt_invalid            @  a
    .long    __dabt_invalid            @  b
    .long    __dabt_invalid            @  c
    .long    __dabt_invalid            @  d
    .long    __dabt_invalid            @  e
    .long    __dabt_invalid            @  f

/*
 * Prefetch abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
    vector_stub    pabt, ABT_MODE, 4

    .long    __pabt_usr            @  0 (USR_26 / USR_32)
    .long    __pabt_invalid            @  1 (FIQ_26 / FIQ_32)
    .long    __pabt_invalid            @  2 (IRQ_26 / IRQ_32)
    .long    __pabt_svc            @  3 (SVC_26 / SVC_32)
    .long    __pabt_invalid            @  4
    .long    __pabt_invalid            @  5
    .long    __pabt_invalid            @  6
    .long    __pabt_invalid            @  7
    .long    __pabt_invalid            @  8
    .long    __pabt_invalid            @  9
    .long    __pabt_invalid            @  a
    .long    __pabt_invalid            @  b
    .long    __pabt_invalid            @  c
    .long    __pabt_invalid            @  d
    .long    __pabt_invalid            @  e
    .long    __pabt_invalid            @  f

/*
 * Undef instr entry dispatcher
 * Enter in UND mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 */
    vector_stub    und, UND_MODE

    .long    __und_usr            @  0 (USR_26 / USR_32)
    .long    __und_invalid            @  1 (FIQ_26 / FIQ_32)
    .long    __und_invalid            @  2 (IRQ_26 / IRQ_32)
    .long    __und_svc            @  3 (SVC_26 / SVC_32)
    .long    __und_invalid            @  4
    .long    __und_invalid            @  5
    .long    __und_invalid            @  6
    .long    __und_invalid            @  7
    .long    __und_invalid            @  8
    .long    __und_invalid            @  9
    .long    __und_invalid            @  a
    .long    __und_invalid            @  b
    .long    __und_invalid            @  c
    .long    __und_invalid            @  d
    .long    __und_invalid            @  e
    .long    __und_invalid            @  f

    .align    5

/*=============================================================================
 * Undefined FIQs
 *-----------------------------------------------------------------------------
 * Enter in FIQ mode, spsr = ANY CPSR, lr = ANY PC
 * MUST PRESERVE SVC SPSR, but need to switch to SVC mode to show our msg.
 * Basically to switch modes, we *HAVE* to clobber one register...  brain
 * damage alert!  I don't think that we can execute any code in here in any
 * other mode than FIQ...  Ok you can switch to another mode, but you can't
 * get out of that mode without clobbering one register.
 */
vector_fiq:
    subs    pc, lr, #4

/*=============================================================================
 * Address exception handler
 *-----------------------------------------------------------------------------
 * These aren't too critical.
 * (they're not supposed to happen, and won't happen in 32-bit data mode).
 */

vector_addrexcptn:
    b    vector_addrexcptn

/*
 * We group all the following data together to optimise
 * for CPUs with separate I & D caches.
 */
    .align    5

.LCvswi:
    .word    vector_swi

    .globl    __stubs_end
__stubs_end:

/* arch/arm/kernel/entry-armv.S */

/*
 * Vector stubs.
 *
 * This code is copied to 0xffff0200 so we can use branches in the
 * vectors, rather than ldr's.  Note that this code must not
 * exceed 0x300 bytes.
 *
 * Common stub entry macro:
 *   Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 *
 * SP points to a minimal amount of processor-private memory, the address
 * of which is copied into r0 for the mode specific abort handler.
 */
    .macro    vector_stub, name, mode, correction=0
    .align    5

vector_\name:
    .if \correction
    sub    lr, lr, #\correction
    .endif

    @
    @ Save r0, lr_<exception> (parent PC) and spsr_<exception>
    @ (parent CPSR)
    @
    stmia    sp, {r0, lr}        @ save r0, lr
    mrs    lr, spsr
    str    lr, [sp, #8]        @ save spsr

    @
    @ Prepare for SVC32 mode.  IRQs remain disabled.
    @
    mrs    r0, cpsr
    eor    r0, r0, #(\mode ^ SVC_MODE | PSR_ISETSTATE)
    msr    spsr_cxsf, r0

    @
    @ the branch table must immediately follow this code
    @
    and    lr, lr, #0x0f
 THUMB(    adr    r0, 1f            )
 THUMB(    ldr    lr, [r0, lr, lsl #2]    )
    mov    r0, sp
 ARM(    ldr    lr, [pc, lr, lsl #2]    )
    movs    pc, lr            @ branch to handler in SVC mode
ENDPROC(vector_\name)

vector_stub irq, IRQ_MODE, 4
Expanding this macro, with the THUMB()-related code removed:

The THUMB() and ARM() preprocessor macros conditionally compile source code according to the target instruction set;
if the target instruction set is ARM, the THUMB() macro expands to nothing (a no-op);
if the target instruction set is Thumb, the THUMB() macro expands to its arguments.

The CONFIG_THUMB2_KERNEL macro is not defined here.

/* arch/arm/include/asm/unified.h */
...
#ifdef CONFIG_THUMB2_KERNEL

#if __GNUC__ < 4
#error Thumb-2 kernel requires gcc >= 4
#endif

/* The CPSR bit describing the instruction set (Thumb) */
#define PSR_ISETSTATE    PSR_T_BIT

#define ARM(x...)
#define THUMB(x...)    x
#ifdef __ASSEMBLY__
#define W(instr)    instr.w
#define BSYM(sym)    sym + 1
#endif

#else    /* !CONFIG_THUMB2_KERNEL */

/* The CPSR bit describing the instruction set (ARM) */
#define PSR_ISETSTATE    0

#define ARM(x...)    x
#define THUMB(x...)
#ifdef __ASSEMBLY__
#define W(instr)    instr
#define BSYM(sym)    sym
#endif

#endif    /* CONFIG_THUMB2_KERNEL */
...


This gives:

    .align    2
    @ handler addresses follow this label
1:
    .endm

    .globl    __stubs_start
__stubs_start:
/*
 * Interrupt dispatcher
 */
    /*vector_stub    irq, IRQ_MODE, 4*/

/*    .macro    vector_stub, name, mode, correction=0 */
    .align    5

vector_\name:
    .if \correction
    sub    lr, lr, #\correction
    .endif
/* On an IRQ, W(b)    vector_irq + stubs_offset branches here, to sub    lr, lr, #\correction.
At this point, in IRQ mode, the current context is:
r0 to r12: untouched, still the pre-interrupt context;
lr: the IRQ-mode lr, holding the PC value at the moment of the interrupt;
sp: the IRQ-mode sp, holding the address of the IRQ-mode stack;
cpsr: IRQ mode;
spsr: holds the pre-interrupt CPSR (SVC/USR).

correction is 4:
sub    lr, lr, #\correction; lr = lr - 4;

The ARM pipeline for executing an instruction includes at least: fetch, decode, execute, (memory access), (write back).
The PC register holds the fetch address, so the instruction being executed when the interrupt hit is at PC - 8; when interrupt handling finishes, execution must resume at the next instruction, whose address is PC - 4, hence 4 is subtracted from lr. Different exception modes use different correction values.
*/

    @
    @ Save r0, lr_<exception> (parent PC) and spsr_<exception>
    @ (parent CPSR)
    @
    stmia    sp, {r0, lr}        @ save r0, lr
    mrs    lr, spsr
    str    lr, [sp, #8]        @ save spsr

/* stmia sp, {r0, lr}: store r0 and lr to the address held in sp (IRQ); sp has no "!" suffix, so sp is not updated.
r0 is needed later, so it must be saved to the stack first; this stack is the IRQ stack, only 12 bytes in size.
lr holds pc(svc/usr) - 4, the return address at the moment the interrupt was taken.
mrs    lr, spsr; spsr(irq) holds the pre-interrupt SVC/USR cpsr; copy spsr into lr.
str    lr, [sp, #8]; store lr at address sp + 8 (sp itself is not updated).
The IRQ stack now holds, in order: r0, lr (SVC/USR pc), SPSR (SVC/USR cpsr).
*/

    @
    @ Prepare for SVC32 mode.  IRQs remain disabled.
    @
    mrs    r0, cpsr
    eor    r0, r0, #(\mode ^ SVC_MODE | PSR_ISETSTATE)
    msr    spsr_cxsf, r0

/* mrs    r0, cpsr; copy the value of cpsr into r0.
eor    r0, r0, #(\mode ^ SVC_MODE | PSR_ISETSTATE); eor is the exclusive-or instruction; r0 = r0 eor immediate.
\mode here is IRQ_MODE (arch/arm/include/asm/ptrace.h):
#define IRQ_MODE    0x00000012
#define SVC_MODE    0x00000013

eor r0, r0, #(0x12 ^ 0x13 | 0x0)
eor r0, r0, #(0x01)
r0 (M[4:0]: 10010) eor 0x01 -> r0 (M[4:0]: 10011), i.e. the CPSR mode field becomes SVC mode.

msr    spsr_cxsf, r0; save (parts of) r0 into SPSR_irq, whose M[4:0] field is now SVC mode.
(msr here writes SPSR_<fields>, where <fields> "Is a sequence of one or more of" c, x, s, f, which represent bits 7:0, 15:8, 23:16 and 31:24 respectively.)

*/

    @
    @ the branch table must immediately follow this code
    @
    and    lr, lr, #0x0f
    mov    r0, sp
 ARM(    ldr    lr, [pc, lr, lsl #2]    )
    movs    pc, lr            @ branch to handler in SVC mode
/*ENDPROC(vector_\name)*/

    .long    __irq_usr            @  0  (USR_26 / USR_32)
    .long    __irq_invalid            @  1  (FIQ_26 / FIQ_32)
    .long    __irq_invalid            @  2  (IRQ_26 / IRQ_32)
    .long    __irq_svc            @  3  (SVC_26 / SVC_32)
    .long    __irq_invalid            @  4
    .long    __irq_invalid            @  5
    .long    __irq_invalid            @  6
    .long    __irq_invalid            @  7
    .long    __irq_invalid            @  8
    .long    __irq_invalid            @  9
    .long    __irq_invalid            @  a
    .long    __irq_invalid            @  b
    .long    __irq_invalid            @  c
    .long    __irq_invalid            @  d
    .long    __irq_invalid            @  e
    .long    __irq_invalid            @  f

/* and    lr, lr, #0x0f; lr &= 0x0f; lr holds the pre-interrupt cpsr, and masking it with 0x0f tells us whether the interrupt was taken from SVC mode or from USR mode.
reference: B1.3.1 ARM processor modes
processor mode, mode encoding,
User            10000
FIQ             10001
IRQ             10010
Supervisor      10011
Monitor         10110
Abort           10111
Undefined       11011
System          11111

lr & 0x0f == 0: the interrupt was taken from User mode; == 3: taken from SVC mode.
mov    r0, sp; copy the value of sp (IRQ) into r0.
ARM(    ldr    lr, [pc, lr, lsl #2]    ); lr = [pc + (lr << 2)]; lsl #2 shifts left by 2 bits, i.e. multiplies by 4.
If lr == 0, lr is loaded from [pc];
if lr == 3, lr is loaded from [pc + 12].

The ARM pipeline for executing an instruction includes at least: fetch, decode, execute, (memory access), (write back).
The pc register holds the fetch address; when ARM(    ldr    lr, [pc, lr, lsl #2]    ) executes, pc holds the address of the current instruction + 8.

So:
if lr == 0, lr is loaded from the word at the current instruction + 8, i.e. __irq_usr;
if lr == 3, lr is loaded from the word at the current instruction + 8 + 12, i.e. __irq_svc.

At this point, in IRQ mode, the current context is:
r0 holds the value of sp (IRQ); the IRQ stack holds the pre-interrupt r0, pc and cpsr;
r1 to r12: untouched since the interrupt, still the pre-interrupt context;
sp points to the IRQ stack;
cpsr: IRQ mode;
spsr: IRQ mode; the cxsf fields of CPSR_irq were saved into SPSR_irq, with M[4:0] changed to SVC mode.

movs    pc, lr; copy lr into pc,
branching to
__irq_usr or __irq_svc.

movs,
s, If present, specifies that the instruction updates the flags. Otherwise, the instruction does not update the flags.
When MOVS pc, lr executes, SPSR_irq is also copied (moved) into CPSR, i.e. the processor switches to SVC mode.

After the switch to SVC mode, the current context is:
r0 holds the value of sp (IRQ); the sp_irq stack holds the pre-interrupt r0, pc and cpsr;
r1 to r12: untouched since the interrupt, still the pre-interrupt context;
sp points to the SVC stack;
cpsr: SVC mode;
spsr: SVC mode.

PS:
user mode context:       svc mode context:        irq mode context: 
R0_usr                   R0_usr                   R0_usr
R1_usr                   R1_usr                   R1_usr
R2_usr                   R2_usr                   R2_usr
R3_usr                   R3_usr                   R3_usr
R4_usr                   R4_usr                   R4_usr
R5_usr                   R5_usr                   R5_usr
R6_usr                   R6_usr                   R6_usr
R7_usr                   R7_usr                   R7_usr
R8_usr                   R8_usr                   R8_usr
R9_usr                   R9_usr                   R9_usr
R10_usr                  R10_usr                  R10_usr
R11_usr                  R11_usr                  R11_usr
R12_usr                  R12_usr                  R12_usr
SP_usr                   SP_svc                   SP_irq
LR_usr                   LR_svc                   LR_irq
PC                       PC                       PC
CPSR                     CPSR                     CPSR
                         SPSR_svc                 SPSR_irq
*/


/*
 * Data abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
    vector_stub    dabt, ABT_MODE, 8

    .long    __dabt_usr            @  0  (USR_26 / USR_32)
    .long    __dabt_invalid            @  1  (FIQ_26 / FIQ_32)
    .long    __dabt_invalid            @  2  (IRQ_26 / IRQ_32)
    .long    __dabt_svc            @  3  (SVC_26 / SVC_32)
    .long    __dabt_invalid            @  4
    .long    __dabt_invalid            @  5
    .long    __dabt_invalid            @  6
    .long    __dabt_invalid            @  7
    .long    __dabt_invalid            @  8
    .long    __dabt_invalid            @  9
    .long    __dabt_invalid            @  a
    .long    __dabt_invalid            @  b
    .long    __dabt_invalid            @  c
    .long    __dabt_invalid            @  d
    .long    __dabt_invalid            @  e
    .long    __dabt_invalid            @  f
...

  • __irq_usr
/* arch/arm/kernel/entry-armv.S */

    .align    5
__irq_usr:
    usr_entry
    kuser_cmpxchg_check
    irq_handler
    get_thread_info tsk
    mov    why, #0
    b    ret_to_user_from_irq
 UNWIND(.fnend        )
ENDPROC(__irq_usr)

The usr_entry macro, with the THUMB() code removed:

/* arch/arm/kernel/entry-armv.S */

/*
 * User mode handlers
 *
 * EABI note: sp_svc is always 64-bit aligned here, so should S_FRAME_SIZE
 */

#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5) && (S_FRAME_SIZE & 7)
#error "sizeof(struct pt_regs) must be a multiple of 8"
#endif

    .macro    usr_entry
 UNWIND(.fnstart    )
 UNWIND(.cantunwind    )    @ don't unwind the user space
    sub    sp, sp, #S_FRAME_SIZE
 ARM(    stmib    sp, {r1 - r12}    )

/*
#define S_FRAME_SIZE 72    /* sizeof(struct pt_regs), from include/generated/asm-offsets.h */
We are in SVC mode here.
sp = sp - 72; the ARM stack grows from high addresses towards low addresses; 72 / 4 = 18, so subtracting 72 first reserves 72 bytes of stack space.
Why 18 * sizeof(unsigned long)? That is the size of the saved context:

arch/arm/include/asm/ptrace.h:
/*
 * This struct defines the way the registers are stored on the
 * stack during a system call.  Note that sizeof(struct pt_regs)
 * has to be a multiple of 8.
 */
#ifndef __KERNEL__
struct pt_regs {
    long uregs[18];
};
#else /* __KERNEL__ */
struct pt_regs {
    unsigned long uregs[18];
};
#endif /* __KERNEL__ */

#define ARM_cpsr    uregs[16]
#define ARM_pc        uregs[15]
#define ARM_lr        uregs[14]
#define ARM_sp        uregs[13]
#define ARM_ip        uregs[12]
#define ARM_fp        uregs[11]
#define ARM_r10        uregs[10]
#define ARM_r9        uregs[9]
#define ARM_r8        uregs[8]
#define ARM_r7        uregs[7]
#define ARM_r6        uregs[6]
#define ARM_r5        uregs[5]
#define ARM_r4        uregs[4]
#define ARM_r3        uregs[3]
#define ARM_r2        uregs[2]
#define ARM_r1        uregs[1]
#define ARM_r0        uregs[0]
#define ARM_ORIG_r0    uregs[17]


ARM(    stmib    sp, {r1 - r12}    )
STMIB stores a register list to memory, incrementing the address before each store (addresses ascending); with "!" (e.g. STMIB sp!, {r1-r12}) the base register would be written back.
Here sp has no "!", so the address held in sp is unchanged.

low address
|_       address held in sp
|_r1
|_r2
|_r3
|_r4
|_r5
|_r6
|_r7
|_r8
|_r9
|_r10
|_r11
|_r12
|_
|_
|_
|_
|_
high address

*/

    ldmia    r0, {r3 - r5}
    add    r0, sp, #S_PC        @ here for interlock avoidance
    mov    r6, #-1            @  ""  ""     ""        ""

    str    r3, [sp]        @ save the "real" r0 copied
                    @ from the exception stack

/* r0 holds the address of the IRQ-mode stack.
ldmia    r0, {r3 - r5}
LDMIA loads multiple words from the address held in r0 into r3-r5; with "!" r0 would be updated, but there is no "!" here, so r0 is unchanged.
r3, r4 and r5 thus hold, in order, the pre-interrupt r0, pc and cpsr.

add    r0, sp, #S_PC; r0 = sp + S_PC;
include/generated/asm-offsets.h:
#define S_PC 60 /* offsetof(struct pt_regs, ARM_pc)     @ */
60 / 4 = 15

mov    r6, #-1

str    r3, [sp]

At this point the SVC stack looks like:
low address
|_r0     address held in sp
|_r1
|_r2
|_r3
|_r4
|_r5
|_r6
|_r7
|_r8
|_r9
|_r10
|_r11
|_r12
|_
|_
|_       address held in r0
|_
|_
high address

*/

    @
    @ We are now ready to fill in the remaining blanks on the stack:
    @
    @  r4 - lr_<exception>, already fixed up for correct return/restart
    @  r5 - spsr_<exception>
    @  r6 - orig_r0 (see pt_regs definition in ptrace.h)
    @
    @ Also, separately save sp_usr and lr_usr
    @
    stmia    r0, {r4 - r6}
 ARM(    stmdb    r0, {sp, lr}^            )

/* stmia    r0, {r4 - r6}
r4 and r5 hold the pre-interrupt pc (lr_irq) and cpsr (spsr_irq); r6 holds -1.
Store r4-r6 to the address held in r0, addresses ascending; r0 is not updated.

low address
|_r0     address held in sp
|_r1
|_r2
|_r3
|_r4
|_r5
|_r6
|_r7
|_r8
|_r9
|_r10
|_r11
|_r12
|_
|_
|_pc     address held in r0
|_cpsr
|_-1
high address

 ARM(    stmdb    r0, {sp, lr}^            )
Store sp and lr to memory below r0, addresses descending; the "^" means the USR-mode banked registers are accessed. (STM places the lowest-numbered register at the lowest address, so sp_usr ends up below lr_usr, matching ARM_sp = uregs[13] and ARM_lr = uregs[14].)

This gives:
low address
|_r0     address held in the SVC sp
|_r1
|_r2
|_r3
|_r4
|_r5
|_r6
|_r7
|_r8
|_r9
|_r10
|_r11
|_r12
|_sp_usr
|_lr_usr
|_pc     address held in r0
|_cpsr
|_-1
high address

At this point the whole pre-interrupt context has been saved on the SVC stack.
When interrupt handling finishes, this context is restored and execution resumes at the instruction following the one that was interrupted.

mov r6, #-1; why is orig_r0 needed? (orig_r0 records the original r0 of a system call for restart handling; -1 here marks "not a syscall".)

*/

    @
    @ Enable the alignment trap while in kernel mode
    @
    alignment_trap r0

    @
    @ Clear FP to mark the first stack frame
    @
    zero_fp

#ifdef CONFIG_IRQSOFF_TRACER
    bl    trace_hardirqs_off
#endif
    .endm

  • irq_handler:
/* arch/arm/kernel/entry-armv.S */

/*
 * Interrupt handling.
 */
    .macro    irq_handler
#ifdef CONFIG_MULTI_IRQ_HANDLER
    ldr    r1, =handle_arch_irq
    mov    r0, sp
    adr    lr, BSYM(9997f)
    ldr    pc, [r1]
#else
    arch_irq_handler_default
#endif
9997:
    .endm

/*
./include/generated/autoconf.h:813:#define CONFIG_MULTI_IRQ_HANDLER 1

ldr    r1, =handle_arch_irq
mov    r0, sp; /* the SVC sp is copied into r0, passed as the argument (struct pt_regs *) */
adr    lr, BSYM(9997f); /* adr pseudo-instruction: put the address of label 9997 into lr; the "f" suffix means the label is searched forwards */
./arch/arm/include/asm/unified.h:52:#define BSYM(sym)    sym
irq_handler is a macro; here the address of the code at label 9997 (the instruction right after the dispatch) is placed in lr,
in preparation for the return from handle_arch_irq.

ldr    pc, [r1]; /* branch to handle_arch_irq() */

*/
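In C terms, the CONFIG_MULTI_IRQ_HANDLER dispatch amounts to a call through a function pointer that the machine descriptor fills in at boot. A rough sketch (the names mirror the kernel's; the wrapper irq_dispatch is made up for illustration):

struct pt_regs;                              /* register frame saved by usr_entry/svc_entry */

/* set once in setup_arch(): handle_arch_irq = mdesc->handle_irq; */
void (*handle_arch_irq)(struct pt_regs *regs);

static void irq_dispatch(struct pt_regs *regs)
{
        handle_arch_irq(regs);               /* on this SoC: gic_handle_irq(regs) */
}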
  • IRQ exit

Note:

/* ./arch/arm/kernel/entry-header.S */

/*
 * These are the registers used in the syscall handler, and allow us to
 * have in theory up to 7 arguments to a function - r0 to r6.
 *
 * r7 is reserved for the system call number for thumb mode.
 *
 * Note that tbl == why is intentional.
 *
 * We must set at least "tsk" and "why" when calling ret_with_reschedule.
 */
scno    .req    r7              @ syscall number
tbl     .req    r8              @ syscall table pointer
why     .req    r8              @ Linux syscall (!= 0)
tsk     .req    r9              @ current thread_info

get_thread_info tsk:

/* ./arch/arm/kernel/entry-header.S */

    .macro    get_thread_info, rd
    mov    \rd, sp, lsr #13
    mov    \rd, \rd, lsl #13
    .endm

/*
Take the value held in SP_svc, shift it right by 13 bits and then left by 13 bits (13 bits = 8 KB alignment).
This yields the address of the current process's thread_info, which is placed in the \rd register.
*/
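A rough user-space sketch of the same masking (just the arithmetic, not the kernel's current_thread_info()): the kernel stack is 8 KB (2^13) aligned and struct thread_info sits at its low end, so clearing the low 13 bits of SP yields its address.

#include <stdio.h>

#define THREAD_SIZE (1UL << 13)                      /* 8 KB kernel stack */

static unsigned long thread_info_from_sp(unsigned long sp)
{
    return sp & ~(THREAD_SIZE - 1);                  /* same as (sp >> 13) << 13 */
}

int main(void)
{
    unsigned long sp = 0xc7a51e30UL;                 /* made-up SVC stack pointer value */
    printf("thread_info at 0x%08lx\n", thread_info_from_sp(sp));  /* 0xc7a50000 */
    return 0;
}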

ret_to_user_from_irq:

/* arch/arm/kernel/entry-common.S */

/*
 * "slow" syscall return path.  "why" tells us if this was a real syscall.
 */
ENTRY(ret_to_user)
ret_slow_syscall:
    disable_irq                @ disable interrupts
ENTRY(ret_to_user_from_irq)
    ldr    r1, [tsk, #TI_FLAGS]
    tst    r1, #_TIF_WORK_MASK
    bne    work_pending
no_work_pending:
#if defined(CONFIG_IRQSOFF_TRACER)
    asm_trace_hardirqs_on
#endif
    /* perform architecture specific actions before user return */
    arch_ret_to_user r1, lr

    restore_user_regs fast = 0, offset = 0
ENDPROC(ret_to_user_from_irq)
ENDPROC(ret_to_user)

/*
Before ret_to_user_from_irq is reached, we had:
__irq_usr:
    usr_entry
    kuser_cmpxchg_check
    irq_handler
    get_thread_info tsk
    mov    why, #0
    b    ret_to_user_from_irq

The thread_info address is placed in r9 (tsk);
r8 (why) is set to 0;
then b ret_to_user_from_irq.
*/

#if 0
/* arch/arm/include/asm/thread_info.h */

/*
 * low level task data that entry.S needs immediate access to.
 * __switch_to() assumes cpu_context follows immediately after cpu_domain.
 */
struct thread_info {
    unsigned long        flags;        /* low level flags */
    int            preempt_count;    /* 0 => preemptable, <0 => bug */
    mm_segment_t        addr_limit;    /* address limit */
    struct task_struct    *task;        /* main task structure */
    struct exec_domain    *exec_domain;    /* execution domain */
    __u32            cpu;        /* cpu */
    __u32            cpu_domain;    /* cpu domain */
    struct cpu_context_save    cpu_context;    /* cpu context */
    __u32            syscall;    /* syscall number */
    __u8            used_cp[16];    /* thread used copro */
    unsigned long        tp_value;
    struct crunch_state    crunchstate;
    union fp_state        fpstate __attribute__((aligned(8)));
    union vfp_state        vfpstate;
#ifdef CONFIG_ARM_THUMBEE
    unsigned long        thumbee_state;    /* ThumbEE Handler Base register */
#endif
    struct restart_block    restart_block;
};
#endif

/*
arch/arm/kernel/asm-offsets.c:51:  DEFINE(TI_FLAGS,        offsetof(struct thread_info, flags));
arch/arm//include/asm/thread_info.h:174:#define _TIF_WORK_MASK        0x000000ff
*/

#if 0
/* arch/arm/include/asm/thread_info.h */

/*
 * We use bit 30 of the preempt_count to indicate that kernel
 * preemption is occurring.  See <asm/hardirq.h>.
 */
#define PREEMPT_ACTIVE    0x40000000

/*
 * thread information flags:
 *  TIF_SYSCALL_TRACE    - syscall trace active
 *  TIF_SYSCAL_AUDIT    - syscall auditing active
 *  TIF_SIGPENDING    - signal pending
 *  TIF_NEED_RESCHED    - rescheduling necessary
 *  TIF_NOTIFY_RESUME    - callback before returning to user
 *  TIF_USEDFPU        - FPU was used by this task this quantum (SMP)
 *  TIF_POLLING_NRFLAG    - true if poll_idle() is polling TIF_NEED_RESCHED
 */
#define TIF_SIGPENDING        0
#define TIF_NEED_RESCHED    1
#define TIF_NOTIFY_RESUME    2    /* callback before returning to user */
#define TIF_SYSCALL_TRACE    8
#define TIF_SYSCALL_AUDIT    9
#define TIF_POLLING_NRFLAG    16
#define TIF_USING_IWMMXT    17
#define TIF_MEMDIE        18    /* is terminating due to OOM killer */
#define TIF_RESTORE_SIGMASK    20
#define TIF_SECCOMP        21
#define TIF_SWITCH_MM        22    /* deferred switch_mm */

#define _TIF_SIGPENDING        (1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED    (1 << TIF_NEED_RESCHED)
#define _TIF_NOTIFY_RESUME    (1 << TIF_NOTIFY_RESUME)
#define _TIF_SYSCALL_TRACE    (1 << TIF_SYSCALL_TRACE)
#define _TIF_SYSCALL_AUDIT    (1 << TIF_SYSCALL_AUDIT)
#define _TIF_POLLING_NRFLAG    (1 << TIF_POLLING_NRFLAG)
#define _TIF_USING_IWMMXT    (1 << TIF_USING_IWMMXT)
#define _TIF_RESTORE_SIGMASK    (1 << TIF_RESTORE_SIGMASK)
#define _TIF_SECCOMP        (1 << TIF_SECCOMP)


/* Checks for any syscall work in entry-common.S*/
#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT)


/*
 * Change these and you break ASM code in entry-common.S
 */
#define _TIF_WORK_MASK        0x000000ff

#endif

ldr    r1, [tsk, #TI_FLAGS]
tst    r1, #_TIF_WORK_MASK
bne    work_pending

Test whether thread_info->flags & 0xff is non-zero.
ne: flag Z = 0, not equal
eq: flag Z = 1, equal

If the result is non-zero, branch to work_pending.

This checks whether the current task has flags such as a pending signal or a pending reschedule; if so, work_pending is entered to handle them.
This shows that the kernel handles signals after an exception has occurred, just before returning from the exception to user space (still in the kernel). A small sketch of the check follows.
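A plain-C sketch of this check (flag values copied from thread_info.h above; the control flow is only illustrative):

#include <stdio.h>

#define _TIF_SIGPENDING    (1 << 0)
#define _TIF_NEED_RESCHED  (1 << 1)
#define _TIF_WORK_MASK     0x000000ff

int main(void)
{
    unsigned long flags = _TIF_NEED_RESCHED;     /* e.g. a reschedule is pending */

    if (flags & _TIF_WORK_MASK)
        printf("work_pending: handle signals / reschedule before returning\n");
    else
        printf("no_work_pending: restore user registers and return\n");
    return 0;
}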

arch_ret_to_user r1, lr:

/* arch/arm/kernel/entry-common.S */

#ifdef CONFIG_NEED_RET_TO_USER
#include <mach/entry-macro.S>
#else
    .macro  arch_ret_to_user, tmp1, tmp2
    .endm
#endif

/* CONFIG_NEED_RET_TO_USER is not defined */

restore_user_regs fast = 0, offset = 0

/* arch/arm/kernel/entry-header.S */

    .macro    restore_user_regs, fast = 0, offset = 0
    ldr    r1, [sp, #\offset + S_PSR]    @ get calling cpsr
    ldr    lr, [sp, #\offset + S_PC]!    @ get pc
    msr    spsr_cxsf, r1            @ save in spsr_svc

    .if    \fast
    ldmdb    sp, {r1 - lr}^            @ get calling r1 - lr
    .else
    ldmdb    sp, {r0 - lr}^            @ get calling r0 - lr
    .endif
    mov    r0, r0                @ ARMv5T and earlier require a nop
                        @ after ldm {}^
    add    sp, sp, #S_FRAME_SIZE - S_PC
    movs    pc, lr                @ return & move spsr_svc into cpsr
    .endm

/*
./kernel/asm-offsets.c:92:  DEFINE(S_PSR,            offsetof(struct pt_regs, ARM_cpsr));
./kernel/asm-offsets.c:91:  DEFINE(S_PC,            offsetof(struct pt_regs, ARM_pc));

ldr    r1, [sp, #\offset + S_PSR]    @ get calling cpsr
ldr    lr, [sp, #\offset + S_PC]!    @ get pc
msr    spsr_cxsf, r1            @ save in spsr_svc

As the comments say.
One thing to note here (it took me a long time to spot):
ldr    lr, [sp, #\offset + S_PC]!    @ get pc;
there is a "!" here; the "!" controls whether the final base-plus-offset address is written back to the base register;
so the value held in sp is updated to sp + \offset + S_PC, i.e. sp now points at the saved pc slot.


svc stack:
low address
|_r0     address held in the SVC sp
|_r1
|_r2
|_r3
|_r4
|_r5
|_r6
|_r7
|_r8
|_r9
|_r10
|_r11
|_r12
|_sp_usr
|_lr_usr
|_pc
|_cpsr
|_-1
high address

ldmdb    sp, {r0 - lr}^            @ get calling r0 - lr
Load r0-lr from the memory just below the address held in SP_svc (lowest-numbered register from the lowest address); the "^" means the USR-mode banked registers (sp_usr, lr_usr) are loaded; sp has no "!", so it is not updated.

add    sp, sp, #S_FRAME_SIZE - S_PC;  the value held in SP_svc goes back to where it was before the interrupt occurred.

movs    pc, lr                @ return & move spsr_svc into cpsr

*/

  • __irq_svc
/* arch/arm/kernel/entry-armv.S */

    .align    5
__irq_svc:
    svc_entry
    irq_handler

#ifdef CONFIG_PREEMPT
    get_thread_info tsk
    ldr    r8, [tsk, #TI_PREEMPT]        @ get preempt count
    ldr    r0, [tsk, #TI_FLAGS]        @ get flags
    teq    r8, #0                @ if preempt count != 0
    movne    r0, #0                @ force flags to 0
    tst    r0, #_TIF_NEED_RESCHED
    blne    svc_preempt
#endif

#ifdef CONFIG_TRACE_IRQFLAGS
    @ The parent context IRQs must have been enabled to get here in
    @ the first place, so there's no point checking the PSR I bit.
    bl    trace_hardirqs_on
#endif
    svc_exit r5                @ return from exception
 UNWIND(.fnend        )
ENDPROC(__irq_svc)

  • handle_arch_irq()
void __init start_kernel(void)
    |
    /* arch/arm/kernel/setup.c */
    void __init setup_arch(char **cmdline_p)
        |
        ...

        /* arm/kernel/entry-armv.S irq_handler, call handle_arch_irq() */
        #ifdef CONFIG_MULTI_IRQ_HANDLER
            handle_arch_irq = mdesc->handle_irq;  /* call gic_handle_irq() */
        #endif
        ...
  • void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
/* arch/arm/mach-s5p6818/gic.c */

asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
{
    u32 irqstat, irqnr;
    struct gic_chip_data *gic = &gic_data[0];
    void __iomem *cpu_base = gic_data_cpu_base(gic);

    /*printk("~~~ %s() ARM_cpsr:0x%08x\n", __func__, regs->ARM_cpsr);*/

    do {
        irqstat = readl_relaxed(cpu_base + GIC_CPU_INTACK);
        //printk("~~~ %s() irq ack reg:0x%08x\n", __func__, irqstat);
        irqnr = irqstat & ~0x1c00;

        if (likely(irqnr > 15 && irqnr < 1021)) {
            if (irqnr >= IRQ_PHY_GPIOA)
                printk("~~~ %s() hwirq:%d\n", __func__, irqnr);
            irqnr = irq_find_mapping(gic->domain, irqnr);
            if (irqnr >= IRQ_PHY_GPIOA)
                printk("~~~ %s() irqnr:%d\n", __func__, irqnr);
            handle_IRQ(irqnr, regs);
            continue;
        }
        if (irqnr < 16) {
            writel_relaxed(irqstat, cpu_base + GIC_CPU_EOI);
#ifdef CONFIG_SMP
            handle_IPI(irqnr, regs);
#endif
            continue;
        }
        break;
    } while (1);
}

irqstat = readl_relaxed(cpu_base + GIC_CPU_INTACK);

#define GIC_CPU_INTACK 0x0c

=================================================

ARM® Generic Interrupt Controller Architecture version 2.0,
4.4.4 Interrupt Acknowledge Register, GICC_IAR;
Purpose: The processor reads this register to obtain the interrupt ID of the signaled interrupt. This read acts as an acknowledge for the interrupt.

Interrupt Acknowledge Register, GICC_IAR, [9:0]: the interrupt ID.

        if (irqnr < 16) {
            writel_relaxed(irqstat, cpu_base + GIC_CPU_EOI);
            handle_IPI(irqnr, regs);
            continue;
        }

#define GIC_CPU_EOI 0x10

==============================================

4.4.5 End of Interrupt Register, GICC_EOIR
Purpose: A processor writes to this register to inform the CPU interface either:
• that it has completed the processing of the specified interrupt
• in a GICv2 implementation, when the appropriate GICC_CTLR.EOImode bit is set to 1, to indicate that the interface should perform priority drop for the specified interrupt.

[9:0] EOIINTID The Interrupt ID value from the corresponding GICC_IAR access.

Read the GIC CPU interface Interrupt Acknowledge Register to obtain the interrupt ID.
If the ID is less than 16, it is an SGI (Software Generated Interrupt), used for communication between cores.
Write the ID back to the GIC CPU interface End of Interrupt Register to indicate that processing of the specified interrupt has completed.
Then call handle_IPI(irqnr, regs) and continue, until all pending interrupts have been handled.
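A small sketch of the GICC_IAR decoding described above (GICv2 layout: bits [9:0] are the interrupt ID, bits [12:10] the source CPU for SGIs; the register value below is made up):

#include <stdio.h>

int main(void)
{
    unsigned int irqstat = 0x00000445;               /* hypothetical GICC_IAR read     */
    unsigned int irqnr   = irqstat & 0x3ff;          /* interrupt ID, bits [9:0]       */
    unsigned int srccpu  = (irqstat >> 10) & 0x7;    /* requesting CPU, SGIs only      */

    if (irqnr < 16)
        printf("SGI %u from CPU%u -> handle_IPI()\n", irqnr, srccpu);
    else if (irqnr < 1021)
        printf("PPI/SPI %u -> irq_find_mapping() + handle_IRQ()\n", irqnr);
    else
        printf("spurious interrupt (ID %u)\n", irqnr);
    return 0;
}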

        if (likely(irqnr > 15 && irqnr < 1021)) {
            if (irqnr >= IRQ_PHY_GPIOA)
                printk("~~~ %s() hwirq:%d\n", __func__, irqnr);
            irqnr = irq_find_mapping(gic->domain, irqnr);
            if (irqnr >= IRQ_PHY_GPIOA)
                printk("~~~ %s() irqnr:%d\n", __func__, irqnr);
            handle_IRQ(irqnr, regs);
            continue;
        }

If the ID is greater than 15, it is a Private Peripheral Interrupt (PPI) or a Shared Peripheral Interrupt (SPI).
irqnr = irq_find_mapping(gic->domain, irqnr);
Given gic->domain and the GIC hardware interrupt number, this determines which struct irq_desc the interrupt belongs to,
i.e. it returns irqnr, the index of the corresponding interrupt in the global struct irq_desc irq_desc[NR_IRQS] array.

/* kernel/irq/irqdomain.c */

/**
 * irq_find_mapping() - Find a linux irq from an hw irq number.
 * @domain: domain owning this hardware interrupt
 * @hwirq: hardware irq number in that domain space
 *
 * This is a slow path, for use by generic code. It's expected that an
 * irq controller implementation directly calls the appropriate low level
 * mapping function.
 */
unsigned int irq_find_mapping(struct irq_domain *domain,
                  irq_hw_number_t hwirq)
{
    unsigned int i;
    unsigned int hint = hwirq % nr_irqs;
    unsigned int irq;

    /* Look for default domain if nececssary */
    if (domain == NULL)
        domain = irq_default_domain;
    if (domain == NULL)
        return 0;

    /* legacy -> bail early */
    if (domain->revmap_type == IRQ_DOMAIN_MAP_LEGACY) {
        irq = irq_domain_legacy_revmap(domain, hwirq);
        if (irq >= IRQ_PHY_GPIOA)
            printk("~~~ %s() hwirq:%lu, irq:%u\n", __func__, \
                    hwirq, irq);
        return irq;
    }

    /* Slow path does a linear search of the map */
    if (hint == 0)
        hint = 1;
    i = hint;
    do {
        struct irq_data *data = irq_get_irq_data(i);
        if (data && (data->domain == domain) && (data->hwirq == hwirq))
            return i;
        i++;
        if (i >= nr_irqs)
            i = 1;
    } while(i != hint);
    return 0;
}
EXPORT_SYMBOL_GPL(irq_find_mapping);
/* kernel/irq/irqdomain.c */

static unsigned int irq_domain_legacy_revmap(struct irq_domain *domain,
                         irq_hw_number_t hwirq)
{
    irq_hw_number_t first_hwirq = domain->revmap_data.legacy.first_hwirq;
    int size = domain->revmap_data.legacy.size;

    if (hwirq >= IRQ_PHY_GPIOA)
        printk("~~~ %s() hwirq:%lu\n", __func__, hwirq);

    if (WARN_ON(hwirq < first_hwirq || hwirq >= first_hwirq + size))
        return 0;
    return hwirq - first_hwirq + domain->revmap_data.legacy.first_irq;
}

Using the mapping between the hardware interrupt number and irqnr, obtain irqnr;
then call handle_IRQ(irqnr, regs);
then continue, until all pending interrupts have been handled.
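A tiny sketch of the legacy-revmap arithmetic above (the numbers are hypothetical; on this board the domain's first_hwirq/first_irq come from the gic_init() setup):

#include <stdio.h>

int main(void)
{
    unsigned int first_hwirq = 16;   /* hypothetical: legacy domain starts at hwirq 16 */
    unsigned int first_irq   = 16;   /* hypothetical: mapped to Linux irq 16           */
    unsigned int hwirq       = 69;   /* hypothetical SPI read from GICC_IAR            */

    unsigned int irq = hwirq - first_hwirq + first_irq;   /* irq_domain_legacy_revmap() */
    printf("hwirq %u -> Linux irq %u\n", hwirq, irq);
    return 0;
}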

  • void handle_IRQ(unsigned int irq, struct pt_regs *regs)
/*
 * handle_IRQ handles all hardware IRQ's.  Decoded IRQs should
 * not come via this function.  Instead, they should provide their
 * own 'handler'.  Used by platform code implementing C-based 1st
 * level decoding.
 */
void handle_IRQ(unsigned int irq, struct pt_regs *regs)
{
    struct pt_regs *old_regs = set_irq_regs(regs);

    irq_enter();

    /*
     * Some hardware gives randomly wrong interrupts.  Rather
     * than crashing, do something sensible.
     */
    if (unlikely(irq >= nr_irqs)) {
        if (printk_ratelimit())
            printk(KERN_WARNING "Bad IRQ%u\n", irq);
        ack_bad_irq(irq);
    } else {
        generic_handle_irq(irq);
    }

    /* AT91 specific workaround */
    irq_finish(irq);

    irq_exit();
    set_irq_regs(old_regs);
}
  • int generic_handle_irq(unsigned int irq)
/* kernel/irq/irqdesc.c */

/**
 * generic_handle_irq - Invoke the handler for a particular irq
 * @irq:    The irq number to handle
 *
 */
int generic_handle_irq(unsigned int irq)
{
    struct irq_desc *desc = irq_to_desc(irq);

    if (irq >= IRQ_PHY_GPIOA)
        printk("~~~ %s() irq:%u, irq_desc->irq_data.irq:%u\n", \
            __func__, irq, irq_desc->irq_data.irq);

    if (!desc)
        return -EINVAL;
    generic_handle_irq_desc(irq, desc);
    return 0;
}
EXPORT_SYMBOL_GPL(generic_handle_irq);
  • inline void generic_handle_irq_desc(unsigned int irq, struct irq_desc *desc)
/* include/linux/irqdesc.h */

/*
 * Architectures call this to let the generic IRQ layer
 * handle an interrupt. If the descriptor is attached to an
 * irqchip-style controller then we call the ->handle_irq() handler,
 * and it calls __do_IRQ() if it's attached to an irqtype-style controller.
 */
static inline void generic_handle_irq_desc(unsigned int irq, struct irq_desc *desc)
{
    if (irq >= IRQ_PHY_GPIOA)
        printk("~~~ %s() irq:%u, irq_desc->irq_data.irq:%u\n", \
            __func__, irq, irq_desc->irq_data.irq);

    desc->handle_irq(irq, desc);
}

struct irq_desc *desc = irq_to_desc(irq);
call desc->handle_irq(irq, desc);

gic_init(0, IRQ_GIC_PPI_START, dist_base, cpu_base);
|
void __init gic_init_bases(unsigned int gic_nr, int irq_start,
void __iomem *dist_base, void __iomem *cpu_base,
u32 percpu_offset, struct device_node *node)
|
gic->domain = irq_domain_add_legacy(node, gic_irqs, irq_base,
hwirq_base, &gic_irq_domain_ops, gic);
|
ops->map(domain, irq, hwirq);
/* This calls struct irq_domain_ops gic_irq_domain_ops, .map = gic_irq_domain_map,
which sets desc->handle_irq = handle; (the high level irq-events handler)
For an SPI interrupt, desc->handle_irq is set to handle_fasteoi_irq(). */

/* arch/arm/mach-s5p6818/gic.c */

...
const struct irq_domain_ops gic_irq_domain_ops = {
    .map = gic_irq_domain_map,
    .xlate = gic_irq_domain_xlate,
};
...
static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
                irq_hw_number_t hw)
{
    /*printk("~~~ %s() irq:%d, hw:%d, call irq_set_chip_and_handler()\n", \
            __func__, irq, hw); */
    
    if (hw < 32) {
        irq_set_percpu_devid(irq);
        irq_set_chip_and_handler(irq, &gic_chip,
                     handle_percpu_devid_irq);
        set_irq_flags(irq, IRQF_VALID | IRQF_NOAUTOEN);
    } else {
        irq_set_chip_and_handler(irq, &gic_chip,
                     handle_fasteoi_irq);
        set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
    }
    irq_set_chip_data(irq, d->host_data);
    return 0;
}

For an SPI interrupt:

static inline void generic_handle_irq_desc(unsigned int irq, struct irq_desc *desc)
{
    desc->handle_irq(irq, desc);  /* call handle_fasteoi_irq(); */
}
  • void handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
/* kernel/irq/chip.c */

/**
 *    handle_fasteoi_irq - irq handler for transparent controllers
 *    @irq:    the interrupt number
 *    @desc:    the interrupt description structure for this irq
 *
 *    Only a single callback will be issued to the chip: an ->eoi()
 *    call when the interrupt has been serviced. This enables support
 *    for modern forms of interrupt handlers, which handle the flow
 *    details in hardware, transparently.
 */
void
handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
{
    if (irq >= IRQ_PHY_GPIOA)
        printk("~~~ %s() irq:%d\n", __func__, irq);

    raw_spin_lock(&desc->lock);

    if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
        if (!irq_check_poll(desc))
            goto out;

    desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
    kstat_incr_irqs_this_cpu(irq, desc);

    /*
     * If its disabled or no action available
     * then mask it and get out of here:
     */
    if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
        desc->istate |= IRQS_PENDING;
        mask_irq(desc);
        goto out;
    }

    if (desc->istate & IRQS_ONESHOT)
        mask_irq(desc);

    preflow_handler(desc);
    handle_irq_event(desc);

    if (desc->istate & IRQS_ONESHOT)
        cond_unmask_irq(desc);

out_eoi:
    if (irq >= IRQ_PHY_GPIOA)
        printk("~~~ %s() irq:%d, call chip->irq_eoi()\n", \
            __func__, irq);
    desc->irq_data.chip->irq_eoi(&desc->irq_data);
out_unlock:
    raw_spin_unlock(&desc->lock);
    return;
out:
    if (!(desc->irq_data.chip->flags & IRQCHIP_EOI_IF_HANDLED))
        goto out_eoi;
    goto out_unlock;
}

The main steps are:

    if (desc->istate & IRQS_ONESHOT)
        mask_irq(desc);   /* disable this interrupt */

    handle_irq_event(desc);   /* handle it */

    if (desc->istate & IRQS_ONESHOT)
        cond_unmask_irq(desc);   /* re-enable this interrupt */

    desc->irq_data.chip->irq_eoi(&desc->irq_data); /* tell the irq chip: end of interrupt */

This shows one of the uses of the IRQS_ONESHOT flag.
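As context for where IRQS_ONESHOT comes from: it is typically set when a driver registers a threaded handler with IRQF_ONESHOT, so the line stays masked until the thread function finishes. A minimal driver-side sketch (device name and functions are made up for illustration):

#include <linux/interrupt.h>

static irqreturn_t demo_hardirq(int irq, void *dev_id)
{
        /* quick hard-irq part; defer the real work to the thread */
        return IRQ_WAKE_THREAD;
}

static irqreturn_t demo_thread_fn(int irq, void *dev_id)
{
        /* slow work runs here, with the line still masked (IRQF_ONESHOT) */
        return IRQ_HANDLED;
}

static int demo_setup(unsigned int irq, void *dev)
{
        return request_threaded_irq(irq, demo_hardirq, demo_thread_fn,
                                    IRQF_ONESHOT, "demo-dev", dev);
}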

  • static void gic_eoi_irq(struct irq_data *d)
/* arch/arm/mach-s5p6818/gic.c */
...
static struct irq_chip gic_chip = {
    .name            = "GIC",
    .irq_mask        = gic_mask_irq,
    .irq_unmask        = gic_unmask_irq,
    .irq_eoi        = gic_eoi_irq,
    .irq_set_type        = gic_set_type,
    .irq_retrigger        = gic_retrigger,
#ifdef CONFIG_SMP
    .irq_set_affinity    = gic_set_affinity,
#endif
    .irq_set_wake        = gic_set_wake,
};
...
static void gic_eoi_irq(struct irq_data *d)
{
    if (d->hwirq >= IRQ_PHY_GPIOA)
        printk("~~~ %s() hwirq:%d\n", __func__, d->hwirq);

    if (gic_arch_extn.irq_eoi) {
        raw_spin_lock(&irq_controller_lock);
        gic_arch_extn.irq_eoi(d);
        raw_spin_unlock(&irq_controller_lock);
    }

    writel_relaxed(gic_irq(d), gic_cpu_base(d) + GIC_CPU_EOI);
}
  • irqreturn_t handle_irq_event(struct irq_desc *desc)
/*  */

irqreturn_t handle_irq_event(struct irq_desc *desc)
{
    struct irqaction *action = desc->action;
    irqreturn_t ret;

    if (desc->irq_data.irq >= IRQ_PHY_GPIOA)
        printk("~~~ %s() irq:%d, name:%s, irq_chip:%s\n", \
            __func__, desc->irq_data.irq, desc->name, \
            (desc->irq_data.chip)->name);

    desc->istate &= ~IRQS_PENDING;
    irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
    raw_spin_unlock(&desc->lock);

    ret = handle_irq_event_percpu(desc, action);

    raw_spin_lock(&desc->lock);
    irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
    return ret;
}

  • irqreturn_t handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action)
irqreturn_t
handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action)
{
    irqreturn_t retval = IRQ_NONE;
    unsigned int flags = 0, irq = desc->irq_data.irq;

    if (irq >= IRQ_PHY_GPIOA)
        printk("~~~ %s() irq:%d, name:%s\n", __func__, irq, desc->name);

    do {
        irqreturn_t res;

        trace_irq_handler_entry(irq, action);
        res = action->handler(irq, action->dev_id);
        if (irq >= IRQ_PHY_GPIOA)
            printk("~~~ %s() after call action->handler(), name:%s, res:%d\n", \
                __func__, action->name, res);
        trace_irq_handler_exit(irq, action, res);

        if (WARN_ONCE(!irqs_disabled(),"irq %u handler %pF enabled interrupts\n",
                  irq, action->handler))
            local_irq_disable();

        switch (res) {
        case IRQ_WAKE_THREAD:
            /*
             * Catch drivers which return WAKE_THREAD but
             * did not set up a thread function
             */
            if (unlikely(!action->thread_fn)) {
                warn_no_thread(irq, action);
                break;
            }

            if (irq >= IRQ_PHY_GPIOA)
                printk("~~~ %s() irq_wake_thread\n", \
                    __func__);
            irq_wake_thread(desc, action);

            /* Fall through to add to randomness */
        case IRQ_HANDLED:
            flags |= action->flags;
            break;

        default:
            break;
        }

        retval |= res;
        action = action->next;
    } while (action);

    add_interrupt_randomness(irq, flags);

    if (!noirqdebug)
        note_interrupt(irq, desc, retval);
    return retval;
}

We finally arrive at:
res = action->handler(irq, action->dev_id);

review:
linux arm irq (1)

  • Main processing flow when an interrupt occurs on an ARM SoC with a GIC (condensed description):
  1. Enter IRQ mode, switch to SVC mode, and save the interrupted context.
  2. Read the GIC irq chip to find out which hardware interrupt number fired; this hw int number is fixed by the SoC hardware design.
    For a GIC Shared Peripheral Interrupt (SPI), there is usually one hw int number per SoC controller.
  3. Find the interrupt descriptor (struct irq_desc) corresponding to this hw int and call its handle_irq (the high level irq-events handler);
    handle_irq calls action->handler().
    PS: if this hw int is itself an irq chip (several interrupts chained behind it, e.g. an interrupt controller or a GPIO controller with several IO lines), then its handle_irq() (high level irq-events handler) must read that irq chip's registers to determine the hw int number, and then repeat step 3.
  4. Perform the GIC end-of-interrupt operations (action->handler() clears the interrupt at the device/controller level).
  5. b ret_to_user_from_irq or svc_exit r5.
