/*
 * Copyright (C) 2014, Freie Universitaet Berlin (FUB) & INRIA.
 * All rights reserved.
 *
 * This file is subject to the terms and conditions of the GNU Lesser
 * General Public License v2.1. See the file LICENSE in the top level
 * directory for more details.
 */

#ifndef CPU_H
#define CPU_H

#include <stdint.h>

#include <msp430.h>

#include "sched.h"
#include "thread.h"

#ifdef __cplusplus
extern "C" {
#endif

/**
 * @brief   Word size in bits for MSP430 platforms
 */
#define WORDSIZE 16

/**
 * @brief   This CPU provides its own pm_set_lowest() implementation
 */
#define PROVIDES_PM_SET_LOWEST

/**
 * @brief   Macro for defining interrupt service routines
 */
#define ISR(a,b) void __attribute__((naked, interrupt (a))) b(void)

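/*
 * Usage sketch (illustrative, not part of the original header): define a
 * handler with the ISR() macro and bracket its body with __enter_isr() /
 * __exit_isr() so that a pending context switch is honored on return.
 * The vector name TIMERA0_VECTOR and the handler name are assumptions;
 * the valid vector names come from the device-specific <msp430.h>.
 *
 *     ISR(TIMERA0_VECTOR, isr_timer_a0)
 *     {
 *         __enter_isr();
 *         // handle the interrupt; sched_context_switch_request may get
 *         // set here if another thread should run afterwards
 *         __exit_isr();
 *     }
 */
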
/**
 * @brief   The current ISR state (inside or not)
 */
extern volatile int __irq_is_in;

/**
 * @brief   Save the current thread context from inside an ISR
 */
static inline void __attribute__((always_inline)) __save_context(void)
{
    __asm__("push r15");
    __asm__("push r14");
    __asm__("push r13");
    __asm__("push r12");
    __asm__("push r11");
    __asm__("push r10");
    __asm__("push r9");
    __asm__("push r8");
    __asm__("push r7");
    __asm__("push r6");
    __asm__("push r5");
    __asm__("push r4");

    __asm__("mov.w r1,%0" : "=r"(thread_get_active()->sp));
}

/**
 * @brief   Restore the thread context from inside an ISR
 */
static inline void __attribute__((always_inline)) __restore_context(void)
{
    __asm__("mov.w %0,r1" : : "m"(thread_get_active()->sp));

    __asm__("pop r4");
    __asm__("pop r5");
    __asm__("pop r6");
    __asm__("pop r7");
    __asm__("pop r8");
    __asm__("pop r9");
    __asm__("pop r10");
    __asm__("pop r11");
    __asm__("pop r12");
    __asm__("pop r13");
    __asm__("pop r14");
    __asm__("pop r15");
    __asm__("reti");
}

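/*
 * Note on the stack frame: taking the interrupt pushes PC and SR in
 * hardware; __save_context() pushes r15 down to r4 on top of that and
 * stores the resulting stack pointer in the active thread's TCB.
 * __restore_context() loads the stack pointer of the (possibly different)
 * active thread, pops r4 through r15, and returns via reti, which restores
 * SR and PC.
 */
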
/**
 * @brief   Run this code on entering interrupt routines
 */
static inline void __attribute__((always_inline)) __enter_isr(void)
{
    /* modify the state register pushed to the stack so that we do not go
     * back to power saving mode right away */
    __asm__ volatile(
        "bic %[mask], 0(SP)" "\n\t"
        : /* no outputs */
        : [mask] "i"(CPUOFF | SCG0 | SCG1 | OSCOFF)
        : "memory"
    );
    extern char __stack; /* defined by linker script to end of RAM */
    __save_context();
    __asm__("mov.w %0,r1" : : "i"(&__stack));
    __irq_is_in = 1;
}

/**
 * @brief   Run this code on exiting interrupt routines
 */
static inline void __attribute__((always_inline)) __exit_isr(void)
{
    __irq_is_in = 0;

    if (sched_context_switch_request) {
        sched_run();
    }

    __restore_context();
}

/**
 * @brief   Get the last instruction's address (the caller's return address)
 */
__attribute__((always_inline))
static inline uintptr_t cpu_get_caller_pc(void)
{
    return (uintptr_t)__builtin_return_address(0);
}

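/*
 * Usage sketch (illustrative, not part of the original header): record the
 * call site for debugging. The helper name debug_trace() and the
 * printf-based output are assumptions for illustration only.
 *
 *     #include <inttypes.h>
 *     #include <stdio.h>
 *
 *     void debug_trace(void)
 *     {
 *         printf("called from 0x%" PRIxPTR "\n", cpu_get_caller_pc());
 *     }
 */
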
#ifdef __cplusplus
}
#endif

#endif /* CPU_H */