Skip to content

Commit bf99836

Browse files
committed
link.x.in: put most __[se] symbols back into sections
This puts most start/end address symbols back into the sections. Only `__ebss` and `__edata` are kept outside their sections so that potential user code with external libraries can inject stuff using `INSERT AFTER .bss/.data` and profit from the .bss/.data zeroing/loading mechanism. This also leads to the `__sbss` and `__veneer_base` symbols having the right section type (B not D in nm). Also the TrustZone start and end addresses are 32-byte aligned as per the requirements. That section does cost up to 28 bytes of FLASH due to that alignment even if empty. The .rodata start is kept free for the linker to allocate it after .text. This enables users to inject sections between .text and .rodata and removes the chance to get overlapping address errors. With this the linker will by default place .rodata after .text as before. This commit also adds and exposes a few more stable address start/end symbols (__[se]uninit, __stext, __srodata) that are useful for debugging and hooking into. See rust-embedded/cortex-m-rt#287 (comment) for discussion of the issues and description of this compromise solution.
1 parent bbdab5a commit bf99836

File tree

1 file changed

+27
-18
lines changed

1 file changed

+27
-18
lines changed

cortex-m-rt/link.x.in

Lines changed: 27 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -86,6 +86,7 @@ SECTIONS
8686
/* ### .text */
8787
.text _stext :
8888
{
89+
__stext = .;
8990
*(.Reset);
9091

9192
*(.text .text.*);
@@ -96,34 +97,35 @@ SECTIONS
9697
*(.HardFault.*);
9798

9899
. = ALIGN(4); /* Pad .text to the alignment to workaround overlapping load section bug in old lld */
100+
__etext = .;
99101
} > FLASH
100-
. = ALIGN(4); /* Ensure __etext is aligned if something unaligned is inserted after .text */
101-
__etext = .; /* Define outside of .text to allow using INSERT AFTER .text */
102102

103103
/* ### .rodata */
104-
.rodata __etext : ALIGN(4)
104+
.rodata : ALIGN(4)
105105
{
106+
. = ALIGN(4);
107+
__srodata = .;
106108
*(.rodata .rodata.*);
107109

108110
/* 4-byte align the end (VMA) of this section.
109111
This is required by LLD to ensure the LMA of the following .data
110112
section will have the correct alignment. */
111113
. = ALIGN(4);
114+
__erodata = .;
112115
} > FLASH
113-
. = ALIGN(4); /* Ensure __erodata is aligned if something unaligned is inserted after .rodata */
114-
__erodata = .;
115116

116117
/* ### .gnu.sgstubs
117118
This section contains the TrustZone-M veneers put there by the Arm GNU linker. */
118-
. = ALIGN(32); /* Security Attribution Unit blocks must be 32 bytes aligned. */
119-
__veneer_base = ALIGN(4);
120-
.gnu.sgstubs : ALIGN(4)
119+
/* Security Attribution Unit blocks must be 32 bytes aligned. */
120+
/* Note that this does cost up to 28 bytes of FLASH. */
121+
.gnu.sgstubs : ALIGN(32)
121122
{
123+
. = ALIGN(32);
124+
__veneer_base = .;
122125
*(.gnu.sgstubs*)
123-
. = ALIGN(4); /* 4-byte align the end (VMA) of this section */
126+
. = ALIGN(32);
127+
__veneer_limit = .;
124128
} > FLASH
125-
. = ALIGN(4); /* Ensure __veneer_limit is aligned if something unaligned is inserted after .gnu.sgstubs */
126-
__veneer_limit = .;
127129

128130
/* ## Sections in RAM */
129131
/* ### .data */
@@ -134,35 +136,42 @@ SECTIONS
134136
*(.data .data.*);
135137
. = ALIGN(4); /* 4-byte align the end (VMA) of this section */
136138
} > RAM AT>FLASH
137-
. = ALIGN(4); /* Ensure __edata is aligned if something unaligned is inserted after .data */
139+
/* Allow sections from user `memory.x` injected using `INSERT AFTER .data` to
140+
* use the .data loading mechanism by pushing __edata. Note: do not change
141+
* output region or load region in those user sections! */
142+
. = ALIGN(4);
138143
__edata = .;
139144

140145
/* LMA of .data */
141146
__sidata = LOADADDR(.data);
142147

143148
/* ### .bss */
144-
. = ALIGN(4);
145-
__sbss = .; /* Define outside of section to include INSERT BEFORE/AFTER symbols */
146149
.bss (NOLOAD) : ALIGN(4)
147150
{
151+
. = ALIGN(4);
152+
__sbss = .;
148153
*(.bss .bss.*);
149154
*(COMMON); /* Uninitialized C statics */
150155
. = ALIGN(4); /* 4-byte align the end (VMA) of this section */
151156
} > RAM
152-
. = ALIGN(4); /* Ensure __ebss is aligned if something unaligned is inserted after .bss */
157+
/* Allow sections from user `memory.x` injected using `INSERT AFTER .bss` to
158+
* use the .bss zeroing mechanism by pushing __ebss. Note: do not change
159+
* output region or load region in those user sections! */
160+
. = ALIGN(4);
153161
__ebss = .;
154162

155163
/* ### .uninit */
156164
.uninit (NOLOAD) : ALIGN(4)
157165
{
158166
. = ALIGN(4);
167+
__suninit = .;
159168
*(.uninit .uninit.*);
160169
. = ALIGN(4);
170+
__euninit = .;
161171
} > RAM
162172

163-
/* Place the heap right after `.uninit` */
164-
. = ALIGN(4);
165-
__sheap = .;
173+
/* Place the heap right after `.uninit` in RAM */
174+
PROVIDE(__sheap = __euninit);
166175

167176
/* ## .got */
168177
/* Dynamic relocations are unsupported. This section is only used to detect relocatable code in

0 commit comments

Comments
 (0)