author     Dimitry Andric <dim@FreeBSD.org>  2019-06-11 18:17:16 +0000
committer  Dimitry Andric <dim@FreeBSD.org>  2019-06-11 18:17:16 +0000
commit     2bcfbb511a63150d7b74d910c8dbda707d78a5e9 (patch)
tree       2fc858c41efbbe4c48c468df577927616c2bab0b
parent     758a81d16ef3cd307311c130fa34ea149f40149a (diff)
Vendor import of LLVM libunwind release_80 branch r363030
(tag: vendor/llvm-libunwind/libunwind-release_80-r363030)

Notes:
    svn path=/vendor/llvm-libunwind/dist-release_80/; revision=348940
    svn path=/vendor/llvm-libunwind/libunwind-release_80-r363030/; revision=348956; tag=vendor/llvm-libunwind/libunwind-release_80-r363030
-rw-r--r--  src/UnwindRegistersRestore.S  238
-rw-r--r--  src/UnwindRegistersSave.S     270
-rw-r--r--  src/assembly.h                  2
3 files changed, 254 insertions, 256 deletions
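
Note: the substance of this import is a syntax conversion of the 32-bit PowerPC unwind assembly from Apple-assembler conventions to GNU-as-compatible ones: ";" comments become "//", the Apple-only "@" statement separator inside multi-line macros is replaced by the shared SEPARATOR macro from assembly.h, and register operands gain a "%" prefix. One line from the first hunk, before and after:

    lwz r2, 16(r3)       ; Apple assembler syntax (removed)
    lwz %r2, 16(%r3)     // GNU assembler syntax (added)
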
diff --git a/src/UnwindRegistersRestore.S b/src/UnwindRegistersRestore.S
index 389db67579cd..a155fbe2ddf2 100644
--- a/src/UnwindRegistersRestore.S
+++ b/src/UnwindRegistersRestore.S
@@ -396,119 +396,119 @@ Lnovec:
#elif defined(__ppc__)
DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind13Registers_ppc6jumptoEv)
-;
-; void libunwind::Registers_ppc::jumpto()
-;
-; On entry:
-; thread_state pointer is in r3
-;
-
- ; restore integral registers
- ; skip r0 for now
- ; skip r1 for now
- lwz r2, 16(r3)
- ; skip r3 for now
- ; skip r4 for now
- ; skip r5 for now
- lwz r6, 32(r3)
- lwz r7, 36(r3)
- lwz r8, 40(r3)
- lwz r9, 44(r3)
- lwz r10, 48(r3)
- lwz r11, 52(r3)
- lwz r12, 56(r3)
- lwz r13, 60(r3)
- lwz r14, 64(r3)
- lwz r15, 68(r3)
- lwz r16, 72(r3)
- lwz r17, 76(r3)
- lwz r18, 80(r3)
- lwz r19, 84(r3)
- lwz r20, 88(r3)
- lwz r21, 92(r3)
- lwz r22, 96(r3)
- lwz r23,100(r3)
- lwz r24,104(r3)
- lwz r25,108(r3)
- lwz r26,112(r3)
- lwz r27,116(r3)
- lwz r28,120(r3)
- lwz r29,124(r3)
- lwz r30,128(r3)
- lwz r31,132(r3)
-
- ; restore float registers
- lfd f0, 160(r3)
- lfd f1, 168(r3)
- lfd f2, 176(r3)
- lfd f3, 184(r3)
- lfd f4, 192(r3)
- lfd f5, 200(r3)
- lfd f6, 208(r3)
- lfd f7, 216(r3)
- lfd f8, 224(r3)
- lfd f9, 232(r3)
- lfd f10,240(r3)
- lfd f11,248(r3)
- lfd f12,256(r3)
- lfd f13,264(r3)
- lfd f14,272(r3)
- lfd f15,280(r3)
- lfd f16,288(r3)
- lfd f17,296(r3)
- lfd f18,304(r3)
- lfd f19,312(r3)
- lfd f20,320(r3)
- lfd f21,328(r3)
- lfd f22,336(r3)
- lfd f23,344(r3)
- lfd f24,352(r3)
- lfd f25,360(r3)
- lfd f26,368(r3)
- lfd f27,376(r3)
- lfd f28,384(r3)
- lfd f29,392(r3)
- lfd f30,400(r3)
- lfd f31,408(r3)
-
- ; restore vector registers if any are in use
- lwz r5,156(r3) ; test VRsave
- cmpwi r5,0
- beq Lnovec
-
- subi r4,r1,16
- rlwinm r4,r4,0,0,27 ; mask low 4-bits
- ; r4 is now a 16-byte aligned pointer into the red zone
- ; the _vectorRegisters may not be 16-byte aligned so copy via red zone temp buffer
+//
+// void libunwind::Registers_ppc::jumpto()
+//
+// On entry:
+// thread_state pointer is in r3
+//
+
+ // restore integral registers
+ // skip r0 for now
+ // skip r1 for now
+ lwz %r2, 16(%r3)
+ // skip r3 for now
+ // skip r4 for now
+ // skip r5 for now
+ lwz %r6, 32(%r3)
+ lwz %r7, 36(%r3)
+ lwz %r8, 40(%r3)
+ lwz %r9, 44(%r3)
+ lwz %r10, 48(%r3)
+ lwz %r11, 52(%r3)
+ lwz %r12, 56(%r3)
+ lwz %r13, 60(%r3)
+ lwz %r14, 64(%r3)
+ lwz %r15, 68(%r3)
+ lwz %r16, 72(%r3)
+ lwz %r17, 76(%r3)
+ lwz %r18, 80(%r3)
+ lwz %r19, 84(%r3)
+ lwz %r20, 88(%r3)
+ lwz %r21, 92(%r3)
+ lwz %r22, 96(%r3)
+ lwz %r23,100(%r3)
+ lwz %r24,104(%r3)
+ lwz %r25,108(%r3)
+ lwz %r26,112(%r3)
+ lwz %r27,116(%r3)
+ lwz %r28,120(%r3)
+ lwz %r29,124(%r3)
+ lwz %r30,128(%r3)
+ lwz %r31,132(%r3)
+
+ // restore float registers
+ lfd %f0, 160(%r3)
+ lfd %f1, 168(%r3)
+ lfd %f2, 176(%r3)
+ lfd %f3, 184(%r3)
+ lfd %f4, 192(%r3)
+ lfd %f5, 200(%r3)
+ lfd %f6, 208(%r3)
+ lfd %f7, 216(%r3)
+ lfd %f8, 224(%r3)
+ lfd %f9, 232(%r3)
+ lfd %f10,240(%r3)
+ lfd %f11,248(%r3)
+ lfd %f12,256(%r3)
+ lfd %f13,264(%r3)
+ lfd %f14,272(%r3)
+ lfd %f15,280(%r3)
+ lfd %f16,288(%r3)
+ lfd %f17,296(%r3)
+ lfd %f18,304(%r3)
+ lfd %f19,312(%r3)
+ lfd %f20,320(%r3)
+ lfd %f21,328(%r3)
+ lfd %f22,336(%r3)
+ lfd %f23,344(%r3)
+ lfd %f24,352(%r3)
+ lfd %f25,360(%r3)
+ lfd %f26,368(%r3)
+ lfd %f27,376(%r3)
+ lfd %f28,384(%r3)
+ lfd %f29,392(%r3)
+ lfd %f30,400(%r3)
+ lfd %f31,408(%r3)
+
+ // restore vector registers if any are in use
+ lwz %r5, 156(%r3) // test VRsave
+ cmpwi %r5, 0
+ beq Lnovec
+ subi %r4, %r1, 16
+ rlwinm %r4, %r4, 0, 0, 27 // mask low 4-bits
+ // r4 is now a 16-byte aligned pointer into the red zone
+ // the _vectorRegisters may not be 16-byte aligned so copy via red zone temp buffer
+
#define LOAD_VECTOR_UNALIGNEDl(_index) \
- andis. r0,r5,(1<<(15-_index)) @\
- beq Ldone ## _index @\
- lwz r0, 424+_index*16(r3) @\
- stw r0, 0(r4) @\
- lwz r0, 424+_index*16+4(r3) @\
- stw r0, 4(r4) @\
- lwz r0, 424+_index*16+8(r3) @\
- stw r0, 8(r4) @\
- lwz r0, 424+_index*16+12(r3)@\
- stw r0, 12(r4) @\
- lvx v ## _index,0,r4 @\
-Ldone ## _index:
+ andis. %r0, %r5, (1<<(15-_index)) SEPARATOR \
+ beq Ldone ## _index SEPARATOR \
+ lwz %r0, 424+_index*16(%r3) SEPARATOR \
+ stw %r0, 0(%r4) SEPARATOR \
+ lwz %r0, 424+_index*16+4(%r3) SEPARATOR \
+ stw %r0, 4(%r4) SEPARATOR \
+ lwz %r0, 424+_index*16+8(%r3) SEPARATOR \
+ stw %r0, 8(%r4) SEPARATOR \
+ lwz %r0, 424+_index*16+12(%r3) SEPARATOR \
+ stw %r0, 12(%r4) SEPARATOR \
+ lvx %v ## _index, 0, %r4 SEPARATOR \
+ Ldone ## _index:
#define LOAD_VECTOR_UNALIGNEDh(_index) \
- andi. r0,r5,(1<<(31-_index)) @\
- beq Ldone ## _index @\
- lwz r0, 424+_index*16(r3) @\
- stw r0, 0(r4) @\
- lwz r0, 424+_index*16+4(r3) @\
- stw r0, 4(r4) @\
- lwz r0, 424+_index*16+8(r3) @\
- stw r0, 8(r4) @\
- lwz r0, 424+_index*16+12(r3)@\
- stw r0, 12(r4) @\
- lvx v ## _index,0,r4 @\
- Ldone ## _index:
+ andi. %r0, %r5, (1<<(31-_index)) SEPARATOR \
+ beq Ldone ## _index SEPARATOR \
+ lwz %r0, 424+_index*16(%r3) SEPARATOR \
+ stw %r0, 0(%r4) SEPARATOR \
+ lwz %r0, 424+_index*16+4(%r3) SEPARATOR \
+ stw %r0, 4(%r4) SEPARATOR \
+ lwz %r0, 424+_index*16+8(%r3) SEPARATOR \
+ stw %r0, 8(%r4) SEPARATOR \
+ lwz %r0, 424+_index*16+12(%r3) SEPARATOR \
+ stw %r0, 12(%r4) SEPARATOR \
+ lvx %v ## _index, 0, %r4 SEPARATOR \
+ Ldone ## _index:
LOAD_VECTOR_UNALIGNEDl(0)
@@ -545,17 +545,17 @@ Ldone ## _index:
LOAD_VECTOR_UNALIGNEDh(31)
Lnovec:
- lwz r0, 136(r3) ; __cr
- mtocrf 255,r0
- lwz r0, 148(r3) ; __ctr
- mtctr r0
- lwz r0, 0(r3) ; __ssr0
- mtctr r0
- lwz r0, 8(r3) ; do r0 now
- lwz r5,28(r3) ; do r5 now
- lwz r4,24(r3) ; do r4 now
- lwz r1,12(r3) ; do sp now
- lwz r3,20(r3) ; do r3 last
+ lwz %r0, 136(%r3) // __cr
+ mtcr %r0
+ lwz %r0, 148(%r3) // __ctr
+ mtctr %r0
+ lwz %r0, 0(%r3) // __ssr0
+ mtctr %r0
+ lwz %r0, 8(%r3) // do r0 now
+ lwz %r5, 28(%r3) // do r5 now
+ lwz %r4, 24(%r3) // do r4 now
+ lwz %r1, 12(%r3) // do sp now
+ lwz %r3, 20(%r3) // do r3 last
bctr
#elif defined(__arm64__) || defined(__aarch64__)
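
Note: in the vector-restore path above, VRsave is tested bit by bit, and each live vector register is copied word by word from the (possibly unaligned) _vectorRegisters area into a 16-byte-aligned scratch slot carved out of the red zone below the stack pointer, because lvx requires a 16-byte-aligned address. A sketch of one expansion of LOAD_VECTOR_UNALIGNEDl after preprocessing (for _index = 0, assuming SEPARATOR expands to the GNU-as statement separator ";"):

    andis. %r0, %r5, (1<<15)    // test the VRsave bit for v0
    beq    Ldone0               // v0 was not live; skip it
    lwz    %r0, 424(%r3)        // copy 16 bytes of the context,
    stw    %r0, 0(%r4)          //   one word at a time, into the
    lwz    %r0, 428(%r3)        //   aligned red-zone buffer at %r4
    stw    %r0, 4(%r4)
    lwz    %r0, 432(%r3)
    stw    %r0, 8(%r4)
    lwz    %r0, 436(%r3)
    stw    %r0, 12(%r4)
    lvx    %v0, 0, %r4          // lvx itself needs 16-byte alignment
    Ldone0:

Note also that Lnovec loads __ssr0 into CTR and reaches it with bctr, so the __ctr value restored two instructions earlier is overwritten; that quirk is present on both sides of the diff.
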
diff --git a/src/UnwindRegistersSave.S b/src/UnwindRegistersSave.S
index 48ecb0aec70f..4b674afc6bdc 100644
--- a/src/UnwindRegistersSave.S
+++ b/src/UnwindRegistersSave.S
@@ -557,144 +557,144 @@ DEFINE_LIBUNWIND_FUNCTION(unw_getcontext)
#elif defined(__ppc__)
-;
-; extern int unw_getcontext(unw_context_t* thread_state)
-;
-; On entry:
-; thread_state pointer is in r3
-;
+//
+// extern int unw_getcontext(unw_context_t* thread_state)
+//
+// On entry:
+// thread_state pointer is in r3
+//
DEFINE_LIBUNWIND_FUNCTION(unw_getcontext)
- stw r0, 8(r3)
- mflr r0
- stw r0, 0(r3) ; store lr as ssr0
- stw r1, 12(r3)
- stw r2, 16(r3)
- stw r3, 20(r3)
- stw r4, 24(r3)
- stw r5, 28(r3)
- stw r6, 32(r3)
- stw r7, 36(r3)
- stw r8, 40(r3)
- stw r9, 44(r3)
- stw r10, 48(r3)
- stw r11, 52(r3)
- stw r12, 56(r3)
- stw r13, 60(r3)
- stw r14, 64(r3)
- stw r15, 68(r3)
- stw r16, 72(r3)
- stw r17, 76(r3)
- stw r18, 80(r3)
- stw r19, 84(r3)
- stw r20, 88(r3)
- stw r21, 92(r3)
- stw r22, 96(r3)
- stw r23,100(r3)
- stw r24,104(r3)
- stw r25,108(r3)
- stw r26,112(r3)
- stw r27,116(r3)
- stw r28,120(r3)
- stw r29,124(r3)
- stw r30,128(r3)
- stw r31,132(r3)
-
- ; save VRSave register
- mfspr r0,256
- stw r0,156(r3)
- ; save CR registers
- mfcr r0
- stw r0,136(r3)
- ; save CTR register
- mfctr r0
- stw r0,148(r3)
-
- ; save float registers
- stfd f0, 160(r3)
- stfd f1, 168(r3)
- stfd f2, 176(r3)
- stfd f3, 184(r3)
- stfd f4, 192(r3)
- stfd f5, 200(r3)
- stfd f6, 208(r3)
- stfd f7, 216(r3)
- stfd f8, 224(r3)
- stfd f9, 232(r3)
- stfd f10,240(r3)
- stfd f11,248(r3)
- stfd f12,256(r3)
- stfd f13,264(r3)
- stfd f14,272(r3)
- stfd f15,280(r3)
- stfd f16,288(r3)
- stfd f17,296(r3)
- stfd f18,304(r3)
- stfd f19,312(r3)
- stfd f20,320(r3)
- stfd f21,328(r3)
- stfd f22,336(r3)
- stfd f23,344(r3)
- stfd f24,352(r3)
- stfd f25,360(r3)
- stfd f26,368(r3)
- stfd f27,376(r3)
- stfd f28,384(r3)
- stfd f29,392(r3)
- stfd f30,400(r3)
- stfd f31,408(r3)
-
-
- ; save vector registers
-
- subi r4,r1,16
- rlwinm r4,r4,0,0,27 ; mask low 4-bits
- ; r4 is now a 16-byte aligned pointer into the red zone
+ stw %r0, 8(%r3)
+ mflr %r0
+ stw %r0, 0(%r3) // store lr as ssr0
+ stw %r1, 12(%r3)
+ stw %r2, 16(%r3)
+ stw %r3, 20(%r3)
+ stw %r4, 24(%r3)
+ stw %r5, 28(%r3)
+ stw %r6, 32(%r3)
+ stw %r7, 36(%r3)
+ stw %r8, 40(%r3)
+ stw %r9, 44(%r3)
+ stw %r10, 48(%r3)
+ stw %r11, 52(%r3)
+ stw %r12, 56(%r3)
+ stw %r13, 60(%r3)
+ stw %r14, 64(%r3)
+ stw %r15, 68(%r3)
+ stw %r16, 72(%r3)
+ stw %r17, 76(%r3)
+ stw %r18, 80(%r3)
+ stw %r19, 84(%r3)
+ stw %r20, 88(%r3)
+ stw %r21, 92(%r3)
+ stw %r22, 96(%r3)
+ stw %r23,100(%r3)
+ stw %r24,104(%r3)
+ stw %r25,108(%r3)
+ stw %r26,112(%r3)
+ stw %r27,116(%r3)
+ stw %r28,120(%r3)
+ stw %r29,124(%r3)
+ stw %r30,128(%r3)
+ stw %r31,132(%r3)
+
+ // save VRSave register
+ mfspr %r0, 256
+ stw %r0, 156(%r3)
+ // save CR registers
+ mfcr %r0
+ stw %r0, 136(%r3)
+ // save CTR register
+ mfctr %r0
+ stw %r0, 148(%r3)
+
+ // save float registers
+ stfd %f0, 160(%r3)
+ stfd %f1, 168(%r3)
+ stfd %f2, 176(%r3)
+ stfd %f3, 184(%r3)
+ stfd %f4, 192(%r3)
+ stfd %f5, 200(%r3)
+ stfd %f6, 208(%r3)
+ stfd %f7, 216(%r3)
+ stfd %f8, 224(%r3)
+ stfd %f9, 232(%r3)
+ stfd %f10,240(%r3)
+ stfd %f11,248(%r3)
+ stfd %f12,256(%r3)
+ stfd %f13,264(%r3)
+ stfd %f14,272(%r3)
+ stfd %f15,280(%r3)
+ stfd %f16,288(%r3)
+ stfd %f17,296(%r3)
+ stfd %f18,304(%r3)
+ stfd %f19,312(%r3)
+ stfd %f20,320(%r3)
+ stfd %f21,328(%r3)
+ stfd %f22,336(%r3)
+ stfd %f23,344(%r3)
+ stfd %f24,352(%r3)
+ stfd %f25,360(%r3)
+ stfd %f26,368(%r3)
+ stfd %f27,376(%r3)
+ stfd %f28,384(%r3)
+ stfd %f29,392(%r3)
+ stfd %f30,400(%r3)
+ stfd %f31,408(%r3)
+
+
+ // save vector registers
+
+ subi %r4, %r1, 16
+ rlwinm %r4, %r4, 0, 0, 27 // mask low 4-bits
+ // r4 is now a 16-byte aligned pointer into the red zone
#define SAVE_VECTOR_UNALIGNED(_vec, _offset) \
- stvx _vec,0,r4 @\
- lwz r5, 0(r4) @\
- stw r5, _offset(r3) @\
- lwz r5, 4(r4) @\
- stw r5, _offset+4(r3) @\
- lwz r5, 8(r4) @\
- stw r5, _offset+8(r3) @\
- lwz r5, 12(r4) @\
- stw r5, _offset+12(r3)
-
- SAVE_VECTOR_UNALIGNED( v0, 424+0x000)
- SAVE_VECTOR_UNALIGNED( v1, 424+0x010)
- SAVE_VECTOR_UNALIGNED( v2, 424+0x020)
- SAVE_VECTOR_UNALIGNED( v3, 424+0x030)
- SAVE_VECTOR_UNALIGNED( v4, 424+0x040)
- SAVE_VECTOR_UNALIGNED( v5, 424+0x050)
- SAVE_VECTOR_UNALIGNED( v6, 424+0x060)
- SAVE_VECTOR_UNALIGNED( v7, 424+0x070)
- SAVE_VECTOR_UNALIGNED( v8, 424+0x080)
- SAVE_VECTOR_UNALIGNED( v9, 424+0x090)
- SAVE_VECTOR_UNALIGNED(v10, 424+0x0A0)
- SAVE_VECTOR_UNALIGNED(v11, 424+0x0B0)
- SAVE_VECTOR_UNALIGNED(v12, 424+0x0C0)
- SAVE_VECTOR_UNALIGNED(v13, 424+0x0D0)
- SAVE_VECTOR_UNALIGNED(v14, 424+0x0E0)
- SAVE_VECTOR_UNALIGNED(v15, 424+0x0F0)
- SAVE_VECTOR_UNALIGNED(v16, 424+0x100)
- SAVE_VECTOR_UNALIGNED(v17, 424+0x110)
- SAVE_VECTOR_UNALIGNED(v18, 424+0x120)
- SAVE_VECTOR_UNALIGNED(v19, 424+0x130)
- SAVE_VECTOR_UNALIGNED(v20, 424+0x140)
- SAVE_VECTOR_UNALIGNED(v21, 424+0x150)
- SAVE_VECTOR_UNALIGNED(v22, 424+0x160)
- SAVE_VECTOR_UNALIGNED(v23, 424+0x170)
- SAVE_VECTOR_UNALIGNED(v24, 424+0x180)
- SAVE_VECTOR_UNALIGNED(v25, 424+0x190)
- SAVE_VECTOR_UNALIGNED(v26, 424+0x1A0)
- SAVE_VECTOR_UNALIGNED(v27, 424+0x1B0)
- SAVE_VECTOR_UNALIGNED(v28, 424+0x1C0)
- SAVE_VECTOR_UNALIGNED(v29, 424+0x1D0)
- SAVE_VECTOR_UNALIGNED(v30, 424+0x1E0)
- SAVE_VECTOR_UNALIGNED(v31, 424+0x1F0)
-
- li r3, 0 ; return UNW_ESUCCESS
+ stvx _vec, 0, %r4 SEPARATOR \
+ lwz %r5, 0(%r4) SEPARATOR \
+ stw %r5, _offset(%r3) SEPARATOR \
+ lwz %r5, 4(%r4) SEPARATOR \
+ stw %r5, _offset+4(%r3) SEPARATOR \
+ lwz %r5, 8(%r4) SEPARATOR \
+ stw %r5, _offset+8(%r3) SEPARATOR \
+ lwz %r5, 12(%r4) SEPARATOR \
+ stw %r5, _offset+12(%r3)
+
+ SAVE_VECTOR_UNALIGNED( %v0, 424+0x000)
+ SAVE_VECTOR_UNALIGNED( %v1, 424+0x010)
+ SAVE_VECTOR_UNALIGNED( %v2, 424+0x020)
+ SAVE_VECTOR_UNALIGNED( %v3, 424+0x030)
+ SAVE_VECTOR_UNALIGNED( %v4, 424+0x040)
+ SAVE_VECTOR_UNALIGNED( %v5, 424+0x050)
+ SAVE_VECTOR_UNALIGNED( %v6, 424+0x060)
+ SAVE_VECTOR_UNALIGNED( %v7, 424+0x070)
+ SAVE_VECTOR_UNALIGNED( %v8, 424+0x080)
+ SAVE_VECTOR_UNALIGNED( %v9, 424+0x090)
+ SAVE_VECTOR_UNALIGNED(%v10, 424+0x0A0)
+ SAVE_VECTOR_UNALIGNED(%v11, 424+0x0B0)
+ SAVE_VECTOR_UNALIGNED(%v12, 424+0x0C0)
+ SAVE_VECTOR_UNALIGNED(%v13, 424+0x0D0)
+ SAVE_VECTOR_UNALIGNED(%v14, 424+0x0E0)
+ SAVE_VECTOR_UNALIGNED(%v15, 424+0x0F0)
+ SAVE_VECTOR_UNALIGNED(%v16, 424+0x100)
+ SAVE_VECTOR_UNALIGNED(%v17, 424+0x110)
+ SAVE_VECTOR_UNALIGNED(%v18, 424+0x120)
+ SAVE_VECTOR_UNALIGNED(%v19, 424+0x130)
+ SAVE_VECTOR_UNALIGNED(%v20, 424+0x140)
+ SAVE_VECTOR_UNALIGNED(%v21, 424+0x150)
+ SAVE_VECTOR_UNALIGNED(%v22, 424+0x160)
+ SAVE_VECTOR_UNALIGNED(%v23, 424+0x170)
+ SAVE_VECTOR_UNALIGNED(%v24, 424+0x180)
+ SAVE_VECTOR_UNALIGNED(%v25, 424+0x190)
+ SAVE_VECTOR_UNALIGNED(%v26, 424+0x1A0)
+ SAVE_VECTOR_UNALIGNED(%v27, 424+0x1B0)
+ SAVE_VECTOR_UNALIGNED(%v28, 424+0x1C0)
+ SAVE_VECTOR_UNALIGNED(%v29, 424+0x1D0)
+ SAVE_VECTOR_UNALIGNED(%v30, 424+0x1E0)
+ SAVE_VECTOR_UNALIGNED(%v31, 424+0x1F0)
+
+ li %r3, 0 // return UNW_ESUCCESS
blr
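
Note: unw_getcontext performs the mirror operation: stvx (which likewise requires a 16-byte-aligned address) dumps each vector register into the aligned red-zone slot, and the 16 bytes are then copied word by word into the context. A sketch of one expansion of SAVE_VECTOR_UNALIGNED (for %v0 at offset 424+0x000, under the same SEPARATOR assumption):

    stvx %v0, 0, %r4            // store v0 through the aligned buffer
    lwz  %r5, 0(%r4)            // then move its 16 bytes, word by
    stw  %r5, 424(%r3)          //   word, into the (possibly
    lwz  %r5, 4(%r4)            //   unaligned) context at %r3
    stw  %r5, 428(%r3)
    lwz  %r5, 8(%r4)
    stw  %r5, 432(%r3)
    lwz  %r5, 12(%r4)
    stw  %r5, 436(%r3)
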
diff --git a/src/assembly.h b/src/assembly.h
index 2df930214fae..7806892e9dcf 100644
--- a/src/assembly.h
+++ b/src/assembly.h
@@ -29,8 +29,6 @@
#ifdef _ARCH_PWR8
#define PPC64_HAS_VMX
#endif
-#elif defined(__POWERPC__) || defined(__powerpc__) || defined(__ppc__)
-#define SEPARATOR @
#elif defined(__arm64__)
#define SEPARATOR %%
#else
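
Note: with the PowerPC-specific "#define SEPARATOR @" removed, 32-bit PowerPC falls through to the chain's default branch, whose body lies outside this hunk. A sketch of the resulting selection (the #else body shown here is an assumption based on the release_80 sources, not part of the visible diff):

    #elif defined(__arm64__)
    #define SEPARATOR %%
    #else
    #define SEPARATOR ;         // assumed default; now also used for PowerPC
    #endif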