summary refs log tree commit diff
path: root/libgo/runtime
diff options
context:
space:
mode:
authorLorry Tar Creator <lorry-tar-importer@lorry>2017-05-02 14:43:35 +0000
committerLorry Tar Creator <lorry-tar-importer@lorry>2017-05-02 14:43:35 +0000
commit34efdaf078b01a7387007c4e6bde6db86384c4b7 (patch)
treed503eaf41d085669d1481bb46ec038bc866fece6 /libgo/runtime
parentf733cf303bcdc952c92b81dd62199a40a1f555ec (diff)
downloadgcc-tarball-master.tar.gz
gcc-7.1.0
Diffstat (limited to 'libgo/runtime')
-rw-r--r--libgo/runtime/aeshash.c586
-rw-r--r--libgo/runtime/chan.goc1136
-rw-r--r--libgo/runtime/chan.h75
-rw-r--r--libgo/runtime/cpuprof.goc442
-rw-r--r--libgo/runtime/env_posix.c4
-rw-r--r--libgo/runtime/go-alloc.h11
-rw-r--r--libgo/runtime/go-append.c74
-rw-r--r--libgo/runtime/go-assert-interface.c45
-rw-r--r--libgo/runtime/go-byte-array-to-string.c24
-rw-r--r--libgo/runtime/go-caller.c101
-rw-r--r--libgo/runtime/go-can-convert-interface.c78
-rw-r--r--libgo/runtime/go-cgo.c193
-rw-r--r--libgo/runtime/go-check-interface.c46
-rw-r--r--libgo/runtime/go-construct-map.c26
-rw-r--r--libgo/runtime/go-convert-interface.c132
-rw-r--r--libgo/runtime/go-copy.c22
-rw-r--r--libgo/runtime/go-defer.c85
-rw-r--r--libgo/runtime/go-defer.h47
-rw-r--r--libgo/runtime/go-deferred-recover.c94
-rw-r--r--libgo/runtime/go-eface-compare.c35
-rw-r--r--libgo/runtime/go-eface-val-compare.c33
-rw-r--r--libgo/runtime/go-ffi.c394
-rw-r--r--libgo/runtime/go-ffi.h16
-rw-r--r--libgo/runtime/go-fieldtrack.c23
-rw-r--r--libgo/runtime/go-iface.goc130
-rw-r--r--libgo/runtime/go-int-array-to-string.c89
-rw-r--r--libgo/runtime/go-int-to-string.c69
-rw-r--r--libgo/runtime/go-interface-compare.c35
-rw-r--r--libgo/runtime/go-interface-eface-compare.c34
-rw-r--r--libgo/runtime/go-interface-val-compare.c33
-rw-r--r--libgo/runtime/go-libmain.c3
-rw-r--r--libgo/runtime/go-main.c5
-rw-r--r--libgo/runtime/go-make-slice.c99
-rw-r--r--libgo/runtime/go-map-delete.c61
-rw-r--r--libgo/runtime/go-map-index.c137
-rw-r--r--libgo/runtime/go-map-len.c25
-rw-r--r--libgo/runtime/go-map-range.c103
-rw-r--r--libgo/runtime/go-memclr.c16
-rw-r--r--libgo/runtime/go-memequal.c16
-rw-r--r--libgo/runtime/go-memmove.c16
-rw-r--r--libgo/runtime/go-nanotime.c6
-rw-r--r--libgo/runtime/go-new-map.c142
-rw-r--r--libgo/runtime/go-new.c1
-rw-r--r--libgo/runtime/go-nosys.c56
-rw-r--r--libgo/runtime/go-panic.c112
-rw-r--r--libgo/runtime/go-panic.h52
-rw-r--r--libgo/runtime/go-print.c36
-rw-r--r--libgo/runtime/go-recover.c275
-rw-r--r--libgo/runtime/go-reflect-call.c13
-rw-r--r--libgo/runtime/go-reflect-map.c156
-rw-r--r--libgo/runtime/go-rune.c97
-rw-r--r--libgo/runtime/go-setenv.c37
-rw-r--r--libgo/runtime/go-signal.c688
-rw-r--r--libgo/runtime/go-strcmp.c25
-rw-r--r--libgo/runtime/go-string-to-byte-array.c28
-rw-r--r--libgo/runtime/go-string-to-int-array.c56
-rw-r--r--libgo/runtime/go-strplus.c30
-rw-r--r--libgo/runtime/go-strslice.c3
-rw-r--r--libgo/runtime/go-traceback.c37
-rw-r--r--libgo/runtime/go-trampoline.c113
-rw-r--r--libgo/runtime/go-type-complex.c120
-rw-r--r--libgo/runtime/go-type-eface.c62
-rw-r--r--libgo/runtime/go-type-error.c34
-rw-r--r--libgo/runtime/go-type-float.c92
-rw-r--r--libgo/runtime/go-type-identity.c62
-rw-r--r--libgo/runtime/go-type-interface.c62
-rw-r--r--libgo/runtime/go-type-string.c49
-rw-r--r--libgo/runtime/go-type.h59
-rw-r--r--libgo/runtime/go-unsafe-new.c1
-rw-r--r--libgo/runtime/go-unsafe-newarray.c1
-rw-r--r--libgo/runtime/go-unsafe-pointer.c24
-rw-r--r--libgo/runtime/go-unsetenv.c17
-rw-r--r--libgo/runtime/go-unwind.c147
-rw-r--r--libgo/runtime/heapdump.c130
-rw-r--r--libgo/runtime/interface.h57
-rw-r--r--libgo/runtime/lfstack.goc95
-rw-r--r--libgo/runtime/lock_futex.c204
-rw-r--r--libgo/runtime/lock_sema.c281
-rw-r--r--libgo/runtime/malloc.goc216
-rw-r--r--libgo/runtime/malloc.h232
-rw-r--r--libgo/runtime/map.goc72
-rw-r--r--libgo/runtime/map.h87
-rw-r--r--libgo/runtime/mcache.c6
-rw-r--r--libgo/runtime/mcentral.c12
-rw-r--r--libgo/runtime/mem_posix_memalign.c4
-rw-r--r--libgo/runtime/mgc0.c427
-rw-r--r--libgo/runtime/mheap.c79
-rw-r--r--libgo/runtime/mprof.goc562
-rw-r--r--libgo/runtime/msize.c22
-rw-r--r--libgo/runtime/netpoll.goc472
-rw-r--r--libgo/runtime/netpoll_epoll.c174
-rw-r--r--libgo/runtime/netpoll_kqueue.c118
-rw-r--r--libgo/runtime/netpoll_select.c256
-rw-r--r--libgo/runtime/netpoll_stub.c26
-rw-r--r--libgo/runtime/panic.c203
-rw-r--r--libgo/runtime/parfor.c7
-rw-r--r--libgo/runtime/print.c301
-rw-r--r--libgo/runtime/proc.c2977
-rw-r--r--libgo/runtime/rdebug.goc26
-rw-r--r--libgo/runtime/reflect.goc25
-rw-r--r--libgo/runtime/runtime.c454
-rw-r--r--libgo/runtime/runtime.h670
-rw-r--r--libgo/runtime/runtime1.goc96
-rw-r--r--libgo/runtime/runtime_c.c190
-rw-r--r--libgo/runtime/sema.goc299
-rw-r--r--libgo/runtime/signal_unix.c176
-rw-r--r--libgo/runtime/signal_unix.h22
-rw-r--r--libgo/runtime/sigqueue.goc172
-rw-r--r--libgo/runtime/string.goc123
-rw-r--r--libgo/runtime/thread-linux.c59
-rw-r--r--libgo/runtime/thread-sema.c125
-rw-r--r--libgo/runtime/time.goc353
-rw-r--r--libgo/runtime/yield.c3
113 files changed, 2635 insertions, 14464 deletions
diff --git a/libgo/runtime/aeshash.c b/libgo/runtime/aeshash.c
new file mode 100644
index 0000000000..7f29baa07b
--- /dev/null
+++ b/libgo/runtime/aeshash.c
@@ -0,0 +1,586 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Hash code using AES intrinsics.
+
+#include "runtime.h"
+
+uintptr aeshashbody(void*, uintptr, uintptr, Slice)
+ __asm__(GOSYM_PREFIX "runtime.aeshashbody");
+
+uintptr aeshashbody(void*, uintptr, uintptr, Slice)
+ __attribute__((no_split_stack));
+
+#if (defined(__i386__) || defined(__x86_64__)) && defined(HAVE_AS_X86_AES)
+
+#include <emmintrin.h>
+#include <tmmintrin.h>
+#include <wmmintrin.h>
+
+// Force appropriate CPU level. We won't call here unless the CPU
+// supports it.
+
+#pragma GCC target("ssse3", "aes")
+
+#ifdef __x86_64__
+
+// aeshashbody implements a hash function using AES instructions
+// available in recent x86 processors. Note this is not encryption,
+// just hashing.
+//
+// This is written to produce exactly the same results as the gc
+// implementation, not because that matters, but just to ensure that
+// this does something reasonable.
+uintptr aeshashbody(void* p, uintptr seed, uintptr size, Slice aeskeysched) {
+ __m128i mseed, mseed2, mseed3, mseed4, mseed5, mseed6, mseed7, mseed8;
+ __m128i mval, mval2, mval3, mval4, mval5, mval6, mval7, mval8;
+
+ // Start with hash seed.
+ mseed = _mm_cvtsi64_si128(seed);
+ // Get 16 bits of length.
+ mseed = _mm_insert_epi16(mseed, size, 4);
+ // Repeat length 4 times total.
+ mseed = _mm_shufflehi_epi16(mseed, 0);
+ // Save unscrambled seed.
+ mseed2 = mseed;
+ // XOR in per-process seed.
+ mseed ^= _mm_loadu_si128(aeskeysched.__values);
+ // Scramble seed.
+ mseed = _mm_aesenc_si128(mseed, mseed);
+
+ if (size <= 16) {
+ if (size == 0) {
+ // Return scrambled input seed.
+ return _mm_cvtsi128_si64(_mm_aesenc_si128(mseed, mseed));
+ } else if (size < 16) {
+ if ((((uintptr)(p) + 16) & 0xff0) != 0) {
+ static const uint64 masks[32]
+ __attribute__ ((aligned(16))) =
+ {
+ 0x0000000000000000, 0x0000000000000000,
+ 0x00000000000000ff, 0x0000000000000000,
+ 0x000000000000ffff, 0x0000000000000000,
+ 0x0000000000ffffff, 0x0000000000000000,
+ 0x00000000ffffffff, 0x0000000000000000,
+ 0x000000ffffffffff, 0x0000000000000000,
+ 0x0000ffffffffffff, 0x0000000000000000,
+ 0x00ffffffffffffff, 0x0000000000000000,
+ 0xffffffffffffffff, 0x0000000000000000,
+ 0xffffffffffffffff, 0x00000000000000ff,
+ 0xffffffffffffffff, 0x000000000000ffff,
+ 0xffffffffffffffff, 0x0000000000ffffff,
+ 0xffffffffffffffff, 0x00000000ffffffff,
+ 0xffffffffffffffff, 0x000000ffffffffff,
+ 0xffffffffffffffff, 0x0000ffffffffffff,
+ 0xffffffffffffffff, 0x00ffffffffffffff
+ };
+
+ // 16 bytes loaded at p won't cross a page
+ // boundary, so we can load directly.
+ mval = _mm_loadu_si128(p);
+ mval &= *(const __m128i*)(&masks[size*2]);
+ } else {
+ static const uint64 shifts[32]
+ __attribute__ ((aligned(16))) =
+ {
+ 0x0000000000000000, 0x0000000000000000,
+ 0xffffffffffffff0f, 0xffffffffffffffff,
+ 0xffffffffffff0f0e, 0xffffffffffffffff,
+ 0xffffffffff0f0e0d, 0xffffffffffffffff,
+ 0xffffffff0f0e0d0c, 0xffffffffffffffff,
+ 0xffffff0f0e0d0c0b, 0xffffffffffffffff,
+ 0xffff0f0e0d0c0b0a, 0xffffffffffffffff,
+ 0xff0f0e0d0c0b0a09, 0xffffffffffffffff,
+ 0x0f0e0d0c0b0a0908, 0xffffffffffffffff,
+ 0x0e0d0c0b0a090807, 0xffffffffffffff0f,
+ 0x0d0c0b0a09080706, 0xffffffffffff0f0e,
+ 0x0c0b0a0908070605, 0xffffffffff0f0e0d,
+ 0x0b0a090807060504, 0xffffffff0f0e0d0c,
+ 0x0a09080706050403, 0xffffff0f0e0d0c0b,
+ 0x0908070605040302, 0xffff0f0e0d0c0b0a,
+ 0x0807060504030201, 0xff0f0e0d0c0b0a09,
+ };
+
+ // address ends in 1111xxxx. Might be
+ // up against a page boundary, so load
+ // ending at last byte. Then shift
+ // bytes down using pshufb.
+ mval = _mm_loadu_si128((void*)((char*)p - 16 + size));
+ mval = _mm_shuffle_epi8(mval, *(const __m128i*)(&shifts[size*2]));
+ }
+ } else {
+ mval = _mm_loadu_si128(p);
+ }
+
+ // XOR data with seed.
+ mval ^= mseed;
+ // Scramble combo 3 times.
+ mval = _mm_aesenc_si128(mval, mval);
+ mval = _mm_aesenc_si128(mval, mval);
+ mval = _mm_aesenc_si128(mval, mval);
+ return _mm_cvtsi128_si64(mval);
+ } else if (size <= 32) {
+ // Make second starting seed.
+ mseed2 ^= _mm_loadu_si128((void*)((char*)aeskeysched.__values + 16));
+ mseed2 = _mm_aesenc_si128(mseed2, mseed2);
+ // Load data to be hashed.
+ mval = _mm_loadu_si128(p);
+ mval2 = _mm_loadu_si128((void*)((char*)p + size - 16));
+ // XOR with seed.
+ mval ^= mseed;
+ mval2 ^= mseed2;
+ // Scramble 3 times.
+ mval = _mm_aesenc_si128(mval, mval);
+ mval2 = _mm_aesenc_si128(mval2, mval2);
+ mval = _mm_aesenc_si128(mval, mval);
+ mval2 = _mm_aesenc_si128(mval2, mval2);
+ mval = _mm_aesenc_si128(mval, mval);
+ mval2 = _mm_aesenc_si128(mval2, mval2);
+ // Combine results.
+ mval ^= mval2;
+ return _mm_cvtsi128_si64(mval);
+ } else if (size <= 64) {
+ // Make 3 more starting seeds.
+ mseed3 = mseed2;
+ mseed4 = mseed2;
+ mseed2 ^= _mm_loadu_si128((void*)((char*)aeskeysched.__values + 16));
+ mseed3 ^= _mm_loadu_si128((void*)((char*)aeskeysched.__values + 32));
+ mseed4 ^= _mm_loadu_si128((void*)((char*)aeskeysched.__values + 48));
+ mseed2 = _mm_aesenc_si128(mseed2, mseed2);
+ mseed3 = _mm_aesenc_si128(mseed3, mseed3);
+ mseed4 = _mm_aesenc_si128(mseed4, mseed4);
+
+ mval = _mm_loadu_si128(p);
+ mval2 = _mm_loadu_si128((void*)((char*)p + 16));
+ mval3 = _mm_loadu_si128((void*)((char*)p + size - 32));
+ mval4 = _mm_loadu_si128((void*)((char*)p + size - 16));
+
+ mval ^= mseed;
+ mval2 ^= mseed2;
+ mval3 ^= mseed3;
+ mval4 ^= mseed4;
+
+ mval = _mm_aesenc_si128(mval, mval);
+ mval2 = _mm_aesenc_si128(mval2, mval2);
+ mval3 = _mm_aesenc_si128(mval3, mval3);
+ mval4 = _mm_aesenc_si128(mval4, mval4);
+
+ mval = _mm_aesenc_si128(mval, mval);
+ mval2 = _mm_aesenc_si128(mval2, mval2);
+ mval3 = _mm_aesenc_si128(mval3, mval3);
+ mval4 = _mm_aesenc_si128(mval4, mval4);
+
+ mval = _mm_aesenc_si128(mval, mval);
+ mval2 = _mm_aesenc_si128(mval2, mval2);
+ mval3 = _mm_aesenc_si128(mval3, mval3);
+ mval4 = _mm_aesenc_si128(mval4, mval4);
+
+ mval ^= mval3;
+ mval2 ^= mval4;
+ mval ^= mval2;
+ return _mm_cvtsi128_si64(mval);
+ } else if (size <= 128) {
+ // Make 7 more starting seeds.
+ mseed3 = mseed2;
+ mseed4 = mseed2;
+ mseed5 = mseed2;
+ mseed6 = mseed2;
+ mseed7 = mseed2;
+ mseed8 = mseed2;
+ mseed2 ^= _mm_loadu_si128((void*)((char*)aeskeysched.__values + 16));
+ mseed3 ^= _mm_loadu_si128((void*)((char*)aeskeysched.__values + 32));
+ mseed4 ^= _mm_loadu_si128((void*)((char*)aeskeysched.__values + 48));
+ mseed5 ^= _mm_loadu_si128((void*)((char*)aeskeysched.__values + 64));
+ mseed6 ^= _mm_loadu_si128((void*)((char*)aeskeysched.__values + 80));
+ mseed7 ^= _mm_loadu_si128((void*)((char*)aeskeysched.__values + 96));
+ mseed8 ^= _mm_loadu_si128((void*)((char*)aeskeysched.__values + 112));
+ mseed2 = _mm_aesenc_si128(mseed2, mseed2);
+ mseed3 = _mm_aesenc_si128(mseed3, mseed3);
+ mseed4 = _mm_aesenc_si128(mseed4, mseed4);
+ mseed5 = _mm_aesenc_si128(mseed5, mseed5);
+ mseed6 = _mm_aesenc_si128(mseed6, mseed6);
+ mseed7 = _mm_aesenc_si128(mseed7, mseed7);
+ mseed8 = _mm_aesenc_si128(mseed8, mseed8);
+
+ // Load data.
+ mval = _mm_loadu_si128(p);
+ mval2 = _mm_loadu_si128((void*)((char*)p + 16));
+ mval3 = _mm_loadu_si128((void*)((char*)p + 32));
+ mval4 = _mm_loadu_si128((void*)((char*)p + 48));
+ mval5 = _mm_loadu_si128((void*)((char*)p + size - 64));
+ mval6 = _mm_loadu_si128((void*)((char*)p + size - 48));
+ mval7 = _mm_loadu_si128((void*)((char*)p + size - 32));
+ mval8 = _mm_loadu_si128((void*)((char*)p + size - 16));
+
+ // XOR with seed.
+ mval ^= mseed;
+ mval2 ^= mseed2;
+ mval3 ^= mseed3;
+ mval4 ^= mseed4;
+ mval5 ^= mseed5;
+ mval6 ^= mseed6;
+ mval7 ^= mseed7;
+ mval8 ^= mseed8;
+
+ // Scramble 3 times.
+ mval = _mm_aesenc_si128(mval, mval);
+ mval2 = _mm_aesenc_si128(mval2, mval2);
+ mval3 = _mm_aesenc_si128(mval3, mval3);
+ mval4 = _mm_aesenc_si128(mval4, mval4);
+ mval5 = _mm_aesenc_si128(mval5, mval5);
+ mval6 = _mm_aesenc_si128(mval6, mval6);
+ mval7 = _mm_aesenc_si128(mval7, mval7);
+ mval8 = _mm_aesenc_si128(mval8, mval8);
+
+ mval = _mm_aesenc_si128(mval, mval);
+ mval2 = _mm_aesenc_si128(mval2, mval2);
+ mval3 = _mm_aesenc_si128(mval3, mval3);
+ mval4 = _mm_aesenc_si128(mval4, mval4);
+ mval5 = _mm_aesenc_si128(mval5, mval5);
+ mval6 = _mm_aesenc_si128(mval6, mval6);
+ mval7 = _mm_aesenc_si128(mval7, mval7);
+ mval8 = _mm_aesenc_si128(mval8, mval8);
+
+ mval = _mm_aesenc_si128(mval, mval);
+ mval2 = _mm_aesenc_si128(mval2, mval2);
+ mval3 = _mm_aesenc_si128(mval3, mval3);
+ mval4 = _mm_aesenc_si128(mval4, mval4);
+ mval5 = _mm_aesenc_si128(mval5, mval5);
+ mval6 = _mm_aesenc_si128(mval6, mval6);
+ mval7 = _mm_aesenc_si128(mval7, mval7);
+ mval8 = _mm_aesenc_si128(mval8, mval8);
+
+ // Combine results.
+ mval ^= mval5;
+ mval2 ^= mval6;
+ mval3 ^= mval7;
+ mval4 ^= mval8;
+ mval ^= mval3;
+ mval2 ^= mval4;
+ mval ^= mval2;
+ return _mm_cvtsi128_si64(mval);
+ } else {
+ // Make 7 more starting seeds.
+ mseed3 = mseed2;
+ mseed4 = mseed2;
+ mseed5 = mseed2;
+ mseed6 = mseed2;
+ mseed7 = mseed2;
+ mseed8 = mseed2;
+ mseed2 ^= _mm_loadu_si128((void*)((char*)aeskeysched.__values + 16));
+ mseed3 ^= _mm_loadu_si128((void*)((char*)aeskeysched.__values + 32));
+ mseed4 ^= _mm_loadu_si128((void*)((char*)aeskeysched.__values + 48));
+ mseed5 ^= _mm_loadu_si128((void*)((char*)aeskeysched.__values + 64));
+ mseed6 ^= _mm_loadu_si128((void*)((char*)aeskeysched.__values + 80));
+ mseed7 ^= _mm_loadu_si128((void*)((char*)aeskeysched.__values + 96));
+ mseed8 ^= _mm_loadu_si128((void*)((char*)aeskeysched.__values + 112));
+ mseed2 = _mm_aesenc_si128(mseed2, mseed2);
+ mseed3 = _mm_aesenc_si128(mseed3, mseed3);
+ mseed4 = _mm_aesenc_si128(mseed4, mseed4);
+ mseed5 = _mm_aesenc_si128(mseed5, mseed5);
+ mseed6 = _mm_aesenc_si128(mseed6, mseed6);
+ mseed7 = _mm_aesenc_si128(mseed7, mseed7);
+ mseed8 = _mm_aesenc_si128(mseed8, mseed8);
+
+ // Start with last (possibly overlapping) block.
+ mval = _mm_loadu_si128((void*)((char*)p + size - 128));
+ mval2 = _mm_loadu_si128((void*)((char*)p + size - 112));
+ mval3 = _mm_loadu_si128((void*)((char*)p + size - 96));
+ mval4 = _mm_loadu_si128((void*)((char*)p + size - 80));
+ mval5 = _mm_loadu_si128((void*)((char*)p + size - 64));
+ mval6 = _mm_loadu_si128((void*)((char*)p + size - 48));
+ mval7 = _mm_loadu_si128((void*)((char*)p + size - 32));
+ mval8 = _mm_loadu_si128((void*)((char*)p + size - 16));
+
+ // XOR in seed.
+ mval ^= mseed;
+ mval2 ^= mseed2;
+ mval3 ^= mseed3;
+ mval4 ^= mseed4;
+ mval5 ^= mseed5;
+ mval6 ^= mseed6;
+ mval7 ^= mseed7;
+ mval8 ^= mseed8;
+
+ // Compute number of remaining 128-byte blocks.
+ size--;
+ size >>= 7;
+ do {
+ // Scramble state.
+ mval = _mm_aesenc_si128(mval, mval);
+ mval2 = _mm_aesenc_si128(mval2, mval2);
+ mval3 = _mm_aesenc_si128(mval3, mval3);
+ mval4 = _mm_aesenc_si128(mval4, mval4);
+ mval5 = _mm_aesenc_si128(mval5, mval5);
+ mval6 = _mm_aesenc_si128(mval6, mval6);
+ mval7 = _mm_aesenc_si128(mval7, mval7);
+ mval8 = _mm_aesenc_si128(mval8, mval8);
+
+ // Scramble state, XOR in a block.
+ mval = _mm_aesenc_si128(mval, _mm_loadu_si128(p));
+ mval2 = _mm_aesenc_si128(mval2, _mm_loadu_si128((void*)((char*)p + 16)));
+ mval3 = _mm_aesenc_si128(mval3, _mm_loadu_si128((void*)((char*)p + 32)));
+ mval4 = _mm_aesenc_si128(mval4, _mm_loadu_si128((void*)((char*)p + 48)));
+ mval5 = _mm_aesenc_si128(mval5, _mm_loadu_si128((void*)((char*)p + 64)));
+ mval6 = _mm_aesenc_si128(mval6, _mm_loadu_si128((void*)((char*)p + 80)));
+ mval7 = _mm_aesenc_si128(mval7, _mm_loadu_si128((void*)((char*)p + 96)));
+ mval8 = _mm_aesenc_si128(mval8, _mm_loadu_si128((void*)((char*)p + 112)));
+
+ p = (void*)((char*)p + 128);
+ } while (--size > 0);
+
+ // 3 more scrambles to finish.
+ mval = _mm_aesenc_si128(mval, mval);
+ mval2 = _mm_aesenc_si128(mval2, mval2);
+ mval3 = _mm_aesenc_si128(mval3, mval3);
+ mval4 = _mm_aesenc_si128(mval4, mval4);
+ mval5 = _mm_aesenc_si128(mval5, mval5);
+ mval6 = _mm_aesenc_si128(mval6, mval6);
+ mval7 = _mm_aesenc_si128(mval7, mval7);
+ mval8 = _mm_aesenc_si128(mval8, mval8);
+ mval = _mm_aesenc_si128(mval, mval);
+ mval2 = _mm_aesenc_si128(mval2, mval2);
+ mval3 = _mm_aesenc_si128(mval3, mval3);
+ mval4 = _mm_aesenc_si128(mval4, mval4);
+ mval5 = _mm_aesenc_si128(mval5, mval5);
+ mval6 = _mm_aesenc_si128(mval6, mval6);
+ mval7 = _mm_aesenc_si128(mval7, mval7);
+ mval8 = _mm_aesenc_si128(mval8, mval8);
+ mval = _mm_aesenc_si128(mval, mval);
+ mval2 = _mm_aesenc_si128(mval2, mval2);
+ mval3 = _mm_aesenc_si128(mval3, mval3);
+ mval4 = _mm_aesenc_si128(mval4, mval4);
+ mval5 = _mm_aesenc_si128(mval5, mval5);
+ mval6 = _mm_aesenc_si128(mval6, mval6);
+ mval7 = _mm_aesenc_si128(mval7, mval7);
+ mval8 = _mm_aesenc_si128(mval8, mval8);
+
+ mval ^= mval5;
+ mval2 ^= mval6;
+ mval3 ^= mval7;
+ mval4 ^= mval8;
+ mval ^= mval3;
+ mval2 ^= mval4;
+ mval ^= mval2;
+ return _mm_cvtsi128_si64(mval);
+ }
+}
+
+#else // !defined(__x86_64__)
+
+// The 32-bit version of aeshashbody.
+
+uintptr aeshashbody(void* p, uintptr seed, uintptr size, Slice aeskeysched) {
+ __m128i mseed, mseed2, mseed3, mseed4;
+ __m128i mval, mval2, mval3, mval4;
+
+ // Start with hash seed.
+ mseed = _mm_cvtsi32_si128(seed);
+ // Get 16 bits of length.
+ mseed = _mm_insert_epi16(mseed, size, 4);
+ // Replace size with its low 2 bytes repeated 4 times.
+ mseed = _mm_shufflehi_epi16(mseed, 0);
+ // Save unscrambled seed.
+ mseed2 = mseed;
+ // XOR in per-process seed.
+ mseed ^= _mm_loadu_si128(aeskeysched.__values);
+ // Scramble seed.
+ mseed = _mm_aesenc_si128(mseed, mseed);
+
+ if (size <= 16) {
+ if (size == 0) {
+ // Return scrambled input seed.
+ return _mm_cvtsi128_si32(_mm_aesenc_si128(mseed, mseed));
+ } else if (size < 16) {
+ if ((((uintptr)(p) + 16) & 0xff0) != 0) {
+ static const uint64 masks[32]
+ __attribute__ ((aligned(16))) =
+ {
+ 0x0000000000000000, 0x0000000000000000,
+ 0x00000000000000ff, 0x0000000000000000,
+ 0x000000000000ffff, 0x0000000000000000,
+ 0x0000000000ffffff, 0x0000000000000000,
+ 0x00000000ffffffff, 0x0000000000000000,
+ 0x000000ffffffffff, 0x0000000000000000,
+ 0x0000ffffffffffff, 0x0000000000000000,
+ 0x00ffffffffffffff, 0x0000000000000000,
+ 0xffffffffffffffff, 0x0000000000000000,
+ 0xffffffffffffffff, 0x00000000000000ff,
+ 0xffffffffffffffff, 0x000000000000ffff,
+ 0xffffffffffffffff, 0x0000000000ffffff,
+ 0xffffffffffffffff, 0x00000000ffffffff,
+ 0xffffffffffffffff, 0x000000ffffffffff,
+ 0xffffffffffffffff, 0x0000ffffffffffff,
+ 0xffffffffffffffff, 0x00ffffffffffffff
+ };
+
+ // 16 bytes loaded at p won't cross a page
+ // boundary, so we can load it directly.
+ mval = _mm_loadu_si128(p);
+ mval &= *(const __m128i*)(&masks[size*2]);
+ } else {
+ static const uint64 shifts[32]
+ __attribute__ ((aligned(16))) =
+ {
+ 0x0000000000000000, 0x0000000000000000,
+ 0xffffffffffffff0f, 0xffffffffffffffff,
+ 0xffffffffffff0f0e, 0xffffffffffffffff,
+ 0xffffffffff0f0e0d, 0xffffffffffffffff,
+ 0xffffffff0f0e0d0c, 0xffffffffffffffff,
+ 0xffffff0f0e0d0c0b, 0xffffffffffffffff,
+ 0xffff0f0e0d0c0b0a, 0xffffffffffffffff,
+ 0xff0f0e0d0c0b0a09, 0xffffffffffffffff,
+ 0x0f0e0d0c0b0a0908, 0xffffffffffffffff,
+ 0x0e0d0c0b0a090807, 0xffffffffffffff0f,
+ 0x0d0c0b0a09080706, 0xffffffffffff0f0e,
+ 0x0c0b0a0908070605, 0xffffffffff0f0e0d,
+ 0x0b0a090807060504, 0xffffffff0f0e0d0c,
+ 0x0a09080706050403, 0xffffff0f0e0d0c0b,
+ 0x0908070605040302, 0xffff0f0e0d0c0b0a,
+ 0x0807060504030201, 0xff0f0e0d0c0b0a09,
+ };
+
+ // address ends in 1111xxxx. Might be
+ // up against a page boundary, so load
+ // ending at last byte. Then shift
+ // bytes down using pshufb.
+ mval = _mm_loadu_si128((void*)((char*)p - 16 + size));
+ mval = _mm_shuffle_epi8(mval, *(const __m128i*)(&shifts[size*2]));
+ }
+ } else {
+ mval = _mm_loadu_si128(p);
+ }
+
+ // Scramble input, XOR in seed.
+ mval = _mm_aesenc_si128(mval, mseed);
+ mval = _mm_aesenc_si128(mval, mval);
+ mval = _mm_aesenc_si128(mval, mval);
+ return _mm_cvtsi128_si32(mval);
+ } else if (size <= 32) {
+ // Make second starting seed.
+ mseed2 ^= _mm_loadu_si128((void*)((char*)aeskeysched.__values + 16));
+ mseed2 = _mm_aesenc_si128(mseed2, mseed2);
+ // Load data to be hashed.
+ mval = _mm_loadu_si128(p);
+ mval2 = _mm_loadu_si128((void*)((char*)p + size - 16));
+
+ // Scramble 3 times.
+ mval = _mm_aesenc_si128(mval, mseed);
+ mval2 = _mm_aesenc_si128(mval2, mseed2);
+ mval = _mm_aesenc_si128(mval, mval);
+ mval2 = _mm_aesenc_si128(mval2, mval2);
+ mval = _mm_aesenc_si128(mval, mval);
+ mval2 = _mm_aesenc_si128(mval2, mval2);
+
+ // Combine results.
+ mval ^= mval2;
+ return _mm_cvtsi128_si32(mval);
+ } else if (size <= 64) {
+ // Make 3 more starting seeds.
+ mseed3 = mseed2;
+ mseed4 = mseed2;
+ mseed2 ^= _mm_loadu_si128((void*)((char*)aeskeysched.__values + 16));
+ mseed3 ^= _mm_loadu_si128((void*)((char*)aeskeysched.__values + 32));
+ mseed4 ^= _mm_loadu_si128((void*)((char*)aeskeysched.__values + 48));
+ mseed2 = _mm_aesenc_si128(mseed2, mseed2);
+ mseed3 = _mm_aesenc_si128(mseed3, mseed3);
+ mseed4 = _mm_aesenc_si128(mseed4, mseed4);
+
+ mval = _mm_loadu_si128(p);
+ mval2 = _mm_loadu_si128((void*)((char*)p + 16));
+ mval3 = _mm_loadu_si128((void*)((char*)p + size - 32));
+ mval4 = _mm_loadu_si128((void*)((char*)p + size - 16));
+
+ mval = _mm_aesenc_si128(mval, mseed);
+ mval2 = _mm_aesenc_si128(mval2, mseed2);
+ mval3 = _mm_aesenc_si128(mval3, mseed3);
+ mval4 = _mm_aesenc_si128(mval4, mseed4);
+
+ mval = _mm_aesenc_si128(mval, mval);
+ mval2 = _mm_aesenc_si128(mval2, mval2);
+ mval3 = _mm_aesenc_si128(mval3, mval3);
+ mval4 = _mm_aesenc_si128(mval4, mval4);
+
+ mval = _mm_aesenc_si128(mval, mval);
+ mval2 = _mm_aesenc_si128(mval2, mval2);
+ mval3 = _mm_aesenc_si128(mval3, mval3);
+ mval4 = _mm_aesenc_si128(mval4, mval4);
+
+ mval ^= mval3;
+ mval2 ^= mval4;
+ mval ^= mval2;
+ return _mm_cvtsi128_si32(mval);
+ } else {
+ // Make 3 more starting seeds.
+ mseed3 = mseed2;
+ mseed4 = mseed2;
+ mseed2 ^= _mm_loadu_si128((void*)((char*)aeskeysched.__values + 16));
+ mseed3 ^= _mm_loadu_si128((void*)((char*)aeskeysched.__values + 32));
+ mseed4 ^= _mm_loadu_si128((void*)((char*)aeskeysched.__values + 48));
+ mseed2 = _mm_aesenc_si128(mseed2, mseed2);
+ mseed3 = _mm_aesenc_si128(mseed3, mseed3);
+ mseed4 = _mm_aesenc_si128(mseed4, mseed4);
+
+ // Start with last (possibly overlapping) block.
+ mval = _mm_loadu_si128((void*)((char*)p + size - 64));
+ mval2 = _mm_loadu_si128((void*)((char*)p + size - 48));
+ mval3 = _mm_loadu_si128((void*)((char*)p + size - 32));
+ mval4 = _mm_loadu_si128((void*)((char*)p + size - 16));
+
+ // Scramble state once.
+ mval = _mm_aesenc_si128(mval, mseed);
+ mval2 = _mm_aesenc_si128(mval2, mseed2);
+ mval3 = _mm_aesenc_si128(mval3, mseed3);
+ mval4 = _mm_aesenc_si128(mval4, mseed4);
+
+ // Compute number of remaining 64-byte blocks.
+ size--;
+ size >>= 6;
+ do {
+ // Scramble state, XOR in a block.
+ mval = _mm_aesenc_si128(mval, _mm_loadu_si128(p));
+ mval2 = _mm_aesenc_si128(mval2, _mm_loadu_si128((void*)((char*)p + 16)));
+ mval3 = _mm_aesenc_si128(mval3, _mm_loadu_si128((void*)((char*)p + 32)));
+ mval4 = _mm_aesenc_si128(mval4, _mm_loadu_si128((void*)((char*)p + 48)));
+
+ // Scramble state.
+ mval = _mm_aesenc_si128(mval, mval);
+ mval2 = _mm_aesenc_si128(mval2, mval2);
+ mval3 = _mm_aesenc_si128(mval3, mval3);
+ mval4 = _mm_aesenc_si128(mval4, mval4);
+
+ p = (void*)((char*)p + 64);
+ } while (--size > 0);
+
+ // 2 more scrambles to finish.
+ mval = _mm_aesenc_si128(mval, mval);
+ mval2 = _mm_aesenc_si128(mval2, mval2);
+ mval3 = _mm_aesenc_si128(mval3, mval3);
+ mval4 = _mm_aesenc_si128(mval4, mval4);
+
+ mval = _mm_aesenc_si128(mval, mval);
+ mval2 = _mm_aesenc_si128(mval2, mval2);
+ mval3 = _mm_aesenc_si128(mval3, mval3);
+ mval4 = _mm_aesenc_si128(mval4, mval4);
+
+ mval ^= mval3;
+ mval2 ^= mval4;
+ mval ^= mval2;
+ return _mm_cvtsi128_si32(mval);
+ }
+}
+
+#endif // !defined(__x86_64__)
+
+#else // !defined(__i386__) && !defined(__x86_64__) || !defined(HAVE_AS_X86_AES)
+
+uintptr aeshashbody(void* p __attribute__((unused)),
+ uintptr seed __attribute__((unused)),
+ uintptr size __attribute__((unused)),
+ Slice aeskeysched __attribute__((unused))) {
+ // We should never get here on a non-x86 system.
+ runtime_throw("impossible call to aeshashbody");
+}
+
+#endif // !defined(__i386__) && !defined(__x86_64__) || !defined(HAVE_AS_X86_AES)
diff --git a/libgo/runtime/chan.goc b/libgo/runtime/chan.goc
deleted file mode 100644
index 0cc823d8ac..0000000000
--- a/libgo/runtime/chan.goc
+++ /dev/null
@@ -1,1136 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package runtime
-#include "runtime.h"
-#include "arch.h"
-#include "go-type.h"
-#include "malloc.h"
-#include "chan.h"
-
-uint32 runtime_Hchansize = sizeof(Hchan);
-
-static void dequeueg(WaitQ*);
-static SudoG* dequeue(WaitQ*);
-static void enqueue(WaitQ*, SudoG*);
-
-static Hchan*
-makechan(ChanType *t, int64 hint)
-{
- Hchan *c;
- uintptr n;
- const Type *elem;
-
- elem = t->__element_type;
-
- // compiler checks this but be safe.
- if(elem->__size >= (1<<16))
- runtime_throw("makechan: invalid channel element type");
-
- if(hint < 0 || (intgo)hint != hint || (elem->__size > 0 && (uintptr)hint > (MaxMem - sizeof(*c)) / elem->__size))
- runtime_panicstring("makechan: size out of range");
-
- n = sizeof(*c);
- n = ROUND(n, elem->__align);
-
- // allocate memory in one call
- c = (Hchan*)runtime_mallocgc(sizeof(*c) + hint*elem->__size, (uintptr)t | TypeInfo_Chan, 0);
- c->elemsize = elem->__size;
- c->elemtype = elem;
- c->dataqsiz = hint;
-
- if(debug)
- runtime_printf("makechan: chan=%p; elemsize=%D; dataqsiz=%D\n",
- c, (int64)elem->__size, (int64)c->dataqsiz);
-
- return c;
-}
-
-func reflect.makechan(t *ChanType, size uint64) (c *Hchan) {
- c = makechan(t, size);
-}
-
-Hchan*
-__go_new_channel(ChanType *t, uintptr hint)
-{
- return makechan(t, hint);
-}
-
-Hchan*
-__go_new_channel_big(ChanType *t, uint64 hint)
-{
- return makechan(t, hint);
-}
-
-/*
- * generic single channel send/recv
- * if the bool pointer is nil,
- * then the full exchange will
- * occur. if pres is not nil,
- * then the protocol will not
- * sleep but return if it could
- * not complete.
- *
- * sleep can wake up with g->param == nil
- * when a channel involved in the sleep has
- * been closed. it is easiest to loop and re-run
- * the operation; we'll see that it's now closed.
- */
-static bool
-chansend(ChanType *t, Hchan *c, byte *ep, bool block, void *pc)
-{
- USED(pc);
- SudoG *sg;
- SudoG mysg;
- G* gp;
- int64 t0;
- G* g;
-
- g = runtime_g();
-
- if(c == nil) {
- USED(t);
- if(!block)
- return false;
- runtime_park(nil, nil, "chan send (nil chan)");
- return false; // not reached
- }
-
- if(runtime_gcwaiting())
- runtime_gosched();
-
- if(debug) {
- runtime_printf("chansend: chan=%p\n", c);
- }
-
- t0 = 0;
- mysg.releasetime = 0;
- if(runtime_blockprofilerate > 0) {
- t0 = runtime_cputicks();
- mysg.releasetime = -1;
- }
-
- runtime_lock(c);
- if(c->closed)
- goto closed;
-
- if(c->dataqsiz > 0)
- goto asynch;
-
- sg = dequeue(&c->recvq);
- if(sg != nil) {
- runtime_unlock(c);
-
- gp = sg->g;
- gp->param = sg;
- if(sg->elem != nil)
- runtime_memmove(sg->elem, ep, c->elemsize);
- if(sg->releasetime)
- sg->releasetime = runtime_cputicks();
- runtime_ready(gp);
- return true;
- }
-
- if(!block) {
- runtime_unlock(c);
- return false;
- }
-
- mysg.elem = ep;
- mysg.g = g;
- mysg.selectdone = nil;
- g->param = nil;
- enqueue(&c->sendq, &mysg);
- runtime_parkunlock(c, "chan send");
-
- if(g->param == nil) {
- runtime_lock(c);
- if(!c->closed)
- runtime_throw("chansend: spurious wakeup");
- goto closed;
- }
-
- if(mysg.releasetime > 0)
- runtime_blockevent(mysg.releasetime - t0, 2);
-
- return true;
-
-asynch:
- if(c->closed)
- goto closed;
-
- if(c->qcount >= c->dataqsiz) {
- if(!block) {
- runtime_unlock(c);
- return false;
- }
- mysg.g = g;
- mysg.elem = nil;
- mysg.selectdone = nil;
- enqueue(&c->sendq, &mysg);
- runtime_parkunlock(c, "chan send");
-
- runtime_lock(c);
- goto asynch;
- }
-
- runtime_memmove(chanbuf(c, c->sendx), ep, c->elemsize);
- if(++c->sendx == c->dataqsiz)
- c->sendx = 0;
- c->qcount++;
-
- sg = dequeue(&c->recvq);
- if(sg != nil) {
- gp = sg->g;
- runtime_unlock(c);
- if(sg->releasetime)
- sg->releasetime = runtime_cputicks();
- runtime_ready(gp);
- } else
- runtime_unlock(c);
- if(mysg.releasetime > 0)
- runtime_blockevent(mysg.releasetime - t0, 2);
- return true;
-
-closed:
- runtime_unlock(c);
- runtime_panicstring("send on closed channel");
- return false; // not reached
-}
-
-
-static bool
-chanrecv(ChanType *t, Hchan* c, byte *ep, bool block, bool *received)
-{
- SudoG *sg;
- SudoG mysg;
- G *gp;
- int64 t0;
- G *g;
-
- if(runtime_gcwaiting())
- runtime_gosched();
-
- if(debug)
- runtime_printf("chanrecv: chan=%p\n", c);
-
- g = runtime_g();
-
- if(c == nil) {
- USED(t);
- if(!block)
- return false;
- runtime_park(nil, nil, "chan receive (nil chan)");
- return false; // not reached
- }
-
- t0 = 0;
- mysg.releasetime = 0;
- if(runtime_blockprofilerate > 0) {
- t0 = runtime_cputicks();
- mysg.releasetime = -1;
- }
-
- runtime_lock(c);
- if(c->dataqsiz > 0)
- goto asynch;
-
- if(c->closed)
- goto closed;
-
- sg = dequeue(&c->sendq);
- if(sg != nil) {
- runtime_unlock(c);
-
- if(ep != nil)
- runtime_memmove(ep, sg->elem, c->elemsize);
- gp = sg->g;
- gp->param = sg;
- if(sg->releasetime)
- sg->releasetime = runtime_cputicks();
- runtime_ready(gp);
-
- if(received != nil)
- *received = true;
- return true;
- }
-
- if(!block) {
- runtime_unlock(c);
- return false;
- }
-
- mysg.elem = ep;
- mysg.g = g;
- mysg.selectdone = nil;
- g->param = nil;
- enqueue(&c->recvq, &mysg);
- runtime_parkunlock(c, "chan receive");
-
- if(g->param == nil) {
- runtime_lock(c);
- if(!c->closed)
- runtime_throw("chanrecv: spurious wakeup");
- goto closed;
- }
-
- if(received != nil)
- *received = true;
- if(mysg.releasetime > 0)
- runtime_blockevent(mysg.releasetime - t0, 2);
- return true;
-
-asynch:
- if(c->qcount <= 0) {
- if(c->closed)
- goto closed;
-
- if(!block) {
- runtime_unlock(c);
- if(received != nil)
- *received = false;
- return false;
- }
- mysg.g = g;
- mysg.elem = nil;
- mysg.selectdone = nil;
- enqueue(&c->recvq, &mysg);
- runtime_parkunlock(c, "chan receive");
-
- runtime_lock(c);
- goto asynch;
- }
-
- if(ep != nil)
- runtime_memmove(ep, chanbuf(c, c->recvx), c->elemsize);
- runtime_memclr(chanbuf(c, c->recvx), c->elemsize);
- if(++c->recvx == c->dataqsiz)
- c->recvx = 0;
- c->qcount--;
-
- sg = dequeue(&c->sendq);
- if(sg != nil) {
- gp = sg->g;
- runtime_unlock(c);
- if(sg->releasetime)
- sg->releasetime = runtime_cputicks();
- runtime_ready(gp);
- } else
- runtime_unlock(c);
-
- if(received != nil)
- *received = true;
- if(mysg.releasetime > 0)
- runtime_blockevent(mysg.releasetime - t0, 2);
- return true;
-
-closed:
- if(ep != nil)
- runtime_memclr(ep, c->elemsize);
- if(received != nil)
- *received = false;
- runtime_unlock(c);
- if(mysg.releasetime > 0)
- runtime_blockevent(mysg.releasetime - t0, 2);
- return true;
-}
-
-// The compiler generates a call to __go_send_small to send a value 8
-// bytes or smaller.
-void
-__go_send_small(ChanType *t, Hchan* c, uint64 val)
-{
- union
- {
- byte b[sizeof(uint64)];
- uint64 v;
- } u;
- byte *v;
-
- u.v = val;
-#ifndef WORDS_BIGENDIAN
- v = u.b;
-#else
- v = u.b + sizeof(uint64) - t->__element_type->__size;
-#endif
- chansend(t, c, v, true, runtime_getcallerpc(&t));
-}
-
-// The compiler generates a call to __go_send_big to send a value
-// larger than 8 bytes or smaller.
-void
-__go_send_big(ChanType *t, Hchan* c, byte* v)
-{
- chansend(t, c, v, true, runtime_getcallerpc(&t));
-}
-
-// The compiler generates a call to __go_receive to receive a
-// value from a channel.
-void
-__go_receive(ChanType *t, Hchan* c, byte* v)
-{
- chanrecv(t, c, v, true, nil);
-}
-
-_Bool runtime_chanrecv2(ChanType *t, Hchan* c, byte* v)
- __asm__ (GOSYM_PREFIX "runtime.chanrecv2");
-
-_Bool
-runtime_chanrecv2(ChanType *t, Hchan* c, byte* v)
-{
- bool received = false;
-
- chanrecv(t, c, v, true, &received);
- return received;
-}
-
-// compiler implements
-//
-// select {
-// case c <- v:
-// ... foo
-// default:
-// ... bar
-// }
-//
-// as
-//
-// if selectnbsend(c, v) {
-// ... foo
-// } else {
-// ... bar
-// }
-//
-func selectnbsend(t *ChanType, c *Hchan, elem *byte) (selected bool) {
- selected = chansend(t, c, elem, false, runtime_getcallerpc(&t));
-}
-
-// compiler implements
-//
-// select {
-// case v = <-c:
-// ... foo
-// default:
-// ... bar
-// }
-//
-// as
-//
-// if selectnbrecv(&v, c) {
-// ... foo
-// } else {
-// ... bar
-// }
-//
-func selectnbrecv(t *ChanType, elem *byte, c *Hchan) (selected bool) {
- selected = chanrecv(t, c, elem, false, nil);
-}
-
-// compiler implements
-//
-// select {
-// case v, ok = <-c:
-// ... foo
-// default:
-// ... bar
-// }
-//
-// as
-//
-// if c != nil && selectnbrecv2(&v, &ok, c) {
-// ... foo
-// } else {
-// ... bar
-// }
-//
-func selectnbrecv2(t *ChanType, elem *byte, received *bool, c *Hchan) (selected bool) {
- bool r;
-
- selected = chanrecv(t, c, elem, false, received == nil ? nil : &r);
- if(received != nil)
- *received = r;
-}
-
-func reflect.chansend(t *ChanType, c *Hchan, elem *byte, nb bool) (selected bool) {
- selected = chansend(t, c, elem, !nb, runtime_getcallerpc(&t));
-}
-
-func reflect.chanrecv(t *ChanType, c *Hchan, nb bool, elem *byte) (selected bool, received bool) {
- received = false;
- selected = chanrecv(t, c, elem, !nb, &received);
-}
-
-static Select* newselect(int32);
-
-func newselect(size int32) (sel *byte) {
- sel = (byte*)newselect(size);
-}
-
-static Select*
-newselect(int32 size)
-{
- int32 n;
- Select *sel;
-
- n = 0;
- if(size > 1)
- n = size-1;
-
- // allocate all the memory we need in a single allocation
- // start with Select with size cases
- // then lockorder with size entries
- // then pollorder with size entries
- sel = runtime_mal(sizeof(*sel) +
- n*sizeof(sel->scase[0]) +
- size*sizeof(sel->lockorder[0]) +
- size*sizeof(sel->pollorder[0]));
-
- sel->tcase = size;
- sel->ncase = 0;
- sel->lockorder = (void*)(sel->scase + size);
- sel->pollorder = (void*)(sel->lockorder + size);
-
- if(debug)
- runtime_printf("newselect s=%p size=%d\n", sel, size);
- return sel;
-}
-
-// cut in half to give stack a chance to split
-static void selectsend(Select *sel, Hchan *c, int index, void *elem);
-
-func selectsend(sel *Select, c *Hchan, elem *byte, index int32) {
- // nil cases do not compete
- if(c != nil)
- selectsend(sel, c, index, elem);
-}
-
-static void
-selectsend(Select *sel, Hchan *c, int index, void *elem)
-{
- int32 i;
- Scase *cas;
-
- i = sel->ncase;
- if(i >= sel->tcase)
- runtime_throw("selectsend: too many cases");
- sel->ncase = i+1;
- cas = &sel->scase[i];
-
- cas->index = index;
- cas->chan = c;
- cas->kind = CaseSend;
- cas->sg.elem = elem;
-
- if(debug)
- runtime_printf("selectsend s=%p index=%d chan=%p\n",
- sel, cas->index, cas->chan);
-}
-
-// cut in half to give stack a chance to split
-static void selectrecv(Select *sel, Hchan *c, int index, void *elem, bool*);
-
-func selectrecv(sel *Select, c *Hchan, elem *byte, index int32) {
- // nil cases do not compete
- if(c != nil)
- selectrecv(sel, c, index, elem, nil);
-}
-
-func selectrecv2(sel *Select, c *Hchan, elem *byte, received *bool, index int32) {
- // nil cases do not compete
- if(c != nil)
- selectrecv(sel, c, index, elem, received);
-}
-
-static void
-selectrecv(Select *sel, Hchan *c, int index, void *elem, bool *received)
-{
- int32 i;
- Scase *cas;
-
- i = sel->ncase;
- if(i >= sel->tcase)
- runtime_throw("selectrecv: too many cases");
- sel->ncase = i+1;
- cas = &sel->scase[i];
- cas->index = index;
- cas->chan = c;
-
- cas->kind = CaseRecv;
- cas->sg.elem = elem;
- cas->receivedp = received;
-
- if(debug)
- runtime_printf("selectrecv s=%p index=%d chan=%p\n",
- sel, cas->index, cas->chan);
-}
-
-// cut in half to give stack a chance to split
-static void selectdefault(Select*, int);
-
-func selectdefault(sel *Select, index int32) {
- selectdefault(sel, index);
-}
-
-static void
-selectdefault(Select *sel, int32 index)
-{
- int32 i;
- Scase *cas;
-
- i = sel->ncase;
- if(i >= sel->tcase)
- runtime_throw("selectdefault: too many cases");
- sel->ncase = i+1;
- cas = &sel->scase[i];
- cas->index = index;
- cas->chan = nil;
-
- cas->kind = CaseDefault;
-
- if(debug)
- runtime_printf("selectdefault s=%p index=%d\n",
- sel, cas->index);
-}
-
-static void
-sellock(Select *sel)
-{
- uint32 i;
- Hchan *c, *c0;
-
- c = nil;
- for(i=0; i<sel->ncase; i++) {
- c0 = sel->lockorder[i];
- if(c0 && c0 != c) {
- c = sel->lockorder[i];
- runtime_lock(c);
- }
- }
-}
-
-static void
-selunlock(Select *sel)
-{
- int32 i, n, r;
- Hchan *c;
-
- // We must be very careful here to not touch sel after we have unlocked
- // the last lock, because sel can be freed right after the last unlock.
- // Consider the following situation.
- // First M calls runtime_park() in runtime_selectgo() passing the sel.
- // Once runtime_park() has unlocked the last lock, another M makes
- // the G that calls select runnable again and schedules it for execution.
- // When the G runs on another M, it locks all the locks and frees sel.
- // Now if the first M touches sel, it will access freed memory.
- n = (int32)sel->ncase;
- r = 0;
- // skip the default case
- if(n>0 && sel->lockorder[0] == nil)
- r = 1;
- for(i = n-1; i >= r; i--) {
- c = sel->lockorder[i];
- if(i>0 && sel->lockorder[i-1] == c)
- continue; // will unlock it on the next iteration
- runtime_unlock(c);
- }
-}
-
-static bool
-selparkcommit(G *gp, void *sel)
-{
- USED(gp);
- selunlock(sel);
- return true;
-}
-
-func block() {
- runtime_park(nil, nil, "select (no cases)"); // forever
-}
-
-static int selectgo(Select**);
-
-// selectgo(sel *byte);
-
-func selectgo(sel *Select) (ret int32) {
- return selectgo(&sel);
-}
-
-static int
-selectgo(Select **selp)
-{
- Select *sel;
- uint32 o, i, j, k, done;
- int64 t0;
- Scase *cas, *dfl;
- Hchan *c;
- SudoG *sg;
- G *gp;
- int index;
- G *g;
-
- sel = *selp;
- if(runtime_gcwaiting())
- runtime_gosched();
-
- if(debug)
- runtime_printf("select: sel=%p\n", sel);
-
- g = runtime_g();
-
- t0 = 0;
- if(runtime_blockprofilerate > 0) {
- t0 = runtime_cputicks();
- for(i=0; i<sel->ncase; i++)
- sel->scase[i].sg.releasetime = -1;
- }
-
- // The compiler rewrites selects that statically have
- // only 0 or 1 cases plus default into simpler constructs.
- // The only way we can end up with such small sel->ncase
- // values here is for a larger select in which most channels
- // have been nilled out. The general code handles those
- // cases correctly, and they are rare enough not to bother
- // optimizing (and needing to test).
-
- // generate permuted order
- for(i=0; i<sel->ncase; i++)
- sel->pollorder[i] = i;
- for(i=1; i<sel->ncase; i++) {
- o = sel->pollorder[i];
- j = runtime_fastrand1()%(i+1);
- sel->pollorder[i] = sel->pollorder[j];
- sel->pollorder[j] = o;
- }
-
- // sort the cases by Hchan address to get the locking order.
- // simple heap sort, to guarantee n log n time and constant stack footprint.
- for(i=0; i<sel->ncase; i++) {
- j = i;
- c = sel->scase[j].chan;
- while(j > 0 && sel->lockorder[k=(j-1)/2] < c) {
- sel->lockorder[j] = sel->lockorder[k];
- j = k;
- }
- sel->lockorder[j] = c;
- }
- for(i=sel->ncase; i-->0; ) {
- c = sel->lockorder[i];
- sel->lockorder[i] = sel->lockorder[0];
- j = 0;
- for(;;) {
- k = j*2+1;
- if(k >= i)
- break;
- if(k+1 < i && sel->lockorder[k] < sel->lockorder[k+1])
- k++;
- if(c < sel->lockorder[k]) {
- sel->lockorder[j] = sel->lockorder[k];
- j = k;
- continue;
- }
- break;
- }
- sel->lockorder[j] = c;
- }
- /*
- for(i=0; i+1<sel->ncase; i++)
- if(sel->lockorder[i] > sel->lockorder[i+1]) {
- runtime_printf("i=%d %p %p\n", i, sel->lockorder[i], sel->lockorder[i+1]);
- runtime_throw("select: broken sort");
- }
- */
- sellock(sel);
-
-loop:
- // pass 1 - look for something already waiting
- dfl = nil;
- for(i=0; i<sel->ncase; i++) {
- o = sel->pollorder[i];
- cas = &sel->scase[o];
- c = cas->chan;
-
- switch(cas->kind) {
- case CaseRecv:
- if(c->dataqsiz > 0) {
- if(c->qcount > 0)
- goto asyncrecv;
- } else {
- sg = dequeue(&c->sendq);
- if(sg != nil)
- goto syncrecv;
- }
- if(c->closed)
- goto rclose;
- break;
-
- case CaseSend:
- if(c->closed)
- goto sclose;
- if(c->dataqsiz > 0) {
- if(c->qcount < c->dataqsiz)
- goto asyncsend;
- } else {
- sg = dequeue(&c->recvq);
- if(sg != nil)
- goto syncsend;
- }
- break;
-
- case CaseDefault:
- dfl = cas;
- break;
- }
- }
-
- if(dfl != nil) {
- selunlock(sel);
- cas = dfl;
- goto retc;
- }
-
-
- // pass 2 - enqueue on all chans
- done = 0;
- for(i=0; i<sel->ncase; i++) {
- o = sel->pollorder[i];
- cas = &sel->scase[o];
- c = cas->chan;
- sg = &cas->sg;
- sg->g = g;
- sg->selectdone = &done;
-
- switch(cas->kind) {
- case CaseRecv:
- enqueue(&c->recvq, sg);
- break;
-
- case CaseSend:
- enqueue(&c->sendq, sg);
- break;
- }
- }
-
- g->param = nil;
- runtime_park(selparkcommit, sel, "select");
-
- sellock(sel);
- sg = g->param;
-
- // pass 3 - dequeue from unsuccessful chans
- // otherwise they stack up on quiet channels
- for(i=0; i<sel->ncase; i++) {
- cas = &sel->scase[i];
- if(cas != (Scase*)sg) {
- c = cas->chan;
- if(cas->kind == CaseSend)
- dequeueg(&c->sendq);
- else
- dequeueg(&c->recvq);
- }
- }
-
- if(sg == nil)
- goto loop;
-
- cas = (Scase*)sg;
- c = cas->chan;
-
- if(c->dataqsiz > 0)
- runtime_throw("selectgo: shouldn't happen");
-
- if(debug)
- runtime_printf("wait-return: sel=%p c=%p cas=%p kind=%d\n",
- sel, c, cas, cas->kind);
-
- if(cas->kind == CaseRecv) {
- if(cas->receivedp != nil)
- *cas->receivedp = true;
- }
-
- selunlock(sel);
- goto retc;
-
-asyncrecv:
- // can receive from buffer
- if(cas->receivedp != nil)
- *cas->receivedp = true;
- if(cas->sg.elem != nil)
- runtime_memmove(cas->sg.elem, chanbuf(c, c->recvx), c->elemsize);
- runtime_memclr(chanbuf(c, c->recvx), c->elemsize);
- if(++c->recvx == c->dataqsiz)
- c->recvx = 0;
- c->qcount--;
- sg = dequeue(&c->sendq);
- if(sg != nil) {
- gp = sg->g;
- selunlock(sel);
- if(sg->releasetime)
- sg->releasetime = runtime_cputicks();
- runtime_ready(gp);
- } else {
- selunlock(sel);
- }
- goto retc;
-
-asyncsend:
- // can send to buffer
- runtime_memmove(chanbuf(c, c->sendx), cas->sg.elem, c->elemsize);
- if(++c->sendx == c->dataqsiz)
- c->sendx = 0;
- c->qcount++;
- sg = dequeue(&c->recvq);
- if(sg != nil) {
- gp = sg->g;
- selunlock(sel);
- if(sg->releasetime)
- sg->releasetime = runtime_cputicks();
- runtime_ready(gp);
- } else {
- selunlock(sel);
- }
- goto retc;
-
-syncrecv:
- // can receive from sleeping sender (sg)
- selunlock(sel);
- if(debug)
- runtime_printf("syncrecv: sel=%p c=%p o=%d\n", sel, c, o);
- if(cas->receivedp != nil)
- *cas->receivedp = true;
- if(cas->sg.elem != nil)
- runtime_memmove(cas->sg.elem, sg->elem, c->elemsize);
- gp = sg->g;
- gp->param = sg;
- if(sg->releasetime)
- sg->releasetime = runtime_cputicks();
- runtime_ready(gp);
- goto retc;
-
-rclose:
- // read at end of closed channel
- selunlock(sel);
- if(cas->receivedp != nil)
- *cas->receivedp = false;
- if(cas->sg.elem != nil)
- runtime_memclr(cas->sg.elem, c->elemsize);
- goto retc;
-
-syncsend:
- // can send to sleeping receiver (sg)
- selunlock(sel);
- if(debug)
- runtime_printf("syncsend: sel=%p c=%p o=%d\n", sel, c, o);
- if(sg->elem != nil)
- runtime_memmove(sg->elem, cas->sg.elem, c->elemsize);
- gp = sg->g;
- gp->param = sg;
- if(sg->releasetime)
- sg->releasetime = runtime_cputicks();
- runtime_ready(gp);
-
-retc:
- // return index corresponding to chosen case
- index = cas->index;
- if(cas->sg.releasetime > 0)
- runtime_blockevent(cas->sg.releasetime - t0, 2);
- runtime_free(sel);
- return index;
-
-sclose:
- // send on closed channel
- selunlock(sel);
- runtime_panicstring("send on closed channel");
- return 0; // not reached
-}
-
-// This struct must match ../reflect/value.go:/runtimeSelect.
-typedef struct runtimeSelect runtimeSelect;
-struct runtimeSelect
-{
- uintptr dir;
- ChanType *typ;
- Hchan *ch;
- byte *val;
-};
-
-// This enum must match ../reflect/value.go:/SelectDir.
-enum SelectDir {
- SelectSend = 1,
- SelectRecv,
- SelectDefault,
-};
-
-func reflect.rselect(cases Slice) (chosen int, recvOK bool) {
- int32 i;
- Select *sel;
- runtimeSelect* rcase, *rc;
-
- chosen = -1;
- recvOK = false;
-
- rcase = (runtimeSelect*)cases.__values;
-
- sel = newselect(cases.__count);
- for(i=0; i<cases.__count; i++) {
- rc = &rcase[i];
- switch(rc->dir) {
- case SelectDefault:
- selectdefault(sel, i);
- break;
- case SelectSend:
- if(rc->ch == nil)
- break;
- selectsend(sel, rc->ch, i, rc->val);
- break;
- case SelectRecv:
- if(rc->ch == nil)
- break;
- selectrecv(sel, rc->ch, i, rc->val, &recvOK);
- break;
- }
- }
-
- chosen = (intgo)(uintptr)selectgo(&sel);
-}
-
-static void closechan(Hchan *c, void *pc);
-
-func closechan(c *Hchan) {
- closechan(c, runtime_getcallerpc(&c));
-}
-
-func reflect.chanclose(c *Hchan) {
- closechan(c, runtime_getcallerpc(&c));
-}
-
-static void
-closechan(Hchan *c, void *pc)
-{
- USED(pc);
- SudoG *sg;
- G* gp;
-
- if(c == nil)
- runtime_panicstring("close of nil channel");
-
- if(runtime_gcwaiting())
- runtime_gosched();
-
- runtime_lock(c);
- if(c->closed) {
- runtime_unlock(c);
- runtime_panicstring("close of closed channel");
- }
- c->closed = true;
-
- // release all readers
- for(;;) {
- sg = dequeue(&c->recvq);
- if(sg == nil)
- break;
- gp = sg->g;
- gp->param = nil;
- if(sg->releasetime)
- sg->releasetime = runtime_cputicks();
- runtime_ready(gp);
- }
-
- // release all writers
- for(;;) {
- sg = dequeue(&c->sendq);
- if(sg == nil)
- break;
- gp = sg->g;
- gp->param = nil;
- if(sg->releasetime)
- sg->releasetime = runtime_cputicks();
- runtime_ready(gp);
- }
-
- runtime_unlock(c);
-}
-
-void
-__go_builtin_close(Hchan *c)
-{
- runtime_closechan(c);
-}
-
-func reflect.chanlen(c *Hchan) (len int) {
- if(c == nil)
- len = 0;
- else
- len = c->qcount;
-}
-
-intgo
-__go_chan_len(Hchan *c)
-{
- return reflect_chanlen(c);
-}
-
-func reflect.chancap(c *Hchan) (cap int) {
- if(c == nil)
- cap = 0;
- else
- cap = c->dataqsiz;
-}
-
-intgo
-__go_chan_cap(Hchan *c)
-{
- return reflect_chancap(c);
-}
-
-static SudoG*
-dequeue(WaitQ *q)
-{
- SudoG *sgp;
-
-loop:
- sgp = q->first;
- if(sgp == nil)
- return nil;
- q->first = sgp->link;
-
- // if sgp participates in a select and is already signaled, ignore it
- if(sgp->selectdone != nil) {
- // claim the right to signal
- if(*sgp->selectdone != 0 || !runtime_cas(sgp->selectdone, 0, 1))
- goto loop;
- }
-
- return sgp;
-}
-
-static void
-dequeueg(WaitQ *q)
-{
- SudoG **l, *sgp, *prevsgp;
- G *g;
-
- g = runtime_g();
- prevsgp = nil;
- for(l=&q->first; (sgp=*l) != nil; l=&sgp->link, prevsgp=sgp) {
- if(sgp->g == g) {
- *l = sgp->link;
- if(q->last == sgp)
- q->last = prevsgp;
- break;
- }
- }
-}
-
-static void
-enqueue(WaitQ *q, SudoG *sgp)
-{
- sgp->link = nil;
- if(q->first == nil) {
- q->first = sgp;
- q->last = sgp;
- return;
- }
- q->last->link = sgp;
- q->last = sgp;
-}
diff --git a/libgo/runtime/chan.h b/libgo/runtime/chan.h
deleted file mode 100644
index 70b0b9d909..0000000000
--- a/libgo/runtime/chan.h
+++ /dev/null
@@ -1,75 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-typedef struct WaitQ WaitQ;
-typedef struct SudoG SudoG;
-typedef struct Select Select;
-typedef struct Scase Scase;
-
-typedef struct __go_type_descriptor Type;
-typedef struct __go_channel_type ChanType;
-
-struct SudoG
-{
- G* g;
- uint32* selectdone;
- SudoG* link;
- int64 releasetime;
- byte* elem; // data element
-};
-
-struct WaitQ
-{
- SudoG* first;
- SudoG* last;
-};
-
-// The garbage collector is assuming that Hchan can only contain pointers into the stack
-// and cannot contain pointers into the heap.
-struct Hchan
-{
- uintgo qcount; // total data in the q
- uintgo dataqsiz; // size of the circular q
- uint16 elemsize;
- uint16 pad; // ensures proper alignment of the buffer that follows Hchan in memory
- bool closed;
- const Type* elemtype; // element type
- uintgo sendx; // send index
- uintgo recvx; // receive index
- WaitQ recvq; // list of recv waiters
- WaitQ sendq; // list of send waiters
- Lock;
-};
-
-// Buffer follows Hchan immediately in memory.
-// chanbuf(c, i) is pointer to the i'th slot in the buffer.
-#define chanbuf(c, i) ((byte*)((c)+1)+(uintptr)(c)->elemsize*(i))
-
-enum
-{
- debug = 0,
-
- // Scase.kind
- CaseRecv,
- CaseSend,
- CaseDefault,
-};
-
-struct Scase
-{
- SudoG sg; // must be first member (cast to Scase)
- Hchan* chan; // chan
- uint16 kind;
- uint16 index; // index to return
- bool* receivedp; // pointer to received bool (recv2)
-};
-
-struct Select
-{
- uint16 tcase; // total count of scase[]
- uint16 ncase; // currently filled scase[]
- uint16* pollorder; // case poll order
- Hchan** lockorder; // channel lock order
- Scase scase[1]; // one per case (in order of appearance)
-};
diff --git a/libgo/runtime/cpuprof.goc b/libgo/runtime/cpuprof.goc
deleted file mode 100644
index 7d27bc6a43..0000000000
--- a/libgo/runtime/cpuprof.goc
+++ /dev/null
@@ -1,442 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// CPU profiling.
-// Based on algorithms and data structures used in
-// http://code.google.com/p/google-perftools/.
-//
-// The main difference between this code and the google-perftools
-// code is that this code is written to allow copying the profile data
-// to an arbitrary io.Writer, while the google-perftools code always
-// writes to an operating system file.
-//
-// The signal handler for the profiling clock tick adds a new stack trace
-// to a hash table tracking counts for recent traces. Most clock ticks
-// hit in the cache. In the event of a cache miss, an entry must be
-// evicted from the hash table, copied to a log that will eventually be
-// written as profile data. The google-perftools code flushed the
-// log itself during the signal handler. This code cannot do that, because
-// the io.Writer might block or need system calls or locks that are not
-// safe to use from within the signal handler. Instead, we split the log
-// into two halves and let the signal handler fill one half while a goroutine
-// is writing out the other half. When the signal handler fills its half, it
-// offers to swap with the goroutine. If the writer is not done with its half,
-// we lose the stack trace for this clock tick (and record that loss).
-// The goroutine interacts with the signal handler by calling getprofile() to
-// get the next log piece to write, implicitly handing back the last log
-// piece it obtained.
-//
-// The state of this dance between the signal handler and the goroutine
-// is encoded in the Profile.handoff field. If handoff == 0, then the goroutine
-// is not using either log half and is waiting (or will soon be waiting) for
-// a new piece by calling notesleep(&p->wait). If the signal handler
-// changes handoff from 0 to non-zero, it must call notewakeup(&p->wait)
-// to wake the goroutine. The value indicates the number of entries in the
-// log half being handed off. The goroutine leaves the non-zero value in
-// place until it has finished processing the log half and then flips the number
-// back to zero. Setting the high bit in handoff means that the profiling is over,
-// and the goroutine is now in charge of flushing the data left in the hash table
-// to the log and returning that data.
-//
-// The handoff field is manipulated using atomic operations.
-// For the most part, the manipulation of handoff is orderly: if handoff == 0
-// then the signal handler owns it and can change it to non-zero.
-// If handoff != 0 then the goroutine owns it and can change it to zero.
-// If that were the end of the story then we would not need to manipulate
-// handoff using atomic operations. The operations are needed, however,
-// in order to let the log closer set the high bit to indicate "EOF" safely
-// in the situation when normally the goroutine "owns" handoff.
-
-package runtime
-#include "runtime.h"
-#include "arch.h"
-#include "malloc.h"
-
-#include "array.h"
-typedef struct __go_open_array Slice;
-#define array __values
-#define len __count
-#define cap __capacity
-
-enum
-{
- HashSize = 1<<10,
- LogSize = 1<<17,
- Assoc = 4,
- MaxStack = 64,
-};
-
-typedef struct Profile Profile;
-typedef struct Bucket Bucket;
-typedef struct Entry Entry;
-
-struct Entry {
- uintptr count;
- uintptr depth;
- uintptr stack[MaxStack];
-};
-
-struct Bucket {
- Entry entry[Assoc];
-};
-
-struct Profile {
- bool on; // profiling is on
- Note wait; // goroutine waits here
- uintptr count; // tick count
- uintptr evicts; // eviction count
- uintptr lost; // lost ticks that need to be logged
-
- // Active recent stack traces.
- Bucket hash[HashSize];
-
- // Log of traces evicted from hash.
- // Signal handler has filled log[toggle][:nlog].
- // Goroutine is writing log[1-toggle][:handoff].
- uintptr log[2][LogSize/2];
- uintptr nlog;
- int32 toggle;
- uint32 handoff;
-
- // Writer state.
- // Writer maintains its own toggle to avoid races
- // looking at signal handler's toggle.
- uint32 wtoggle;
- bool wholding; // holding & need to release a log half
- bool flushing; // flushing hash table - profile is over
- bool eod_sent; // special end-of-data record sent; => flushing
-};
-
-static Lock lk;
-static Profile *prof;
-
-static void tick(uintptr*, int32);
-static void add(Profile*, uintptr*, int32);
-static bool evict(Profile*, Entry*);
-static bool flushlog(Profile*);
-
-static uintptr eod[3] = {0, 1, 0};
-
-// LostProfileData is a no-op function used in profiles
-// to mark the number of profiling stack traces that were
-// discarded due to slow data writers.
-static void
-LostProfileData(void)
-{
-}
-
-extern void runtime_SetCPUProfileRate(intgo)
- __asm__ (GOSYM_PREFIX "runtime.SetCPUProfileRate");
-
-// SetCPUProfileRate sets the CPU profiling rate.
-// The user documentation is in debug.go.
-void
-runtime_SetCPUProfileRate(intgo hz)
-{
- uintptr *p;
- uintptr n;
-
- // Clamp hz to something reasonable.
- if(hz < 0)
- hz = 0;
- if(hz > 1000000)
- hz = 1000000;
-
- runtime_lock(&lk);
- if(hz > 0) {
- if(prof == nil) {
- prof = runtime_SysAlloc(sizeof *prof, &mstats.other_sys);
- if(prof == nil) {
- runtime_printf("runtime: cpu profiling cannot allocate memory\n");
- runtime_unlock(&lk);
- return;
- }
- }
- if(prof->on || prof->handoff != 0) {
- runtime_printf("runtime: cannot set cpu profile rate until previous profile has finished.\n");
- runtime_unlock(&lk);
- return;
- }
-
- prof->on = true;
- p = prof->log[0];
- // pprof binary header format.
- // http://code.google.com/p/google-perftools/source/browse/trunk/src/profiledata.cc#117
- *p++ = 0; // count for header
- *p++ = 3; // depth for header
- *p++ = 0; // version number
- *p++ = 1000000 / hz; // period (microseconds)
- *p++ = 0;
- prof->nlog = p - prof->log[0];
- prof->toggle = 0;
- prof->wholding = false;
- prof->wtoggle = 0;
- prof->flushing = false;
- prof->eod_sent = false;
- runtime_noteclear(&prof->wait);
-
- runtime_setcpuprofilerate(tick, hz);
- } else if(prof != nil && prof->on) {
- runtime_setcpuprofilerate(nil, 0);
- prof->on = false;
-
- // Now add is not running anymore, and getprofile owns the entire log.
- // Set the high bit in prof->handoff to tell getprofile.
- for(;;) {
- n = prof->handoff;
- if(n&0x80000000)
- runtime_printf("runtime: setcpuprofile(off) twice");
- if(runtime_cas(&prof->handoff, n, n|0x80000000))
- break;
- }
- if(n == 0) {
- // we did the transition from 0 -> nonzero so we wake getprofile
- runtime_notewakeup(&prof->wait);
- }
- }
- runtime_unlock(&lk);
-}
-
-static void
-tick(uintptr *pc, int32 n)
-{
- add(prof, pc, n);
-}
-
-// add adds the stack trace to the profile.
-// It is called from signal handlers and other limited environments
-// and cannot allocate memory or acquire locks that might be
-// held at the time of the signal, nor can it use substantial amounts
-// of stack. It is allowed to call evict.
-static void
-add(Profile *p, uintptr *pc, int32 n)
-{
- int32 i, j;
- uintptr h, x;
- Bucket *b;
- Entry *e;
-
- if(n > MaxStack)
- n = MaxStack;
-
- // Compute hash.
- h = 0;
- for(i=0; i<n; i++) {
- h = h<<8 | (h>>(8*(sizeof(h)-1)));
- x = pc[i];
- h += x*31 + x*7 + x*3;
- }
- p->count++;
-
- // Add to entry count if already present in table.
- b = &p->hash[h%HashSize];
- for(i=0; i<Assoc; i++) {
- e = &b->entry[i];
- if(e->depth != (uintptr)n)
- continue;
- for(j=0; j<n; j++)
- if(e->stack[j] != pc[j])
- goto ContinueAssoc;
- e->count++;
- return;
- ContinueAssoc:;
- }
-
- // Evict entry with smallest count.
- e = &b->entry[0];
- for(i=1; i<Assoc; i++)
- if(b->entry[i].count < e->count)
- e = &b->entry[i];
- if(e->count > 0) {
- if(!evict(p, e)) {
- // Could not evict entry. Record lost stack.
- p->lost++;
- return;
- }
- p->evicts++;
- }
-
- // Reuse the newly evicted entry.
- e->depth = n;
- e->count = 1;
- for(i=0; i<n; i++)
- e->stack[i] = pc[i];
-}
-
-// evict copies the given entry's data into the log, so that
-// the entry can be reused. evict is called from add, which
-// is called from the profiling signal handler, so it must not
-// allocate memory or block. It is safe to call flushLog.
-// evict returns true if the entry was copied to the log,
-// false if there was no room available.
-static bool
-evict(Profile *p, Entry *e)
-{
- int32 i, d, nslot;
- uintptr *log, *q;
-
- d = e->depth;
- nslot = d+2;
- log = p->log[p->toggle];
- if(p->nlog+nslot > nelem(p->log[0])) {
- if(!flushlog(p))
- return false;
- log = p->log[p->toggle];
- }
-
- q = log+p->nlog;
- *q++ = e->count;
- *q++ = d;
- for(i=0; i<d; i++)
- *q++ = e->stack[i];
- p->nlog = q - log;
- e->count = 0;
- return true;
-}
-
-// flushlog tries to flush the current log and switch to the other one.
-// flushlog is called from evict, called from add, called from the signal handler,
-// so it cannot allocate memory or block. It can try to swap logs with
-// the writing goroutine, as explained in the comment at the top of this file.
-static bool
-flushlog(Profile *p)
-{
- uintptr *log, *q;
-
- if(!runtime_cas(&p->handoff, 0, p->nlog))
- return false;
- runtime_notewakeup(&p->wait);
-
- p->toggle = 1 - p->toggle;
- log = p->log[p->toggle];
- q = log;
- if(p->lost > 0) {
- *q++ = p->lost;
- *q++ = 1;
- *q++ = (uintptr)LostProfileData;
- p->lost = 0;
- }
- p->nlog = q - log;
- return true;
-}
-
-// getprofile blocks until the next block of profiling data is available
-// and returns it as a []byte. It is called from the writing goroutine.
-Slice
-getprofile(Profile *p)
-{
- uint32 i, j, n;
- Slice ret;
- Bucket *b;
- Entry *e;
-
- ret.array = nil;
- ret.len = 0;
- ret.cap = 0;
-
- if(p == nil)
- return ret;
-
- if(p->wholding) {
- // Release previous log to signal handling side.
- // Loop because we are racing against SetCPUProfileRate(0).
- for(;;) {
- n = p->handoff;
- if(n == 0) {
- runtime_printf("runtime: phase error during cpu profile handoff\n");
- return ret;
- }
- if(n & 0x80000000) {
- p->wtoggle = 1 - p->wtoggle;
- p->wholding = false;
- p->flushing = true;
- goto flush;
- }
- if(runtime_cas(&p->handoff, n, 0))
- break;
- }
- p->wtoggle = 1 - p->wtoggle;
- p->wholding = false;
- }
-
- if(p->flushing)
- goto flush;
-
- if(!p->on && p->handoff == 0)
- return ret;
-
- // Wait for new log.
- runtime_notetsleepg(&p->wait, -1);
- runtime_noteclear(&p->wait);
-
- n = p->handoff;
- if(n == 0) {
- runtime_printf("runtime: phase error during cpu profile wait\n");
- return ret;
- }
- if(n == 0x80000000) {
- p->flushing = true;
- goto flush;
- }
- n &= ~0x80000000;
-
- // Return new log to caller.
- p->wholding = true;
-
- ret.array = (byte*)p->log[p->wtoggle];
- ret.len = n*sizeof(uintptr);
- ret.cap = ret.len;
- return ret;
-
-flush:
- // In flush mode.
- // Add is no longer being called. We own the log.
- // Also, p->handoff is non-zero, so flushlog will return false.
- // Evict the hash table into the log and return it.
- for(i=0; i<HashSize; i++) {
- b = &p->hash[i];
- for(j=0; j<Assoc; j++) {
- e = &b->entry[j];
- if(e->count > 0 && !evict(p, e)) {
- // Filled the log. Stop the loop and return what we've got.
- goto breakflush;
- }
- }
- }
-breakflush:
-
- // Return pending log data.
- if(p->nlog > 0) {
- // Note that we're using toggle now, not wtoggle,
- // because we're working on the log directly.
- ret.array = (byte*)p->log[p->toggle];
- ret.len = p->nlog*sizeof(uintptr);
- ret.cap = ret.len;
- p->nlog = 0;
- return ret;
- }
-
- // Made it through the table without finding anything to log.
- if(!p->eod_sent) {
- // We may not have space to append this to the partial log buf,
- // so we always return a new slice for the end-of-data marker.
- p->eod_sent = true;
- ret.array = (byte*)eod;
- ret.len = sizeof eod;
- ret.cap = ret.len;
- return ret;
- }
-
- // Finally done. Clean up and return nil.
- p->flushing = false;
- if(!runtime_cas(&p->handoff, p->handoff, 0))
- runtime_printf("runtime: profile flush racing with something\n");
- return ret; // set to nil at top of function
-}
-
-// CPUProfile returns the next cpu profile block as a []byte.
-// The user documentation is in debug.go.
-func CPUProfile() (ret Slice) {
- ret = getprofile(prof);
-}
diff --git a/libgo/runtime/env_posix.c b/libgo/runtime/env_posix.c
index b93edd65a6..3a60682597 100644
--- a/libgo/runtime/env_posix.c
+++ b/libgo/runtime/env_posix.c
@@ -9,7 +9,7 @@
#include "arch.h"
#include "malloc.h"
-extern Slice envs;
+extern Slice runtime_get_envs(void);
String
runtime_getenv(const char *s)
@@ -17,12 +17,14 @@ runtime_getenv(const char *s)
int32 i, j;
intgo len;
const byte *v, *bs;
+ Slice envs;
String* envv;
int32 envc;
String ret;
bs = (const byte*)s;
len = runtime_findnull(bs);
+ envs = runtime_get_envs();
envv = (String*)envs.__values;
envc = envs.__count;
for(i=0; i<envc; i++){
diff --git a/libgo/runtime/go-alloc.h b/libgo/runtime/go-alloc.h
deleted file mode 100644
index c880a043ea..0000000000
--- a/libgo/runtime/go-alloc.h
+++ /dev/null
@@ -1,11 +0,0 @@
-/* go-alloc.h -- allocate memory for Go.
-
- Copyright 2009 The Go Authors. All rights reserved.
- Use of this source code is governed by a BSD-style
- license that can be found in the LICENSE file. */
-
-#include <stddef.h>
-#include <stdint.h>
-
-extern void *__go_alloc (unsigned int __attribute__ ((mode (pointer))));
-extern void __go_free (void *);
diff --git a/libgo/runtime/go-append.c b/libgo/runtime/go-append.c
deleted file mode 100644
index 1b2d49e53c..0000000000
--- a/libgo/runtime/go-append.c
+++ /dev/null
@@ -1,74 +0,0 @@
-/* go-append.c -- the go builtin append function.
-
- Copyright 2010 The Go Authors. All rights reserved.
- Use of this source code is governed by a BSD-style
- license that can be found in the LICENSE file. */
-
-#include "runtime.h"
-#include "go-panic.h"
-#include "go-type.h"
-#include "array.h"
-#include "arch.h"
-#include "malloc.h"
-
-/* We should be OK if we don't split the stack here, since the only
- libc functions we call are memcpy and memmove. If we don't do
- this, we will always split the stack, because of memcpy and
- memmove. */
-extern struct __go_open_array
-__go_append (struct __go_open_array, void *, uintptr_t, uintptr_t)
- __attribute__ ((no_split_stack));
-
-struct __go_open_array
-__go_append (struct __go_open_array a, void *bvalues, uintptr_t bcount,
- uintptr_t element_size)
-{
- uintptr_t ucount;
- intgo count;
-
- if (bvalues == NULL || bcount == 0)
- return a;
-
- ucount = (uintptr_t) a.__count + bcount;
- count = (intgo) ucount;
- if ((uintptr_t) count != ucount || count <= a.__count)
- runtime_panicstring ("append: slice overflow");
-
- if (count > a.__capacity)
- {
- intgo m;
- uintptr capmem;
- void *n;
-
- m = a.__capacity;
- if (m + m < count)
- m = count;
- else
- {
- do
- {
- if (a.__count < 1024)
- m += m;
- else
- m += m / 4;
- }
- while (m < count);
- }
-
- if (element_size > 0 && (uintptr) m > MaxMem / element_size)
- runtime_panicstring ("growslice: cap out of range");
-
- capmem = runtime_roundupsize (m * element_size);
-
- n = __go_alloc (capmem);
- __builtin_memcpy (n, a.__values, a.__count * element_size);
-
- a.__values = n;
- a.__capacity = m;
- }
-
- __builtin_memmove ((char *) a.__values + a.__count * element_size,
- bvalues, bcount * element_size);
- a.__count = count;
- return a;
-}
diff --git a/libgo/runtime/go-assert-interface.c b/libgo/runtime/go-assert-interface.c
deleted file mode 100644
index 427916f8c4..0000000000
--- a/libgo/runtime/go-assert-interface.c
+++ /dev/null
@@ -1,45 +0,0 @@
-/* go-assert-interface.c -- interface type assertion for Go.
-
- Copyright 2010 The Go Authors. All rights reserved.
- Use of this source code is governed by a BSD-style
- license that can be found in the LICENSE file. */
-
-#include "runtime.h"
-#include "go-alloc.h"
-#include "go-assert.h"
-#include "go-panic.h"
-#include "go-type.h"
-#include "interface.h"
-
-/* This is called by the compiler to implement a type assertion from
- one interface type to another. This returns the value that should
- go in the first field of the result tuple. The result may be an
- empty or a non-empty interface. */
-
-const void *
-__go_assert_interface (const struct __go_type_descriptor *lhs_descriptor,
- const struct __go_type_descriptor *rhs_descriptor)
-{
- const struct __go_interface_type *lhs_interface;
-
- if (rhs_descriptor == NULL)
- {
- struct __go_empty_interface panic_arg;
-
- /* A type assertion is not permitted with a nil interface. */
-
- runtime_newTypeAssertionError (NULL, NULL, lhs_descriptor->__reflection,
- NULL, &panic_arg);
- __go_panic (panic_arg);
- }
-
- /* A type assertion to an empty interface just returns the object
- descriptor. */
-
- __go_assert ((lhs_descriptor->__code & GO_CODE_MASK) == GO_INTERFACE);
- lhs_interface = (const struct __go_interface_type *) lhs_descriptor;
- if (lhs_interface->__methods.__count == 0)
- return rhs_descriptor;
-
- return __go_convert_interface_2 (lhs_descriptor, rhs_descriptor, 0);
-}
diff --git a/libgo/runtime/go-byte-array-to-string.c b/libgo/runtime/go-byte-array-to-string.c
deleted file mode 100644
index 088b78690f..0000000000
--- a/libgo/runtime/go-byte-array-to-string.c
+++ /dev/null
@@ -1,24 +0,0 @@
-/* go-byte-array-to-string.c -- convert an array of bytes to a string in Go.
-
- Copyright 2009 The Go Authors. All rights reserved.
- Use of this source code is governed by a BSD-style
- license that can be found in the LICENSE file. */
-
-#include "runtime.h"
-#include "arch.h"
-#include "malloc.h"
-
-String
-__go_byte_array_to_string (const void* p, intgo len)
-{
- const unsigned char *bytes;
- unsigned char *retdata;
- String ret;
-
- bytes = (const unsigned char *) p;
- retdata = runtime_mallocgc ((uintptr) len, 0, FlagNoScan);
- __builtin_memcpy (retdata, bytes, len);
- ret.str = retdata;
- ret.len = len;
- return ret;
-}
diff --git a/libgo/runtime/go-caller.c b/libgo/runtime/go-caller.c
index d6901e0737..a35d8d73f4 100644
--- a/libgo/runtime/go-caller.c
+++ b/libgo/runtime/go-caller.c
@@ -1,4 +1,4 @@
-/* go-caller.c -- runtime.Caller and runtime.FuncForPC for Go.
+/* go-caller.c -- look up function/file/line/entry info
Copyright 2009 The Go Authors. All rights reserved.
Use of this source code is governed by a BSD-style
@@ -25,6 +25,7 @@ struct caller
String fn;
String file;
intgo line;
+ intgo index;
};
/* Collect file/line information for a PC value. If this is called
@@ -45,6 +46,12 @@ callback (void *data, uintptr_t pc __attribute__ ((unused)),
c->file = runtime_gostringnocopy ((const byte *) filename);
c->line = lineno;
+ if (c->index == 0)
+ return 1;
+
+ if (c->index > 0)
+ --c->index;
+
return 0;
}
@@ -69,6 +76,10 @@ static void *back_state;
static Lock back_state_lock;
+/* The program arguments. */
+
+extern Slice runtime_get_args(void);
+
/* Fetch back_state, creating it if necessary. */
struct backtrace_state *
@@ -77,15 +88,19 @@ __go_get_backtrace_state ()
runtime_lock (&back_state_lock);
if (back_state == NULL)
{
+ Slice args;
const char *filename;
struct stat s;
- filename = (const char *) runtime_progname ();
+ args = runtime_get_args();
+ filename = NULL;
+ if (args.__count > 0)
+ filename = (const char*)((String*)args.__values)[0].str;
/* If there is no '/' in FILENAME, it was found on PATH, and
might not be the same as the file with the same name in the
current directory. */
- if (__builtin_strchr (filename, '/') == NULL)
+ if (filename != NULL && __builtin_strchr (filename, '/') == NULL)
filename = NULL;
/* If the file is small, then it's not the real executable.
@@ -102,14 +117,17 @@ __go_get_backtrace_state ()
return back_state;
}
-/* Return function/file/line information for PC. */
+/* Return function/file/line information for PC. The index parameter
+ is the entry on the stack of inlined functions; -1 means the last
+ one. */
_Bool
-__go_file_line (uintptr pc, String *fn, String *file, intgo *line)
+__go_file_line (uintptr pc, int index, String *fn, String *file, intgo *line)
{
struct caller c;
runtime_memclr (&c, sizeof c);
+ c.index = index;
backtrace_pcinfo (__go_get_backtrace_state (), pc, callback,
error_callback, &c);
*fn = c.fn;
@@ -153,8 +171,6 @@ struct caller_ret
struct caller_ret Caller (int n) __asm__ (GOSYM_PREFIX "runtime.Caller");
-Func *FuncForPC (uintptr_t) __asm__ (GOSYM_PREFIX "runtime.FuncForPC");
-
/* Implement runtime.Caller. */
struct caller_ret
@@ -175,73 +191,40 @@ Caller (int skip)
return ret;
}
-/* Implement runtime.FuncForPC. */
+/* Look up the function name, file name, and line number for a PC. */
-Func *
-FuncForPC (uintptr_t pc)
-{
- Func *ret;
- String fn;
- String file;
- intgo line;
- uintptr_t val;
-
- if (!__go_file_line (pc, &fn, &file, &line))
- return NULL;
-
- ret = (Func *) runtime_malloc (sizeof (*ret));
- ret->name = fn;
-
- if (__go_symbol_value (pc, &val))
- ret->entry = val;
- else
- ret->entry = 0;
-
- return ret;
-}
-
-/* Look up the file and line information for a PC within a
- function. */
-
-struct funcline_go_return
+struct funcfileline_return
{
+ String retfn;
String retfile;
intgo retline;
};
-struct funcline_go_return
-runtime_funcline_go (Func *f, uintptr targetpc)
- __asm__ (GOSYM_PREFIX "runtime.funcline_go");
+struct funcfileline_return
+runtime_funcfileline (uintptr targetpc, int32 index)
+ __asm__ (GOSYM_PREFIX "runtime.funcfileline");
-struct funcline_go_return
-runtime_funcline_go (Func *f __attribute__((unused)), uintptr targetpc)
+struct funcfileline_return
+runtime_funcfileline (uintptr targetpc, int32 index)
{
- struct funcline_go_return ret;
- String fn;
+ struct funcfileline_return ret;
- if (!__go_file_line (targetpc, &fn, &ret.retfile, &ret.retline))
+ if (!__go_file_line (targetpc, index, &ret.retfn, &ret.retfile,
+ &ret.retline))
runtime_memclr (&ret, sizeof ret);
return ret;
}
-/* Return the name of a function. */
-String runtime_funcname_go (Func *f)
- __asm__ (GOSYM_PREFIX "runtime.funcname_go");
-
-String
-runtime_funcname_go (Func *f)
-{
- if (f == NULL)
- return runtime_gostringnocopy ((const byte *) "");
- return f->name;
-}
-
/* Return the entry point of a function. */
-uintptr runtime_funcentry_go(Func *f)
- __asm__ (GOSYM_PREFIX "runtime.funcentry_go");
+uintptr runtime_funcentry(uintptr)
+ __asm__ (GOSYM_PREFIX "runtime.funcentry");
uintptr
-runtime_funcentry_go (Func *f)
+runtime_funcentry (uintptr pc)
{
- return f->entry;
+ uintptr val;
+
+ if (!__go_symbol_value (pc, &val))
+ return 0;
+ return val;
}
diff --git a/libgo/runtime/go-can-convert-interface.c b/libgo/runtime/go-can-convert-interface.c
deleted file mode 100644
index aac889d346..0000000000
--- a/libgo/runtime/go-can-convert-interface.c
+++ /dev/null
@@ -1,78 +0,0 @@
-/* go-can-convert-interface.c -- can we convert to an interface?
-
- Copyright 2009 The Go Authors. All rights reserved.
- Use of this source code is governed by a BSD-style
- license that can be found in the LICENSE file. */
-
-#include "runtime.h"
-#include "go-assert.h"
-#include "go-string.h"
-#include "go-type.h"
-#include "interface.h"
-
-/* Return whether we can convert from the type in FROM_DESCRIPTOR to
- the interface in TO_DESCRIPTOR. This is used for type
- switches. */
-
-_Bool
-__go_can_convert_to_interface (
- const struct __go_type_descriptor *to_descriptor,
- const struct __go_type_descriptor *from_descriptor)
-{
- const struct __go_interface_type *to_interface;
- int to_method_count;
- const struct __go_interface_method *to_method;
- const struct __go_uncommon_type *from_uncommon;
- int from_method_count;
- const struct __go_method *from_method;
- int i;
-
- /* In a type switch FROM_DESCRIPTOR can be NULL. */
- if (from_descriptor == NULL)
- return 0;
-
- __go_assert ((to_descriptor->__code & GO_CODE_MASK) == GO_INTERFACE);
- to_interface = (const struct __go_interface_type *) to_descriptor;
- to_method_count = to_interface->__methods.__count;
- to_method = ((const struct __go_interface_method *)
- to_interface->__methods.__values);
-
- from_uncommon = from_descriptor->__uncommon;
- if (from_uncommon == NULL)
- {
- from_method_count = 0;
- from_method = NULL;
- }
- else
- {
- from_method_count = from_uncommon->__methods.__count;
- from_method = ((const struct __go_method *)
- from_uncommon->__methods.__values);
- }
-
- for (i = 0; i < to_method_count; ++i)
- {
- while (from_method_count > 0
- && (!__go_ptr_strings_equal (from_method->__name,
- to_method->__name)
- || !__go_ptr_strings_equal (from_method->__pkg_path,
- to_method->__pkg_path)))
- {
- ++from_method;
- --from_method_count;
- }
-
- if (from_method_count == 0)
- return 0;
-
- if (!__go_type_descriptors_equal (from_method->__mtype,
- to_method->__type))
- return 0;
-
- ++to_method;
- ++from_method;
- --from_method_count;
- }
-
- return 1;
-}
diff --git a/libgo/runtime/go-cgo.c b/libgo/runtime/go-cgo.c
index 610bcf5ec4..e80b6b519e 100644
--- a/libgo/runtime/go-cgo.c
+++ b/libgo/runtime/go-cgo.c
@@ -5,194 +5,6 @@
license that can be found in the LICENSE file. */
#include "runtime.h"
-#include "go-alloc.h"
-#include "interface.h"
-#include "go-panic.h"
-#include "go-type.h"
-
-extern void __go_receive (ChanType *, Hchan *, byte *);
-
-/* Prepare to call from code written in Go to code written in C or
- C++. This takes the current goroutine out of the Go scheduler, as
- though it were making a system call. Otherwise the program can
- lock up if the C code goes to sleep on a mutex or for some other
- reason. This idea is to call this function, then immediately call
- the C/C++ function. After the C/C++ function returns, call
- syscall_cgocalldone. The usual Go code would look like
-
- syscall.Cgocall()
- defer syscall.Cgocalldone()
- cfunction()
-
- */
-
-/* We let Go code call these via the syscall package. */
-void syscall_cgocall(void) __asm__ (GOSYM_PREFIX "syscall.Cgocall");
-void syscall_cgocalldone(void) __asm__ (GOSYM_PREFIX "syscall.CgocallDone");
-void syscall_cgocallback(void) __asm__ (GOSYM_PREFIX "syscall.CgocallBack");
-void syscall_cgocallbackdone(void) __asm__ (GOSYM_PREFIX "syscall.CgocallBackDone");
-
-void
-syscall_cgocall ()
-{
- M* m;
- G* g;
-
- if (runtime_needextram && runtime_cas (&runtime_needextram, 1, 0))
- runtime_newextram ();
-
- runtime_lockOSThread();
-
- m = runtime_m ();
- ++m->ncgocall;
- g = runtime_g ();
- ++g->ncgo;
- runtime_entersyscall ();
-}
-
-/* Prepare to return to Go code from C/C++ code. */
-
-void
-syscall_cgocalldone ()
-{
- G* g;
-
- g = runtime_g ();
- __go_assert (g != NULL);
- --g->ncgo;
- if (g->ncgo == 0)
- {
- /* We are going back to Go, and we are not in a recursive call.
- Let the garbage collector clean up any unreferenced
- memory. */
- g->cgomal = NULL;
- }
-
- /* If we are invoked because the C function called _cgo_panic, then
- _cgo_panic will already have exited syscall mode. */
- if (g->status == Gsyscall)
- runtime_exitsyscall ();
-
- runtime_unlockOSThread();
-}
-
-/* Call back from C/C++ code to Go code. */
-
-void
-syscall_cgocallback ()
-{
- M *mp;
-
- mp = runtime_m ();
- if (mp == NULL)
- {
- runtime_needm ();
- mp = runtime_m ();
- mp->dropextram = true;
- }
-
- runtime_exitsyscall ();
-
- if (runtime_g ()->ncgo == 0)
- {
- /* The C call to Go came from a thread not currently running any
- Go. In the case of -buildmode=c-archive or c-shared, this
- call may be coming in before package initialization is
- complete. Wait until it is. */
- __go_receive (NULL, runtime_main_init_done, NULL);
- }
-
- mp = runtime_m ();
- if (mp->needextram)
- {
- mp->needextram = 0;
- runtime_newextram ();
- }
-}
-
-/* Prepare to return to C/C++ code from a callback to Go code. */
-
-void
-syscall_cgocallbackdone ()
-{
- M *mp;
-
- runtime_entersyscall ();
- mp = runtime_m ();
- if (mp->dropextram && runtime_g ()->ncgo == 0)
- {
- mp->dropextram = false;
- runtime_dropm ();
- }
-}
-
-/* Allocate memory and save it in a list visible to the Go garbage
- collector. */
-
-void *
-alloc_saved (size_t n)
-{
- void *ret;
- G *g;
- CgoMal *c;
-
- ret = __go_alloc (n);
-
- g = runtime_g ();
- c = (CgoMal *) __go_alloc (sizeof (CgoMal));
- c->next = g->cgomal;
- c->alloc = ret;
- g->cgomal = c;
-
- return ret;
-}
-
-/* These are routines used by SWIG. The gc runtime library provides
- the same routines under the same name, though in that case the code
- is required to import runtime/cgo. */
-
-void *
-_cgo_allocate (size_t n)
-{
- void *ret;
-
- runtime_exitsyscall ();
- ret = alloc_saved (n);
- runtime_entersyscall ();
- return ret;
-}
-
-extern const struct __go_type_descriptor string_type_descriptor
- __asm__ (GOSYM_PREFIX "__go_tdn_string");
-
-void
-_cgo_panic (const char *p)
-{
- intgo len;
- unsigned char *data;
- String *ps;
- struct __go_empty_interface e;
-
- runtime_exitsyscall ();
- len = __builtin_strlen (p);
- data = alloc_saved (len);
- __builtin_memcpy (data, p, len);
- ps = alloc_saved (sizeof *ps);
- ps->str = data;
- ps->len = len;
- e.__type_descriptor = &string_type_descriptor;
- e.__object = ps;
-
- /* We don't call runtime_entersyscall here, because normally what
- will happen is that we will walk up the stack to a Go deferred
- function that calls recover. However, this will do the wrong
- thing if this panic is recovered and the stack unwinding is
- caught by a C++ exception handler. It might be possible to
- handle this by calling runtime_entersyscall in the personality
- function in go-unwind.c. FIXME. */
-
- __go_panic (e);
-}
/* Used for _cgo_wait_runtime_init_done. This is based on code in
runtime/cgo/gcc_libinit.c in the master library. */
@@ -250,8 +62,3 @@ _cgo_notify_runtime_init_done (void)
// runtime_iscgo is set to true if some cgo code is linked in.
// This is done by a constructor in the cgo generated code.
_Bool runtime_iscgo;
-
-// runtime_cgoHasExtraM is set on startup when an extra M is created
-// for cgo. The extra M must be created before any C/C++ code calls
-// cgocallback.
-_Bool runtime_cgoHasExtraM;
diff --git a/libgo/runtime/go-check-interface.c b/libgo/runtime/go-check-interface.c
deleted file mode 100644
index 722a4219ab..0000000000
--- a/libgo/runtime/go-check-interface.c
+++ /dev/null
@@ -1,46 +0,0 @@
-/* go-check-interface.c -- check an interface type for a conversion
-
- Copyright 2010 The Go Authors. All rights reserved.
- Use of this source code is governed by a BSD-style
- license that can be found in the LICENSE file. */
-
-#include "runtime.h"
-#include "go-panic.h"
-#include "go-type.h"
-#include "interface.h"
-
-/* Check that an interface type matches for a conversion to a
- non-interface type. This panics if the types are bad. The actual
- extraction of the object is inlined. */
-
-void
-__go_check_interface_type (
- const struct __go_type_descriptor *lhs_descriptor,
- const struct __go_type_descriptor *rhs_descriptor,
- const struct __go_type_descriptor *rhs_inter_descriptor)
-{
- if (rhs_descriptor == NULL)
- {
- struct __go_empty_interface panic_arg;
-
- runtime_newTypeAssertionError(NULL, NULL, lhs_descriptor->__reflection,
- NULL, &panic_arg);
- __go_panic(panic_arg);
- }
-
- if (lhs_descriptor != rhs_descriptor
- && !__go_type_descriptors_equal (lhs_descriptor, rhs_descriptor)
- && ((lhs_descriptor->__code & GO_CODE_MASK) != GO_UNSAFE_POINTER
- || !__go_is_pointer_type (rhs_descriptor))
- && ((rhs_descriptor->__code & GO_CODE_MASK) != GO_UNSAFE_POINTER
- || !__go_is_pointer_type (lhs_descriptor)))
- {
- struct __go_empty_interface panic_arg;
-
- runtime_newTypeAssertionError(rhs_inter_descriptor->__reflection,
- rhs_descriptor->__reflection,
- lhs_descriptor->__reflection,
- NULL, &panic_arg);
- __go_panic(panic_arg);
- }
-}
diff --git a/libgo/runtime/go-construct-map.c b/libgo/runtime/go-construct-map.c
index 4bd79d2005..9a48d5733e 100644
--- a/libgo/runtime/go-construct-map.c
+++ b/libgo/runtime/go-construct-map.c
@@ -9,25 +9,33 @@
#include <stdlib.h>
#include "runtime.h"
-#include "map.h"
+#include "go-type.h"
-struct __go_map *
-__go_construct_map (const struct __go_map_descriptor *descriptor,
+extern void *makemap (const struct __go_map_type *, int64_t hint,
+ void *, void *)
+ __asm__ (GOSYM_PREFIX "runtime.makemap");
+
+extern void *mapassign (const struct __go_map_type *, void *hmap,
+ const void *key)
+ __asm__ (GOSYM_PREFIX "runtime.mapassign");
+
+void *
+__go_construct_map (const struct __go_map_type *type,
uintptr_t count, uintptr_t entry_size,
- uintptr_t val_offset, uintptr_t val_size,
- const void *ventries)
+ uintptr_t val_offset, const void *ventries)
{
- struct __go_map *ret;
+ void *ret;
const unsigned char *entries;
uintptr_t i;
+ void *p;
- ret = __go_new_map (descriptor, count);
+ ret = makemap(type, (int64_t) count, NULL, NULL);
entries = (const unsigned char *) ventries;
for (i = 0; i < count; ++i)
{
- void *val = __go_map_index (ret, entries, 1);
- __builtin_memcpy (val, entries + val_offset, val_size);
+ p = mapassign (type, ret, entries);
+ typedmemmove (type->__val_type, p, entries + val_offset);
entries += entry_size;
}
diff --git a/libgo/runtime/go-convert-interface.c b/libgo/runtime/go-convert-interface.c
deleted file mode 100644
index 0e8a306243..0000000000
--- a/libgo/runtime/go-convert-interface.c
+++ /dev/null
@@ -1,132 +0,0 @@
-/* go-convert-interface.c -- convert interfaces for Go.
-
- Copyright 2009 The Go Authors. All rights reserved.
- Use of this source code is governed by a BSD-style
- license that can be found in the LICENSE file. */
-
-#include "runtime.h"
-#include "go-alloc.h"
-#include "go-assert.h"
-#include "go-panic.h"
-#include "go-string.h"
-#include "go-type.h"
-#include "interface.h"
-
-/* This is called when converting one interface type into another
- interface type. LHS_DESCRIPTOR is the type descriptor of the
- resulting interface. RHS_DESCRIPTOR is the type descriptor of the
- object being converted. This builds and returns a new interface
- method table. If any method in the LHS_DESCRIPTOR interface is not
- implemented by the object, the conversion fails. If the conversion
- fails, then if MAY_FAIL is true this returns NULL; otherwise, it
- panics. */
-
-void *
-__go_convert_interface_2 (const struct __go_type_descriptor *lhs_descriptor,
- const struct __go_type_descriptor *rhs_descriptor,
- _Bool may_fail)
-{
- const struct __go_interface_type *lhs_interface;
- int lhs_method_count;
- const struct __go_interface_method* lhs_methods;
- const void **methods;
- const struct __go_uncommon_type *rhs_uncommon;
- int rhs_method_count;
- const struct __go_method *p_rhs_method;
- int i;
-
- if (rhs_descriptor == NULL)
- {
- /* A nil value always converts to nil. */
- return NULL;
- }
-
- __go_assert ((lhs_descriptor->__code & GO_CODE_MASK) == GO_INTERFACE);
- lhs_interface = (const struct __go_interface_type *) lhs_descriptor;
- lhs_method_count = lhs_interface->__methods.__count;
- lhs_methods = ((const struct __go_interface_method *)
- lhs_interface->__methods.__values);
-
- /* This should not be called for an empty interface. */
- __go_assert (lhs_method_count > 0);
-
- rhs_uncommon = rhs_descriptor->__uncommon;
- if (rhs_uncommon == NULL || rhs_uncommon->__methods.__count == 0)
- {
- struct __go_empty_interface panic_arg;
-
- if (may_fail)
- return NULL;
-
- runtime_newTypeAssertionError (NULL, rhs_descriptor->__reflection,
- lhs_descriptor->__reflection,
- lhs_methods[0].__name,
- &panic_arg);
- __go_panic (panic_arg);
- }
-
- rhs_method_count = rhs_uncommon->__methods.__count;
- p_rhs_method = ((const struct __go_method *)
- rhs_uncommon->__methods.__values);
-
- methods = NULL;
-
- for (i = 0; i < lhs_method_count; ++i)
- {
- const struct __go_interface_method *p_lhs_method;
-
- p_lhs_method = &lhs_methods[i];
-
- while (rhs_method_count > 0
- && (!__go_ptr_strings_equal (p_lhs_method->__name,
- p_rhs_method->__name)
- || !__go_ptr_strings_equal (p_lhs_method->__pkg_path,
- p_rhs_method->__pkg_path)))
- {
- ++p_rhs_method;
- --rhs_method_count;
- }
-
- if (rhs_method_count == 0
- || !__go_type_descriptors_equal (p_lhs_method->__type,
- p_rhs_method->__mtype))
- {
- struct __go_empty_interface panic_arg;
-
- if (methods != NULL)
- __go_free (methods);
-
- if (may_fail)
- return NULL;
-
- runtime_newTypeAssertionError (NULL, rhs_descriptor->__reflection,
- lhs_descriptor->__reflection,
- p_lhs_method->__name, &panic_arg);
- __go_panic (panic_arg);
- }
-
- if (methods == NULL)
- {
- methods = (const void **) __go_alloc ((lhs_method_count + 1)
- * sizeof (void *));
-
- /* The first field in the method table is always the type of
- the object. */
- methods[0] = rhs_descriptor;
- }
-
- methods[i + 1] = p_rhs_method->__function;
- }
-
- return methods;
-}
-
-/* This is called by the compiler to convert a value from one
- interface type to another. */
-
-void *
-__go_convert_interface (const struct __go_type_descriptor *lhs_descriptor,
- const struct __go_type_descriptor *rhs_descriptor)
-{
- return __go_convert_interface_2 (lhs_descriptor, rhs_descriptor, 0);
-}
diff --git a/libgo/runtime/go-copy.c b/libgo/runtime/go-copy.c
deleted file mode 100644
index 05e16acbf1..0000000000
--- a/libgo/runtime/go-copy.c
+++ /dev/null
@@ -1,22 +0,0 @@
-/* go-append.c -- the go builtin copy function.
-
- Copyright 2010 The Go Authors. All rights reserved.
- Use of this source code is governed by a BSD-style
- license that can be found in the LICENSE file. */
-
-#include <stddef.h>
-#include <stdint.h>
-
-/* We should be OK if we don't split the stack here, since we are just
- calling memmove which shouldn't need much stack. If we don't do
- this we will always split the stack, because of memmove. */
-
-extern void
-__go_copy (void *, void *, uintptr_t)
- __attribute__ ((no_split_stack));
-
-void
-__go_copy (void *a, void *b, uintptr_t len)
-{
- __builtin_memmove (a, b, len);
-}
diff --git a/libgo/runtime/go-defer.c b/libgo/runtime/go-defer.c
deleted file mode 100644
index 3a48fe1130..0000000000
--- a/libgo/runtime/go-defer.c
+++ /dev/null
@@ -1,85 +0,0 @@
-/* go-defer.c -- manage the defer stack.
-
- Copyright 2009 The Go Authors. All rights reserved.
- Use of this source code is governed by a BSD-style
- license that can be found in the LICENSE file. */
-
-#include <stddef.h>
-
-#include "runtime.h"
-#include "go-alloc.h"
-#include "go-panic.h"
-#include "go-defer.h"
-
-/* This function is called each time we need to defer a call. */
-
-void
-__go_defer (_Bool *frame, void (*pfn) (void *), void *arg)
-{
- G *g;
- struct __go_defer_stack *n;
-
- g = runtime_g ();
- n = runtime_newdefer ();
- n->__next = g->defer;
- n->__frame = frame;
- n->__panic = g->panic;
- n->__pfn = pfn;
- n->__arg = arg;
- n->__retaddr = NULL;
- n->__makefunc_can_recover = 0;
- n->__special = 0;
- g->defer = n;
-}
-
-/* This function is called when we want to undefer the stack. */
-
-void
-__go_undefer (_Bool *frame)
-{
- G *g;
-
- g = runtime_g ();
- while (g->defer != NULL && g->defer->__frame == frame)
- {
- struct __go_defer_stack *d;
- void (*pfn) (void *);
-
- d = g->defer;
- pfn = d->__pfn;
- d->__pfn = NULL;
-
- if (pfn != NULL)
- (*pfn) (d->__arg);
-
- g->defer = d->__next;
-
- /* This may be called by a cgo callback routine to defer the
- call to syscall.CgocallBackDone, in which case we will not
- have a memory context. Don't try to free anything in that
- case--the GC will release it later. */
- if (runtime_m () != NULL)
- runtime_freedefer (d);
-
- /* Since we are executing a defer function here, we know we are
- returning from the calling function. If the calling
- function, or one of its callees, paniced, then the defer
- functions would be executed by __go_panic. */
- *frame = 1;
- }
-}
-
-/* This function is called to record the address to which the deferred
- function returns. This may in turn be checked by __go_can_recover.
- The frontend relies on this function returning false. */
-
-_Bool
-__go_set_defer_retaddr (void *retaddr)
-{
- G *g;
-
- g = runtime_g ();
- if (g->defer != NULL)
- g->defer->__retaddr = __builtin_extract_return_addr (retaddr);
- return 0;
-}
diff --git a/libgo/runtime/go-defer.h b/libgo/runtime/go-defer.h
deleted file mode 100644
index acf2d40c69..0000000000
--- a/libgo/runtime/go-defer.h
+++ /dev/null
@@ -1,47 +0,0 @@
-/* go-defer.h -- the defer stack.
-
- Copyright 2010 The Go Authors. All rights reserved.
- Use of this source code is governed by a BSD-style
- license that can be found in the LICENSE file. */
-
-struct __go_panic_stack;
-
-/* The defer stack is a list of these structures. */
-
-struct __go_defer_stack
-{
- /* The next entry in the stack. */
- struct __go_defer_stack *__next;
-
- /* The stack variable for the function which called this defer
- statement. This is set to 1 if we are returning from that
- function, 0 if we are panicing through it. */
- _Bool *__frame;
-
- /* The value of the panic stack when this function is deferred.
- This function can not recover this value from the panic stack.
- This can happen if a deferred function has a defer statement
- itself. */
- struct __go_panic_stack *__panic;
-
- /* The function to call. */
- void (*__pfn) (void *);
-
- /* The argument to pass to the function. */
- void *__arg;
-
- /* The return address that a recover thunk matches against. This is
- set by __go_set_defer_retaddr which is called by the thunks
- created by defer statements. */
- const void *__retaddr;
-
- /* Set to true if a function created by reflect.MakeFunc is
- permitted to recover. The return address of such a function
- function will be somewhere in libffi, so __retaddr is not
- useful. */
- _Bool __makefunc_can_recover;
-
- /* Set to true if this defer stack entry is not part of the defer
- pool. */
- _Bool __special;
-};
diff --git a/libgo/runtime/go-deferred-recover.c b/libgo/runtime/go-deferred-recover.c
deleted file mode 100644
index 78ef287cf0..0000000000
--- a/libgo/runtime/go-deferred-recover.c
+++ /dev/null
@@ -1,94 +0,0 @@
-/* go-deferred-recover.c -- support for a deferred recover function.
-
- Copyright 2010 The Go Authors. All rights reserved.
- Use of this source code is governed by a BSD-style
- license that can be found in the LICENSE file. */
-
-#include <stddef.h>
-
-#include "runtime.h"
-#include "go-panic.h"
-#include "go-defer.h"
-
-/* This is called when a call to recover is deferred. That is,
- something like
- defer recover()
-
- We need to handle this specially. In 6g/8g, the recover function
- looks up the stack frame. In particular, that means that a
- deferred recover will not recover a panic thrown in the same
- function that defers the recover. It will only recover a panic
- thrown in a function that defers the deferred call to recover.
-
- In other words:
-
- func f1() {
- defer recover() // does not stop panic
- panic(0)
- }
-
- func f2() {
- defer func() {
- defer recover() // stops panic(0)
- }()
- panic(0)
- }
-
- func f3() {
- defer func() {
- defer recover() // does not stop panic
- panic(0)
- }()
- panic(1)
- }
-
- func f4() {
- defer func() {
- defer func() {
- defer recover() // stops panic(0)
- }()
- panic(0)
- }()
- panic(1)
- }
-
- The interesting case here is f3. As can be seen from f2, the
- deferred recover could pick up panic(1). However, this does not
- happen because it is blocked by the panic(0).
-
- When a function calls recover, then when we invoke it we pass a
- hidden parameter indicating whether it should recover something.
- This parameter is set based on whether the function is being
- invoked directly from defer. The parameter winds up determining
- whether __go_recover or __go_deferred_recover is called at all.
-
- In the case of a deferred recover, the hidden parameter which
- controls the call is actually the one set up for the function which
- runs the defer recover() statement. That is the right thing in all
- the cases above except for f3. In f3 the function is permitted to
- call recover, but the deferred recover call is not. We address
- that here by checking for that specific case before calling
- recover. If this function was deferred when there is already a
- panic on the panic stack, then we can only recover that panic, not
- any other.
-
- Note that we can get away with using a special function here
- because you are not permitted to take the address of a predeclared
- function like recover. */
-
-struct __go_empty_interface
-__go_deferred_recover ()
-{
- G *g;
-
- g = runtime_g ();
- if (g->defer == NULL || g->defer->__panic != g->panic)
- {
- struct __go_empty_interface ret;
-
- ret.__type_descriptor = NULL;
- ret.__object = NULL;
- return ret;
- }
- return __go_recover ();
-}
diff --git a/libgo/runtime/go-eface-compare.c b/libgo/runtime/go-eface-compare.c
deleted file mode 100644
index 40b716eb4a..0000000000
--- a/libgo/runtime/go-eface-compare.c
+++ /dev/null
@@ -1,35 +0,0 @@
-/* go-eface-compare.c -- compare two empty values.
-
- Copyright 2010 The Go Authors. All rights reserved.
- Use of this source code is governed by a BSD-style
- license that can be found in the LICENSE file. */
-
-#include "runtime.h"
-#include "go-type.h"
-#include "interface.h"
-
-/* Compare two interface values. Return 0 for equal, not zero for not
- equal (return value is like strcmp). */
-
-intgo
-__go_empty_interface_compare (struct __go_empty_interface left,
- struct __go_empty_interface right)
-{
- const struct __go_type_descriptor *left_descriptor;
-
- left_descriptor = left.__type_descriptor;
-
- if (left_descriptor == NULL && right.__type_descriptor == NULL)
- return 0;
- if (left_descriptor == NULL || right.__type_descriptor == NULL)
- return 1;
- if (!__go_type_descriptors_equal (left_descriptor,
- right.__type_descriptor))
- return 1;
- if (__go_is_pointer_type (left_descriptor))
- return left.__object == right.__object ? 0 : 1;
- if (!__go_call_equalfn (left_descriptor->__equalfn, left.__object,
- right.__object, left_descriptor->__size))
- return 1;
- return 0;
-}
diff --git a/libgo/runtime/go-eface-val-compare.c b/libgo/runtime/go-eface-val-compare.c
deleted file mode 100644
index e810750d5d..0000000000
--- a/libgo/runtime/go-eface-val-compare.c
+++ /dev/null
@@ -1,33 +0,0 @@
-/* go-eface-val-compare.c -- compare an empty interface with a value.
-
- Copyright 2010 The Go Authors. All rights reserved.
- Use of this source code is governed by a BSD-style
- license that can be found in the LICENSE file. */
-
-#include "runtime.h"
-#include "go-type.h"
-#include "interface.h"
-
-/* Compare an empty interface with a value. Return 0 for equal, not
- zero for not equal (return value is like strcmp). */
-
-intgo
-__go_empty_interface_value_compare (
- struct __go_empty_interface left,
- const struct __go_type_descriptor *right_descriptor,
- const void *val)
-{
- const struct __go_type_descriptor *left_descriptor;
-
- left_descriptor = left.__type_descriptor;
- if (left_descriptor == NULL)
- return 1;
- if (!__go_type_descriptors_equal (left_descriptor, right_descriptor))
- return 1;
- if (__go_is_pointer_type (left_descriptor))
- return left.__object == val ? 0 : 1;
- if (!__go_call_equalfn (left_descriptor->__equalfn, left.__object, val,
- left_descriptor->__size))
- return 1;
- return 0;
-}
diff --git a/libgo/runtime/go-ffi.c b/libgo/runtime/go-ffi.c
index aafc7b205e..b030f5e918 100644
--- a/libgo/runtime/go-ffi.c
+++ b/libgo/runtime/go-ffi.c
@@ -1,346 +1,152 @@
-/* go-ffi.c -- convert Go type description to libffi.
+/* go-ffi.c -- libffi support functions.
Copyright 2009 The Go Authors. All rights reserved.
Use of this source code is governed by a BSD-style
license that can be found in the LICENSE file. */
-#include <stdio.h>
-#include <stdint.h>
#include <stdlib.h>
#include "runtime.h"
-#include "go-alloc.h"
-#include "go-assert.h"
-#include "go-type.h"
#ifdef USE_LIBFFI
#include "ffi.h"
-/* The functions in this file are only called from reflect_call and
- reflect.ffi. As these functions call libffi functions, which will
- be compiled without -fsplit-stack, they will always run with a
- large stack. */
-
-static ffi_type *go_array_to_ffi (const struct __go_array_type *)
- __attribute__ ((no_split_stack));
-static ffi_type *go_slice_to_ffi (const struct __go_slice_type *)
- __attribute__ ((no_split_stack));
-static ffi_type *go_struct_to_ffi (const struct __go_struct_type *)
- __attribute__ ((no_split_stack));
-static ffi_type *go_string_to_ffi (void) __attribute__ ((no_split_stack));
-static ffi_type *go_interface_to_ffi (void) __attribute__ ((no_split_stack));
-static ffi_type *go_type_to_ffi (const struct __go_type_descriptor *)
- __attribute__ ((no_split_stack));
-static ffi_type *go_func_return_ffi (const struct __go_func_type *)
- __attribute__ ((no_split_stack));
-
-/* Return an ffi_type for a Go array type. The libffi library does
- not have any builtin support for passing arrays as values. We work
- around this by pretending that the array is a struct. */
-
-static ffi_type *
-go_array_to_ffi (const struct __go_array_type *descriptor)
+/* The functions in this file are called by the Go runtime code to get
+ the libffi type values. */
+
+ffi_type *go_ffi_type_pointer(void) __attribute__ ((no_split_stack));
+ffi_type *go_ffi_type_pointer(void) __asm__ ("runtime.ffi_type_pointer");
+ffi_type *go_ffi_type_sint8(void) __attribute__ ((no_split_stack));
+ffi_type *go_ffi_type_sint8(void) __asm__ ("runtime.ffi_type_sint8");
+ffi_type *go_ffi_type_sint16(void) __attribute__ ((no_split_stack));
+ffi_type *go_ffi_type_sint16(void) __asm__ ("runtime.ffi_type_sint16");
+ffi_type *go_ffi_type_sint32(void) __attribute__ ((no_split_stack));
+ffi_type *go_ffi_type_sint32(void) __asm__ ("runtime.ffi_type_sint32");
+ffi_type *go_ffi_type_sint64(void) __attribute__ ((no_split_stack));
+ffi_type *go_ffi_type_sint64(void) __asm__ ("runtime.ffi_type_sint64");
+ffi_type *go_ffi_type_uint8(void) __attribute__ ((no_split_stack));
+ffi_type *go_ffi_type_uint8(void) __asm__ ("runtime.ffi_type_uint8");
+ffi_type *go_ffi_type_uint16(void) __attribute__ ((no_split_stack));
+ffi_type *go_ffi_type_uint16(void) __asm__ ("runtime.ffi_type_uint16");
+ffi_type *go_ffi_type_uint32(void) __attribute__ ((no_split_stack));
+ffi_type *go_ffi_type_uint32(void) __asm__ ("runtime.ffi_type_uint32");
+ffi_type *go_ffi_type_uint64(void) __attribute__ ((no_split_stack));
+ffi_type *go_ffi_type_uint64(void) __asm__ ("runtime.ffi_type_uint64");
+ffi_type *go_ffi_type_float(void) __attribute__ ((no_split_stack));
+ffi_type *go_ffi_type_float(void) __asm__ ("runtime.ffi_type_float");
+ffi_type *go_ffi_type_double(void) __attribute__ ((no_split_stack));
+ffi_type *go_ffi_type_double(void) __asm__ ("runtime.ffi_type_double");
+ffi_type *go_ffi_type_complex_float(void) __attribute__ ((no_split_stack));
+ffi_type *go_ffi_type_complex_float(void) __asm__ ("runtime.ffi_type_complex_float");
+ffi_type *go_ffi_type_complex_double(void) __attribute__ ((no_split_stack));
+ffi_type *go_ffi_type_complex_double(void) __asm__ ("runtime.ffi_type_complex_double");
+ffi_type *go_ffi_type_void(void) __attribute__ ((no_split_stack));
+ffi_type *go_ffi_type_void(void) __asm__ ("runtime.ffi_type_void");
+
+_Bool go_ffi_supports_complex(void) __attribute__ ((no_split_stack));
+_Bool go_ffi_supports_complex(void) __asm__ ("runtime.ffi_supports_complex");
+
+ffi_type *
+go_ffi_type_pointer(void)
{
- ffi_type *ret;
- uintptr_t len;
- ffi_type *element;
- uintptr_t i;
-
- ret = (ffi_type *) __go_alloc (sizeof (ffi_type));
- ret->type = FFI_TYPE_STRUCT;
- len = descriptor->__len;
- if (len == 0)
- {
- /* The libffi library won't accept an empty struct. */
- ret->elements = (ffi_type **) __go_alloc (2 * sizeof (ffi_type *));
- ret->elements[0] = &ffi_type_void;
- ret->elements[1] = NULL;
- return ret;
- }
- ret->elements = (ffi_type **) __go_alloc ((len + 1) * sizeof (ffi_type *));
- element = go_type_to_ffi (descriptor->__element_type);
- for (i = 0; i < len; ++i)
- ret->elements[i] = element;
- ret->elements[len] = NULL;
- return ret;
+ return &ffi_type_pointer;
}
-/* Return an ffi_type for a Go slice type. This describes the
- __go_open_array type defines in array.h. */
-
-static ffi_type *
-go_slice_to_ffi (
- const struct __go_slice_type *descriptor __attribute__ ((unused)))
+ffi_type *
+go_ffi_type_sint8(void)
{
- ffi_type *ret;
- ffi_type *ffi_intgo;
-
- ret = (ffi_type *) __go_alloc (sizeof (ffi_type));
- ret->type = FFI_TYPE_STRUCT;
- ret->elements = (ffi_type **) __go_alloc (4 * sizeof (ffi_type *));
- ret->elements[0] = &ffi_type_pointer;
- ffi_intgo = sizeof (intgo) == 4 ? &ffi_type_sint32 : &ffi_type_sint64;
- ret->elements[1] = ffi_intgo;
- ret->elements[2] = ffi_intgo;
- ret->elements[3] = NULL;
- return ret;
+ return &ffi_type_sint8;
}
-/* Return an ffi_type for a Go struct type. */
-
-static ffi_type *
-go_struct_to_ffi (const struct __go_struct_type *descriptor)
+ffi_type *
+go_ffi_type_sint16(void)
{
- ffi_type *ret;
- int field_count;
- const struct __go_struct_field *fields;
- int i;
-
- field_count = descriptor->__fields.__count;
- ret = (ffi_type *) __go_alloc (sizeof (ffi_type));
- ret->type = FFI_TYPE_STRUCT;
- if (field_count == 0)
- {
- /* The libffi library won't accept an empty struct. */
- ret->elements = (ffi_type **) __go_alloc (2 * sizeof (ffi_type *));
- ret->elements[0] = &ffi_type_void;
- ret->elements[1] = NULL;
- return ret;
- }
- fields = (const struct __go_struct_field *) descriptor->__fields.__values;
- ret->elements = (ffi_type **) __go_alloc ((field_count + 1)
- * sizeof (ffi_type *));
- for (i = 0; i < field_count; ++i)
- ret->elements[i] = go_type_to_ffi (fields[i].__type);
- ret->elements[field_count] = NULL;
- return ret;
+ return &ffi_type_sint16;
}
-/* Return an ffi_type for a Go string type. This describes the String
- struct. */
-
-static ffi_type *
-go_string_to_ffi (void)
+ffi_type *
+go_ffi_type_sint32(void)
{
- ffi_type *ret;
- ffi_type *ffi_intgo;
-
- ret = (ffi_type *) __go_alloc (sizeof (ffi_type));
- ret->type = FFI_TYPE_STRUCT;
- ret->elements = (ffi_type **) __go_alloc (3 * sizeof (ffi_type *));
- ret->elements[0] = &ffi_type_pointer;
- ffi_intgo = sizeof (intgo) == 4 ? &ffi_type_sint32 : &ffi_type_sint64;
- ret->elements[1] = ffi_intgo;
- ret->elements[2] = NULL;
- return ret;
+ return &ffi_type_sint32;
}
-/* Return an ffi_type for a Go interface type. This describes the
- __go_interface and __go_empty_interface structs. */
-
-static ffi_type *
-go_interface_to_ffi (void)
+ffi_type *
+go_ffi_type_sint64(void)
{
- ffi_type *ret;
-
- ret = (ffi_type *) __go_alloc (sizeof (ffi_type));
- ret->type = FFI_TYPE_STRUCT;
- ret->elements = (ffi_type **) __go_alloc (3 * sizeof (ffi_type *));
- ret->elements[0] = &ffi_type_pointer;
- ret->elements[1] = &ffi_type_pointer;
- ret->elements[2] = NULL;
- return ret;
+ return &ffi_type_sint64;
}
+ffi_type *
+go_ffi_type_uint8(void)
+{
+ return &ffi_type_uint8;
+}
-#ifndef FFI_TARGET_HAS_COMPLEX_TYPE
-/* If libffi hasn't been updated for this target to support complex,
- pretend complex is a structure. Warning: This does not work for
- all ABIs. Eventually libffi should be updated for all targets
- and this should go away. */
-
-static ffi_type *go_complex_to_ffi (ffi_type *)
- __attribute__ ((no_split_stack));
-
-static ffi_type *
-go_complex_to_ffi (ffi_type *float_type)
+ffi_type *
+go_ffi_type_uint16(void)
{
- ffi_type *ret;
+ return &ffi_type_uint16;
+}
- ret = (ffi_type *) __go_alloc (sizeof (ffi_type));
- ret->type = FFI_TYPE_STRUCT;
- ret->elements = (ffi_type **) __go_alloc (3 * sizeof (ffi_type *));
- ret->elements[0] = float_type;
- ret->elements[1] = float_type;
- ret->elements[2] = NULL;
- return ret;
+ffi_type *
+go_ffi_type_uint32(void)
+{
+ return &ffi_type_uint32;
}
-#endif
-/* Return an ffi_type for a type described by a
- __go_type_descriptor. */
+ffi_type *
+go_ffi_type_uint64(void)
+{
+ return &ffi_type_uint64;
+}
-static ffi_type *
-go_type_to_ffi (const struct __go_type_descriptor *descriptor)
+ffi_type *
+go_ffi_type_float(void)
{
- switch (descriptor->__code & GO_CODE_MASK)
- {
- case GO_BOOL:
- if (sizeof (_Bool) == 1)
- return &ffi_type_uint8;
- else if (sizeof (_Bool) == sizeof (int))
- return &ffi_type_uint;
- abort ();
- case GO_FLOAT32:
- if (sizeof (float) == 4)
return &ffi_type_float;
- abort ();
- case GO_FLOAT64:
- if (sizeof (double) == 8)
+}
+
+ffi_type *
+go_ffi_type_double(void)
+{
return &ffi_type_double;
- abort ();
- case GO_COMPLEX64:
- if (sizeof (float) == 4)
- {
+}
+
+_Bool
+go_ffi_supports_complex(void)
+{
#ifdef FFI_TARGET_HAS_COMPLEX_TYPE
- return &ffi_type_complex_float;
+ return true;
#else
- return go_complex_to_ffi (&ffi_type_float);
+ return false;
#endif
- }
- abort ();
- case GO_COMPLEX128:
- if (sizeof (double) == 8)
- {
+}
+
+ffi_type *
+go_ffi_type_complex_float(void)
+{
#ifdef FFI_TARGET_HAS_COMPLEX_TYPE
- return &ffi_type_complex_double;
+ return &ffi_type_complex_float;
#else
- return go_complex_to_ffi (&ffi_type_double);
+ abort();
#endif
- }
- abort ();
- case GO_INT16:
- return &ffi_type_sint16;
- case GO_INT32:
- return &ffi_type_sint32;
- case GO_INT64:
- return &ffi_type_sint64;
- case GO_INT8:
- return &ffi_type_sint8;
- case GO_INT:
- return sizeof (intgo) == 4 ? &ffi_type_sint32 : &ffi_type_sint64;
- case GO_UINT16:
- return &ffi_type_uint16;
- case GO_UINT32:
- return &ffi_type_uint32;
- case GO_UINT64:
- return &ffi_type_uint64;
- case GO_UINT8:
- return &ffi_type_uint8;
- case GO_UINT:
- return sizeof (uintgo) == 4 ? &ffi_type_uint32 : &ffi_type_uint64;
- case GO_UINTPTR:
- if (sizeof (void *) == 2)
- return &ffi_type_uint16;
- else if (sizeof (void *) == 4)
- return &ffi_type_uint32;
- else if (sizeof (void *) == 8)
- return &ffi_type_uint64;
- abort ();
- case GO_ARRAY:
- return go_array_to_ffi ((const struct __go_array_type *) descriptor);
- case GO_SLICE:
- return go_slice_to_ffi ((const struct __go_slice_type *) descriptor);
- case GO_STRUCT:
- return go_struct_to_ffi ((const struct __go_struct_type *) descriptor);
- case GO_STRING:
- return go_string_to_ffi ();
- case GO_INTERFACE:
- return go_interface_to_ffi ();
- case GO_CHAN:
- case GO_FUNC:
- case GO_MAP:
- case GO_PTR:
- case GO_UNSAFE_POINTER:
- /* These types are always pointers, and for FFI purposes nothing
- else matters. */
- return &ffi_type_pointer;
- default:
- abort ();
- }
}
-/* Return the return type for a function, given the number of out
- parameters and their types. */
-
-static ffi_type *
-go_func_return_ffi (const struct __go_func_type *func)
+ffi_type *
+go_ffi_type_complex_double(void)
{
- int count;
- const struct __go_type_descriptor **types;
- ffi_type *ret;
- int i;
-
- count = func->__out.__count;
- if (count == 0)
- return &ffi_type_void;
-
- types = (const struct __go_type_descriptor **) func->__out.__values;
-
- if (count == 1)
- return go_type_to_ffi (types[0]);
-
- ret = (ffi_type *) __go_alloc (sizeof (ffi_type));
- ret->type = FFI_TYPE_STRUCT;
- ret->elements = (ffi_type **) __go_alloc ((count + 1) * sizeof (ffi_type *));
- for (i = 0; i < count; ++i)
- ret->elements[i] = go_type_to_ffi (types[i]);
- ret->elements[count] = NULL;
- return ret;
+#ifdef FFI_TARGET_HAS_COMPLEX_TYPE
+ return &ffi_type_complex_double;
+#else
+ abort();
+#endif
}
-/* Build an ffi_cif structure for a function described by a
- __go_func_type structure. */
-
-void
-__go_func_to_cif (const struct __go_func_type *func, _Bool is_interface,
- _Bool is_method, ffi_cif *cif)
+ffi_type *
+go_ffi_type_void(void)
{
- int num_params;
- const struct __go_type_descriptor **in_types;
- size_t num_args;
- ffi_type **args;
- int off;
- int i;
- ffi_type *rettype;
- ffi_status status;
-
- num_params = func->__in.__count;
- in_types = ((const struct __go_type_descriptor **)
- func->__in.__values);
-
- num_args = num_params + (is_interface ? 1 : 0);
- args = (ffi_type **) __go_alloc (num_args * sizeof (ffi_type *));
- i = 0;
- off = 0;
- if (is_interface)
- {
- args[0] = &ffi_type_pointer;
- off = 1;
- }
- else if (is_method)
- {
- args[0] = &ffi_type_pointer;
- i = 1;
- }
- for (; i < num_params; ++i)
- args[i + off] = go_type_to_ffi (in_types[i]);
-
- rettype = go_func_return_ffi (func);
-
- status = ffi_prep_cif (cif, FFI_DEFAULT_ABI, num_args, rettype, args);
- __go_assert (status == FFI_OK);
+ return &ffi_type_void;
}
#endif /* defined(USE_LIBFFI) */
diff --git a/libgo/runtime/go-ffi.h b/libgo/runtime/go-ffi.h
deleted file mode 100644
index afae4b6d6e..0000000000
--- a/libgo/runtime/go-ffi.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* go-ffi.c -- convert Go type description to libffi.
-
- Copyright 2014 The Go Authors. All rights reserved.
- Use of this source code is governed by a BSD-style
- license that can be found in the LICENSE file. */
-
-#include "config.h"
-#include "go-type.h"
-
-#ifdef USE_LIBFFI
-
-#include "ffi.h"
-
-void __go_func_to_cif (const struct __go_func_type *, _Bool, _Bool, ffi_cif *);
-
-#endif
diff --git a/libgo/runtime/go-fieldtrack.c b/libgo/runtime/go-fieldtrack.c
index a7e2c13344..c4f27ef079 100644
--- a/libgo/runtime/go-fieldtrack.c
+++ b/libgo/runtime/go-fieldtrack.c
@@ -6,7 +6,6 @@
#include "runtime.h"
#include "go-type.h"
-#include "map.h"
/* The compiler will track fields that have the tag go:"track". Any
function that refers to such a field will call this function with a
@@ -34,16 +33,26 @@ extern const char _edata[] __attribute__ ((weak));
extern const char __edata[] __attribute__ ((weak));
extern const char __bss_start[] __attribute__ ((weak));
-void runtime_Fieldtrack (struct __go_map *) __asm__ (GOSYM_PREFIX "runtime.Fieldtrack");
+extern void *mapassign (const struct __go_map_type *, void *hmap,
+ const void *key)
+ __asm__ (GOSYM_PREFIX "runtime.mapassign");
+
+// The type descriptor for map[string] bool. */
+extern const char __go_td_MN6_string__N4_bool[] __attribute__ ((weak));
+
+void runtime_Fieldtrack (void *) __asm__ (GOSYM_PREFIX "runtime.Fieldtrack");
void
-runtime_Fieldtrack (struct __go_map *m)
+runtime_Fieldtrack (void *m)
{
const char *p;
const char *pend;
const char *prefix;
size_t prefix_len;
+ if (__go_td_MN6_string__N4_bool == NULL)
+ return;
+
p = __data_start;
if (p == NULL)
p = __etext;
@@ -86,14 +95,12 @@ runtime_Fieldtrack (struct __go_map *m)
if (__builtin_memchr (q1, '\0', q2 - q1) == NULL)
{
String s;
- void *v;
- _Bool *pb;
+ void *p;
s.str = (const byte *) q1;
s.len = q2 - q1;
- v = __go_map_index (m, &s, 1);
- pb = (_Bool *) v;
- *pb = 1;
+ p = mapassign((const void*) __go_td_MN6_string__N4_bool, m, &s);
+ *(_Bool*)p = 1;
}
p = q2;
diff --git a/libgo/runtime/go-iface.goc b/libgo/runtime/go-iface.goc
deleted file mode 100644
index 0d5cb5e97a..0000000000
--- a/libgo/runtime/go-iface.goc
+++ /dev/null
@@ -1,130 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package runtime
-#include "runtime.h"
-#include "go-type.h"
-#include "interface.h"
-
-typedef struct __go_type_descriptor descriptor;
-typedef const struct __go_type_descriptor const_descriptor;
-typedef struct __go_interface interface;
-typedef struct __go_empty_interface empty_interface;
-
-// Compare two type descriptors.
-func ifacetypeeq(a *descriptor, b *descriptor) (eq bool) {
- eq = __go_type_descriptors_equal(a, b);
-}
-
-// Return the descriptor for an empty interface type.n
-func efacetype(e empty_interface) (d *const_descriptor) {
- return e.__type_descriptor;
-}
-
-// Return the descriptor for a non-empty interface type.
-func ifacetype(i interface) (d *const_descriptor) {
- if (i.__methods == nil) {
- return nil;
- }
- d = i.__methods[0];
-}
-
-// Convert an empty interface to an empty interface.
-func ifaceE2E2(e empty_interface) (ret empty_interface, ok bool) {
- ret = e;
- ok = ret.__type_descriptor != nil;
-}
-
-// Convert a non-empty interface to an empty interface.
-func ifaceI2E2(i interface) (ret empty_interface, ok bool) {
- if (i.__methods == nil) {
- ret.__type_descriptor = nil;
- ret.__object = nil;
- ok = 0;
- } else {
- ret.__type_descriptor = i.__methods[0];
- ret.__object = i.__object;
- ok = 1;
- }
-}
-
-// Convert an empty interface to a non-empty interface.
-func ifaceE2I2(inter *descriptor, e empty_interface) (ret interface, ok bool) {
- if (e.__type_descriptor == nil) {
- ret.__methods = nil;
- ret.__object = nil;
- ok = 0;
- } else {
- ret.__methods = __go_convert_interface_2(inter,
- e.__type_descriptor,
- 1);
- ret.__object = e.__object;
- ok = ret.__methods != nil;
- }
-}
-
-// Convert a non-empty interface to a non-empty interface.
-func ifaceI2I2(inter *descriptor, i interface) (ret interface, ok bool) {
- if (i.__methods == nil) {
- ret.__methods = nil;
- ret.__object = nil;
- ok = 0;
- } else {
- ret.__methods = __go_convert_interface_2(inter,
- i.__methods[0], 1);
- ret.__object = i.__object;
- ok = ret.__methods != nil;
- }
-}
-
-// Convert an empty interface to a pointer type.
-func ifaceE2T2P(inter *descriptor, e empty_interface) (ret *void, ok bool) {
- if (!__go_type_descriptors_equal(inter, e.__type_descriptor)) {
- ret = nil;
- ok = 0;
- } else {
- ret = e.__object;
- ok = 1;
- }
-}
-
-// Convert a non-empty interface to a pointer type.
-func ifaceI2T2P(inter *descriptor, i interface) (ret *void, ok bool) {
- if (i.__methods == nil
- || !__go_type_descriptors_equal(inter, i.__methods[0])) {
- ret = nil;
- ok = 0;
- } else {
- ret = i.__object;
- ok = 1;
- }
-}
-
-// Convert an empty interface to a non-pointer type.
-func ifaceE2T2(inter *descriptor, e empty_interface, ret *void) (ok bool) {
- if (!__go_type_descriptors_equal(inter, e.__type_descriptor)) {
- __builtin_memset(ret, 0, inter->__size);
- ok = 0;
- } else {
- __builtin_memcpy(ret, e.__object, inter->__size);
- ok = 1;
- }
-}
-
-// Convert a non-empty interface to a non-pointer type.
-func ifaceI2T2(inter *descriptor, i interface, ret *void) (ok bool) {
- if (i.__methods == nil
- || !__go_type_descriptors_equal(inter, i.__methods[0])) {
- __builtin_memset(ret, 0, inter->__size);
- ok = 0;
- } else {
- __builtin_memcpy(ret, i.__object, inter->__size);
- ok = 1;
- }
-}
-
-// Return whether we can convert an interface to a type.
-func ifaceI2Tp(to *descriptor, from *descriptor) (ok bool) {
- ok = __go_can_convert_to_interface(to, from);
-}
diff --git a/libgo/runtime/go-int-array-to-string.c b/libgo/runtime/go-int-array-to-string.c
deleted file mode 100644
index f37213125a..0000000000
--- a/libgo/runtime/go-int-array-to-string.c
+++ /dev/null
@@ -1,89 +0,0 @@
-/* go-int-array-to-string.c -- convert an array of ints to a string in Go.
-
- Copyright 2009 The Go Authors. All rights reserved.
- Use of this source code is governed by a BSD-style
- license that can be found in the LICENSE file. */
-
-#include "go-assert.h"
-#include "runtime.h"
-#include "arch.h"
-#include "malloc.h"
-
-String
-__go_int_array_to_string (const void* p, intgo len)
-{
- const int32 *ints;
- intgo slen;
- intgo i;
- unsigned char *retdata;
- String ret;
- unsigned char *s;
-
- ints = (const int32 *) p;
-
- slen = 0;
- for (i = 0; i < len; ++i)
- {
- int32 v;
-
- v = ints[i];
-
- if (v < 0 || v > 0x10ffff)
- v = 0xfffd;
- else if (0xd800 <= v && v <= 0xdfff)
- v = 0xfffd;
-
- if (v <= 0x7f)
- slen += 1;
- else if (v <= 0x7ff)
- slen += 2;
- else if (v <= 0xffff)
- slen += 3;
- else
- slen += 4;
- }
-
- retdata = runtime_mallocgc ((uintptr) slen, 0, FlagNoScan);
- ret.str = retdata;
- ret.len = slen;
-
- s = retdata;
- for (i = 0; i < len; ++i)
- {
- int32 v;
-
- v = ints[i];
-
- /* If V is out of range for UTF-8, substitute the replacement
- character. */
- if (v < 0 || v > 0x10ffff)
- v = 0xfffd;
- else if (0xd800 <= v && v <= 0xdfff)
- v = 0xfffd;
-
- if (v <= 0x7f)
- *s++ = v;
- else if (v <= 0x7ff)
- {
- *s++ = 0xc0 | ((v >> 6) & 0x1f);
- *s++ = 0x80 | (v & 0x3f);
- }
- else if (v <= 0xffff)
- {
- *s++ = 0xe0 | ((v >> 12) & 0xf);
- *s++ = 0x80 | ((v >> 6) & 0x3f);
- *s++ = 0x80 | (v & 0x3f);
- }
- else
- {
- *s++ = 0xf0 | ((v >> 18) & 0x7);
- *s++ = 0x80 | ((v >> 12) & 0x3f);
- *s++ = 0x80 | ((v >> 6) & 0x3f);
- *s++ = 0x80 | (v & 0x3f);
- }
- }
-
- __go_assert (s - retdata == slen);
-
- return ret;
-}
diff --git a/libgo/runtime/go-int-to-string.c b/libgo/runtime/go-int-to-string.c
deleted file mode 100644
index d90b1ddfed..0000000000
--- a/libgo/runtime/go-int-to-string.c
+++ /dev/null
@@ -1,69 +0,0 @@
-/* go-int-to-string.c -- convert an integer to a string in Go.
-
- Copyright 2009 The Go Authors. All rights reserved.
- Use of this source code is governed by a BSD-style
- license that can be found in the LICENSE file. */
-
-#include "runtime.h"
-#include "arch.h"
-#include "malloc.h"
-
-String
-__go_int_to_string (intgo v)
-{
- char buf[4];
- int len;
- unsigned char *retdata;
- String ret;
-
- /* A negative value is not valid UTF-8; turn it into the replacement
- character. */
- if (v < 0)
- v = 0xfffd;
-
- if (v <= 0x7f)
- {
- buf[0] = v;
- len = 1;
- }
- else if (v <= 0x7ff)
- {
- buf[0] = 0xc0 + (v >> 6);
- buf[1] = 0x80 + (v & 0x3f);
- len = 2;
- }
- else
- {
- /* If the value is out of range for UTF-8, turn it into the
- "replacement character". */
- if (v > 0x10ffff)
- v = 0xfffd;
- /* If the value is a surrogate pair, which is invalid in UTF-8,
- turn it into the replacement character. */
- if (v >= 0xd800 && v < 0xe000)
- v = 0xfffd;
-
- if (v <= 0xffff)
- {
- buf[0] = 0xe0 + (v >> 12);
- buf[1] = 0x80 + ((v >> 6) & 0x3f);
- buf[2] = 0x80 + (v & 0x3f);
- len = 3;
- }
- else
- {
- buf[0] = 0xf0 + (v >> 18);
- buf[1] = 0x80 + ((v >> 12) & 0x3f);
- buf[2] = 0x80 + ((v >> 6) & 0x3f);
- buf[3] = 0x80 + (v & 0x3f);
- len = 4;
- }
- }
-
- retdata = runtime_mallocgc (len, 0, FlagNoScan);
- __builtin_memcpy (retdata, buf, len);
- ret.str = retdata;
- ret.len = len;
-
- return ret;
-}
diff --git a/libgo/runtime/go-interface-compare.c b/libgo/runtime/go-interface-compare.c
deleted file mode 100644
index 1d367753a1..0000000000
--- a/libgo/runtime/go-interface-compare.c
+++ /dev/null
@@ -1,35 +0,0 @@
-/* go-interface-compare.c -- compare two interface values.
-
- Copyright 2009 The Go Authors. All rights reserved.
- Use of this source code is governed by a BSD-style
- license that can be found in the LICENSE file. */
-
-#include <stddef.h>
-
-#include "runtime.h"
-#include "go-type.h"
-#include "interface.h"
-
-/* Compare two interface values. Return 0 for equal, not zero for not
- equal (return value is like strcmp). */
-
-int
-__go_interface_compare (struct __go_interface left,
- struct __go_interface right)
-{
- const struct __go_type_descriptor *left_descriptor;
-
- if (left.__methods == NULL && right.__methods == NULL)
- return 0;
- if (left.__methods == NULL || right.__methods == NULL)
- return 1;
- left_descriptor = left.__methods[0];
- if (!__go_type_descriptors_equal (left_descriptor, right.__methods[0]))
- return 1;
- if (__go_is_pointer_type (left_descriptor))
- return left.__object == right.__object ? 0 : 1;
- if (!__go_call_equalfn (left_descriptor->__equalfn, left.__object,
- right.__object, left_descriptor->__size))
- return 1;
- return 0;
-}
diff --git a/libgo/runtime/go-interface-eface-compare.c b/libgo/runtime/go-interface-eface-compare.c
deleted file mode 100644
index d1e6fd084d..0000000000
--- a/libgo/runtime/go-interface-eface-compare.c
+++ /dev/null
@@ -1,34 +0,0 @@
-/* go-interface-eface-compare.c -- compare non-empty and empty interface.
-
- Copyright 2011 The Go Authors. All rights reserved.
- Use of this source code is governed by a BSD-style
- license that can be found in the LICENSE file. */
-
-#include "runtime.h"
-#include "go-type.h"
-#include "interface.h"
-
-/* Compare a non-empty interface value with an empty interface value.
- Return 0 for equal, not zero for not equal (return value is like
- strcmp). */
-
-intgo
-__go_interface_empty_compare (struct __go_interface left,
- struct __go_empty_interface right)
-{
- const struct __go_type_descriptor *left_descriptor;
-
- if (left.__methods == NULL && right.__type_descriptor == NULL)
- return 0;
- if (left.__methods == NULL || right.__type_descriptor == NULL)
- return 1;
- left_descriptor = left.__methods[0];
- if (!__go_type_descriptors_equal (left_descriptor, right.__type_descriptor))
- return 1;
- if (__go_is_pointer_type (left_descriptor))
- return left.__object == right.__object ? 0 : 1;
- if (!__go_call_equalfn (left_descriptor->__equalfn, left.__object,
- right.__object, left_descriptor->__size))
- return 1;
- return 0;
-}
diff --git a/libgo/runtime/go-interface-val-compare.c b/libgo/runtime/go-interface-val-compare.c
deleted file mode 100644
index 36b6efdc9f..0000000000
--- a/libgo/runtime/go-interface-val-compare.c
+++ /dev/null
@@ -1,33 +0,0 @@
-/* go-interface-val-compare.c -- compare an interface to a value.
-
- Copyright 2009 The Go Authors. All rights reserved.
- Use of this source code is governed by a BSD-style
- license that can be found in the LICENSE file. */
-
-#include "runtime.h"
-#include "go-type.h"
-#include "interface.h"
-
-/* Compare two interface values. Return 0 for equal, not zero for not
- equal (return value is like strcmp). */
-
-intgo
-__go_interface_value_compare (
- struct __go_interface left,
- const struct __go_type_descriptor *right_descriptor,
- const void *val)
-{
- const struct __go_type_descriptor *left_descriptor;
-
- if (left.__methods == NULL)
- return 1;
- left_descriptor = left.__methods[0];
- if (!__go_type_descriptors_equal (left_descriptor, right_descriptor))
- return 1;
- if (__go_is_pointer_type (left_descriptor))
- return left.__object == val ? 0 : 1;
- if (!__go_call_equalfn (left_descriptor->__equalfn, left.__object, val,
- left_descriptor->__size))
- return 1;
- return 0;
-}
diff --git a/libgo/runtime/go-libmain.c b/libgo/runtime/go-libmain.c
index 6884f3a5f5..8e07e90178 100644
--- a/libgo/runtime/go-libmain.c
+++ b/libgo/runtime/go-libmain.c
@@ -13,7 +13,6 @@
#include <unistd.h>
#include "runtime.h"
-#include "go-alloc.h"
#include "array.h"
#include "arch.h"
#include "malloc.h"
@@ -61,6 +60,8 @@ initfn (int argc, char **argv, char** env __attribute__ ((unused)))
runtime_isarchive = true;
+ setIsCgo ();
+ runtime_cpuinit ();
runtime_initsig(true);
a = (struct args *) malloc (sizeof *a);
diff --git a/libgo/runtime/go-main.c b/libgo/runtime/go-main.c
index ff2958c239..dba8085260 100644
--- a/libgo/runtime/go-main.c
+++ b/libgo/runtime/go-main.c
@@ -15,7 +15,6 @@
#endif
#include "runtime.h"
-#include "go-alloc.h"
#include "array.h"
#include "arch.h"
#include "malloc.h"
@@ -46,7 +45,11 @@ main (int argc, char **argv)
return 0;
runtime_isstarted = true;
+ if (runtime_iscgo)
+ setIsCgo ();
+
__go_end = (uintptr)_end;
+ runtime_cpuinit ();
runtime_check ();
runtime_args (argc, (byte **) argv);
runtime_osinit ();
diff --git a/libgo/runtime/go-make-slice.c b/libgo/runtime/go-make-slice.c
deleted file mode 100644
index ccd07e5ac5..0000000000
--- a/libgo/runtime/go-make-slice.c
+++ /dev/null
@@ -1,99 +0,0 @@
-/* go-make-slice.c -- make a slice.
-
- Copyright 2011 The Go Authors. All rights reserved.
- Use of this source code is governed by a BSD-style
- license that can be found in the LICENSE file. */
-
-#include <stdint.h>
-
-#include "runtime.h"
-#include "go-alloc.h"
-#include "go-assert.h"
-#include "go-panic.h"
-#include "go-type.h"
-#include "array.h"
-#include "arch.h"
-#include "malloc.h"
-
-/* Dummy word to use as base pointer for make([]T, 0).
- Since you cannot take the address of such a slice,
- you can't tell that they all have the same base pointer. */
-uintptr runtime_zerobase;
-
-struct __go_open_array
-__go_make_slice2 (const struct __go_type_descriptor *td, uintptr_t len,
- uintptr_t cap)
-{
- const struct __go_slice_type* std;
- intgo ilen;
- intgo icap;
- uintptr_t size;
- struct __go_open_array ret;
-
- __go_assert ((td->__code & GO_CODE_MASK) == GO_SLICE);
- std = (const struct __go_slice_type *) td;
-
- ilen = (intgo) len;
- if (ilen < 0
- || (uintptr_t) ilen != len
- || (std->__element_type->__size > 0
- && len > MaxMem / std->__element_type->__size))
- runtime_panicstring ("makeslice: len out of range");
-
- icap = (intgo) cap;
- if (cap < len
- || (uintptr_t) icap != cap
- || (std->__element_type->__size > 0
- && cap > MaxMem / std->__element_type->__size))
- runtime_panicstring ("makeslice: cap out of range");
-
- ret.__count = ilen;
- ret.__capacity = icap;
-
- size = cap * std->__element_type->__size;
-
- if (size == 0)
- ret.__values = &runtime_zerobase;
- else if ((std->__element_type->__code & GO_NO_POINTERS) != 0)
- ret.__values =
- runtime_mallocgc (size,
- (uintptr) std->__element_type | TypeInfo_Array,
- FlagNoScan);
- else
- ret.__values =
- runtime_mallocgc (size,
- (uintptr) std->__element_type | TypeInfo_Array,
- 0);
-
- return ret;
-}
-
-struct __go_open_array
-__go_make_slice1 (const struct __go_type_descriptor *td, uintptr_t len)
-{
- return __go_make_slice2 (td, len, len);
-}
-
-struct __go_open_array
-__go_make_slice2_big (const struct __go_type_descriptor *td, uint64_t len,
- uint64_t cap)
-{
- uintptr_t slen;
- uintptr_t scap;
-
- slen = (uintptr_t) len;
- if ((uint64_t) slen != len)
- runtime_panicstring ("makeslice: len out of range");
-
- scap = (uintptr_t) cap;
- if ((uint64_t) scap != cap)
- runtime_panicstring ("makeslice: cap out of range");
-
- return __go_make_slice2 (td, slen, scap);
-}
-
-struct __go_open_array
-__go_make_slice1_big (const struct __go_type_descriptor *td, uint64_t len)
-{
- return __go_make_slice2_big (td, len, len);
-}
diff --git a/libgo/runtime/go-map-delete.c b/libgo/runtime/go-map-delete.c
deleted file mode 100644
index fb7c331856..0000000000
--- a/libgo/runtime/go-map-delete.c
+++ /dev/null
@@ -1,61 +0,0 @@
-/* go-map-delete.c -- delete an entry from a map.
-
- Copyright 2009 The Go Authors. All rights reserved.
- Use of this source code is governed by a BSD-style
- license that can be found in the LICENSE file. */
-
-#include <stddef.h>
-#include <stdlib.h>
-
-#include "runtime.h"
-#include "malloc.h"
-#include "go-alloc.h"
-#include "go-assert.h"
-#include "map.h"
-
-/* Delete the entry matching KEY from MAP. */
-
-void
-__go_map_delete (struct __go_map *map, const void *key)
-{
- const struct __go_map_descriptor *descriptor;
- const struct __go_type_descriptor *key_descriptor;
- uintptr_t key_offset;
- const FuncVal *equalfn;
- size_t key_hash;
- size_t key_size;
- size_t bucket_index;
- void **pentry;
-
- if (map == NULL)
- return;
-
- descriptor = map->__descriptor;
-
- key_descriptor = descriptor->__map_descriptor->__key_type;
- key_offset = descriptor->__key_offset;
- key_size = key_descriptor->__size;
- if (key_size == 0)
- return;
-
- __go_assert (key_size != -1UL);
- equalfn = key_descriptor->__equalfn;
-
- key_hash = __go_call_hashfn (key_descriptor->__hashfn, key, key_size);
- bucket_index = key_hash % map->__bucket_count;
-
- pentry = map->__buckets + bucket_index;
- while (*pentry != NULL)
- {
- char *entry = (char *) *pentry;
- if (__go_call_equalfn (equalfn, key, entry + key_offset, key_size))
- {
- *pentry = *(void **) entry;
- if (descriptor->__entry_size >= TinySize)
- __go_free (entry);
- map->__element_count -= 1;
- break;
- }
- pentry = (void **) entry;
- }
-}
diff --git a/libgo/runtime/go-map-index.c b/libgo/runtime/go-map-index.c
deleted file mode 100644
index 353041db6c..0000000000
--- a/libgo/runtime/go-map-index.c
+++ /dev/null
@@ -1,137 +0,0 @@
-/* go-map-index.c -- find or insert an entry in a map.
-
- Copyright 2009 The Go Authors. All rights reserved.
- Use of this source code is governed by a BSD-style
- license that can be found in the LICENSE file. */
-
-#include <stddef.h>
-#include <stdlib.h>
-
-#include "runtime.h"
-#include "malloc.h"
-#include "go-alloc.h"
-#include "go-assert.h"
-#include "map.h"
-
-/* Rehash MAP to a larger size. */
-
-static void
-__go_map_rehash (struct __go_map *map)
-{
- const struct __go_map_descriptor *descriptor;
- const struct __go_type_descriptor *key_descriptor;
- uintptr_t key_offset;
- size_t key_size;
- const FuncVal *hashfn;
- uintptr_t old_bucket_count;
- void **old_buckets;
- uintptr_t new_bucket_count;
- void **new_buckets;
- uintptr_t i;
-
- descriptor = map->__descriptor;
-
- key_descriptor = descriptor->__map_descriptor->__key_type;
- key_offset = descriptor->__key_offset;
- key_size = key_descriptor->__size;
- hashfn = key_descriptor->__hashfn;
-
- old_bucket_count = map->__bucket_count;
- old_buckets = map->__buckets;
-
- new_bucket_count = __go_map_next_prime (old_bucket_count * 2);
- new_buckets = (void **) __go_alloc (new_bucket_count * sizeof (void *));
- __builtin_memset (new_buckets, 0, new_bucket_count * sizeof (void *));
-
- for (i = 0; i < old_bucket_count; ++i)
- {
- char* entry;
- char* next;
-
- for (entry = old_buckets[i]; entry != NULL; entry = next)
- {
- size_t key_hash;
- size_t new_bucket_index;
-
- /* We could speed up rehashing at the cost of memory space
- by caching the hash code. */
- key_hash = __go_call_hashfn (hashfn, entry + key_offset, key_size);
- new_bucket_index = key_hash % new_bucket_count;
-
- next = *(char **) entry;
- *(char **) entry = new_buckets[new_bucket_index];
- new_buckets[new_bucket_index] = entry;
- }
- }
-
- if (old_bucket_count * sizeof (void *) >= TinySize)
- __go_free (old_buckets);
-
- map->__bucket_count = new_bucket_count;
- map->__buckets = new_buckets;
-}
-
-/* Find KEY in MAP, return a pointer to the value. If KEY is not
- present, then if INSERT is false, return NULL, and if INSERT is
- true, insert a new value and zero-initialize it before returning a
- pointer to it. */
-
-void *
-__go_map_index (struct __go_map *map, const void *key, _Bool insert)
-{
- const struct __go_map_descriptor *descriptor;
- const struct __go_type_descriptor *key_descriptor;
- uintptr_t key_offset;
- const FuncVal *equalfn;
- size_t key_hash;
- size_t key_size;
- size_t bucket_index;
- char *entry;
-
- if (map == NULL)
- {
- if (insert)
- runtime_panicstring ("assignment to entry in nil map");
- return NULL;
- }
-
- descriptor = map->__descriptor;
-
- key_descriptor = descriptor->__map_descriptor->__key_type;
- key_offset = descriptor->__key_offset;
- key_size = key_descriptor->__size;
- __go_assert (key_size != -1UL);
- equalfn = key_descriptor->__equalfn;
-
- key_hash = __go_call_hashfn (key_descriptor->__hashfn, key, key_size);
- bucket_index = key_hash % map->__bucket_count;
-
- entry = (char *) map->__buckets[bucket_index];
- while (entry != NULL)
- {
- if (__go_call_equalfn (equalfn, key, entry + key_offset, key_size))
- return entry + descriptor->__val_offset;
- entry = *(char **) entry;
- }
-
- if (!insert)
- return NULL;
-
- if (map->__element_count >= map->__bucket_count)
- {
- __go_map_rehash (map);
- bucket_index = key_hash % map->__bucket_count;
- }
-
- entry = (char *) __go_alloc (descriptor->__entry_size);
- __builtin_memset (entry, 0, descriptor->__entry_size);
-
- __builtin_memcpy (entry + key_offset, key, key_size);
-
- *(char **) entry = map->__buckets[bucket_index];
- map->__buckets[bucket_index] = entry;
-
- map->__element_count += 1;
-
- return entry + descriptor->__val_offset;
-}
diff --git a/libgo/runtime/go-map-len.c b/libgo/runtime/go-map-len.c
deleted file mode 100644
index 7da10c2494..0000000000
--- a/libgo/runtime/go-map-len.c
+++ /dev/null
@@ -1,25 +0,0 @@
-/* go-map-len.c -- return the length of a map.
-
- Copyright 2009 The Go Authors. All rights reserved.
- Use of this source code is governed by a BSD-style
- license that can be found in the LICENSE file. */
-
-#include <stddef.h>
-
-#include "runtime.h"
-#include "go-assert.h"
-#include "map.h"
-
-/* Return the length of a map. This could be done inline, of course,
- but I'm doing it as a function for now to make it easy to change
- the map structure. */
-
-intgo
-__go_map_len (struct __go_map *map)
-{
- if (map == NULL)
- return 0;
- __go_assert (map->__element_count
- == (uintptr_t) (intgo) map->__element_count);
- return map->__element_count;
-}
diff --git a/libgo/runtime/go-map-range.c b/libgo/runtime/go-map-range.c
deleted file mode 100644
index 5dbb92ccb8..0000000000
--- a/libgo/runtime/go-map-range.c
+++ /dev/null
@@ -1,103 +0,0 @@
-/* go-map-range.c -- implement a range clause over a map.
-
- Copyright 2009, 2010 The Go Authors. All rights reserved.
- Use of this source code is governed by a BSD-style
- license that can be found in the LICENSE file. */
-
-#include "runtime.h"
-#include "go-assert.h"
-#include "map.h"
-
-/* Initialize a range over a map. */
-
-void
-__go_mapiterinit (const struct __go_map *h, struct __go_hash_iter *it)
-{
- it->entry = NULL;
- if (h != NULL)
- {
- it->map = h;
- it->next_entry = NULL;
- it->bucket = 0;
- --it->bucket;
- __go_mapiternext(it);
- }
-}
-
-/* Move to the next iteration, updating *HITER. */
-
-void
-__go_mapiternext (struct __go_hash_iter *it)
-{
- const void *entry;
-
- entry = it->next_entry;
- if (entry == NULL)
- {
- const struct __go_map *map;
- uintptr_t bucket;
-
- map = it->map;
- bucket = it->bucket;
- while (1)
- {
- ++bucket;
- if (bucket >= map->__bucket_count)
- {
- /* Map iteration is complete. */
- it->entry = NULL;
- return;
- }
- entry = map->__buckets[bucket];
- if (entry != NULL)
- break;
- }
- it->bucket = bucket;
- }
- it->entry = entry;
- it->next_entry = *(const void * const *) entry;
-}
-
-/* Get the key of the current iteration. */
-
-void
-__go_mapiter1 (struct __go_hash_iter *it, unsigned char *key)
-{
- const struct __go_map *map;
- const struct __go_map_descriptor *descriptor;
- const struct __go_type_descriptor *key_descriptor;
- const char *p;
-
- map = it->map;
- descriptor = map->__descriptor;
- key_descriptor = descriptor->__map_descriptor->__key_type;
- p = it->entry;
- __go_assert (p != NULL);
- __builtin_memcpy (key, p + descriptor->__key_offset, key_descriptor->__size);
-}
-
-/* Get the key and value of the current iteration. */
-
-void
-__go_mapiter2 (struct __go_hash_iter *it, unsigned char *key,
- unsigned char *val)
-{
- const struct __go_map *map;
- const struct __go_map_descriptor *descriptor;
- const struct __go_map_type *map_descriptor;
- const struct __go_type_descriptor *key_descriptor;
- const struct __go_type_descriptor *val_descriptor;
- const char *p;
-
- map = it->map;
- descriptor = map->__descriptor;
- map_descriptor = descriptor->__map_descriptor;
- key_descriptor = map_descriptor->__key_type;
- val_descriptor = map_descriptor->__val_type;
- p = it->entry;
- __go_assert (p != NULL);
- __builtin_memcpy (key, p + descriptor->__key_offset,
- key_descriptor->__size);
- __builtin_memcpy (val, p + descriptor->__val_offset,
- val_descriptor->__size);
-}
diff --git a/libgo/runtime/go-memclr.c b/libgo/runtime/go-memclr.c
new file mode 100644
index 0000000000..e478b658b9
--- /dev/null
+++ b/libgo/runtime/go-memclr.c
@@ -0,0 +1,16 @@
+/* go-memclr.c -- clear a memory buffer
+
+ Copyright 2016 The Go Authors. All rights reserved.
+ Use of this source code is governed by a BSD-style
+ license that can be found in the LICENSE file. */
+
+#include "runtime.h"
+
+void memclrNoHeapPointers(void *, uintptr)
+ __asm__ (GOSYM_PREFIX "runtime.memclrNoHeapPointers");
+
+void
+memclrNoHeapPointers (void *p1, uintptr len)
+{
+ __builtin_memset (p1, 0, len);
+}
diff --git a/libgo/runtime/go-memequal.c b/libgo/runtime/go-memequal.c
new file mode 100644
index 0000000000..5f514aaae0
--- /dev/null
+++ b/libgo/runtime/go-memequal.c
@@ -0,0 +1,16 @@
+/* go-memequal.c -- compare memory buffers for equality
+
+ Copyright 2016 The Go Authors. All rights reserved.
+ Use of this source code is governed by a BSD-style
+ license that can be found in the LICENSE file. */
+
+#include "runtime.h"
+
+_Bool memequal (void *, void *, uintptr)
+ __asm__ (GOSYM_PREFIX "runtime.memequal");
+
+_Bool
+memequal (void *p1, void *p2, uintptr len)
+{
+ return __builtin_memcmp (p1, p2, len) == 0;
+}
diff --git a/libgo/runtime/go-memmove.c b/libgo/runtime/go-memmove.c
new file mode 100644
index 0000000000..a6fda08c47
--- /dev/null
+++ b/libgo/runtime/go-memmove.c
@@ -0,0 +1,16 @@
+/* go-memmove.c -- move one memory buffer to another
+
+ Copyright 2016 The Go Authors. All rights reserved.
+ Use of this source code is governed by a BSD-style
+ license that can be found in the LICENSE file. */
+
+#include "runtime.h"
+
+void move(void *, void *, uintptr)
+ __asm__ (GOSYM_PREFIX "runtime.memmove");
+
+void
+move (void *p1, void *p2, uintptr len)
+{
+ __builtin_memmove (p1, p2, len);
+}
diff --git a/libgo/runtime/go-nanotime.c b/libgo/runtime/go-nanotime.c
index 7e5e3e098a..d221847ada 100644
--- a/libgo/runtime/go-nanotime.c
+++ b/libgo/runtime/go-nanotime.c
@@ -14,8 +14,8 @@ int64 runtime_nanotime (void)
int64
runtime_nanotime (void)
{
- struct timeval tv;
+ struct timespec ts;
- gettimeofday (&tv, NULL);
- return (int64) tv.tv_sec * 1000000000 + (int64) tv.tv_usec * 1000;
+ clock_gettime (CLOCK_MONOTONIC, &ts);
+ return (int64) ts.tv_sec * 1000000000 + (int64) ts.tv_nsec;
}
diff --git a/libgo/runtime/go-new-map.c b/libgo/runtime/go-new-map.c
deleted file mode 100644
index c289bc0bea..0000000000
--- a/libgo/runtime/go-new-map.c
+++ /dev/null
@@ -1,142 +0,0 @@
-/* go-new-map.c -- allocate a new map.
-
- Copyright 2009 The Go Authors. All rights reserved.
- Use of this source code is governed by a BSD-style
- license that can be found in the LICENSE file. */
-
-#include "runtime.h"
-#include "go-alloc.h"
-#include "map.h"
-
-/* List of prime numbers, copied from libstdc++/src/hashtable.c. */
-
-static const unsigned long prime_list[] = /* 256 + 1 or 256 + 48 + 1 */
-{
- 2ul, 3ul, 5ul, 7ul, 11ul, 13ul, 17ul, 19ul, 23ul, 29ul, 31ul,
- 37ul, 41ul, 43ul, 47ul, 53ul, 59ul, 61ul, 67ul, 71ul, 73ul, 79ul,
- 83ul, 89ul, 97ul, 103ul, 109ul, 113ul, 127ul, 137ul, 139ul, 149ul,
- 157ul, 167ul, 179ul, 193ul, 199ul, 211ul, 227ul, 241ul, 257ul,
- 277ul, 293ul, 313ul, 337ul, 359ul, 383ul, 409ul, 439ul, 467ul,
- 503ul, 541ul, 577ul, 619ul, 661ul, 709ul, 761ul, 823ul, 887ul,
- 953ul, 1031ul, 1109ul, 1193ul, 1289ul, 1381ul, 1493ul, 1613ul,
- 1741ul, 1879ul, 2029ul, 2179ul, 2357ul, 2549ul, 2753ul, 2971ul,
- 3209ul, 3469ul, 3739ul, 4027ul, 4349ul, 4703ul, 5087ul, 5503ul,
- 5953ul, 6427ul, 6949ul, 7517ul, 8123ul, 8783ul, 9497ul, 10273ul,
- 11113ul, 12011ul, 12983ul, 14033ul, 15173ul, 16411ul, 17749ul,
- 19183ul, 20753ul, 22447ul, 24281ul, 26267ul, 28411ul, 30727ul,
- 33223ul, 35933ul, 38873ul, 42043ul, 45481ul, 49201ul, 53201ul,
- 57557ul, 62233ul, 67307ul, 72817ul, 78779ul, 85229ul, 92203ul,
- 99733ul, 107897ul, 116731ul, 126271ul, 136607ul, 147793ul,
- 159871ul, 172933ul, 187091ul, 202409ul, 218971ul, 236897ul,
- 256279ul, 277261ul, 299951ul, 324503ul, 351061ul, 379787ul,
- 410857ul, 444487ul, 480881ul, 520241ul, 562841ul, 608903ul,
- 658753ul, 712697ul, 771049ul, 834181ul, 902483ul, 976369ul,
- 1056323ul, 1142821ul, 1236397ul, 1337629ul, 1447153ul, 1565659ul,
- 1693859ul, 1832561ul, 1982627ul, 2144977ul, 2320627ul, 2510653ul,
- 2716249ul, 2938679ul, 3179303ul, 3439651ul, 3721303ul, 4026031ul,
- 4355707ul, 4712381ul, 5098259ul, 5515729ul, 5967347ul, 6456007ul,
- 6984629ul, 7556579ul, 8175383ul, 8844859ul, 9569143ul, 10352717ul,
- 11200489ul, 12117689ul, 13109983ul, 14183539ul, 15345007ul,
- 16601593ul, 17961079ul, 19431899ul, 21023161ul, 22744717ul,
- 24607243ul, 26622317ul, 28802401ul, 31160981ul, 33712729ul,
- 36473443ul, 39460231ul, 42691603ul, 46187573ul, 49969847ul,
- 54061849ul, 58488943ul, 63278561ul, 68460391ul, 74066549ul,
- 80131819ul, 86693767ul, 93793069ul, 101473717ul, 109783337ul,
- 118773397ul, 128499677ul, 139022417ul, 150406843ul, 162723577ul,
- 176048909ul, 190465427ul, 206062531ul, 222936881ul, 241193053ul,
- 260944219ul, 282312799ul, 305431229ul, 330442829ul, 357502601ul,
- 386778277ul, 418451333ul, 452718089ul, 489790921ul, 529899637ul,
- 573292817ul, 620239453ul, 671030513ul, 725980837ul, 785430967ul,
- 849749479ul, 919334987ul, 994618837ul, 1076067617ul, 1164186217ul,
- 1259520799ul, 1362662261ul, 1474249943ul, 1594975441ul, 1725587117ul,
- 1866894511ul, 2019773507ul, 2185171673ul, 2364114217ul, 2557710269ul,
- 2767159799ul, 2993761039ul, 3238918481ul, 3504151727ul, 3791104843ul,
- 4101556399ul, 4294967291ul,
-#if __SIZEOF_LONG__ >= 8
- 6442450933ul, 8589934583ul, 12884901857ul, 17179869143ul,
- 25769803693ul, 34359738337ul, 51539607367ul, 68719476731ul,
- 103079215087ul, 137438953447ul, 206158430123ul, 274877906899ul,
- 412316860387ul, 549755813881ul, 824633720731ul, 1099511627689ul,
- 1649267441579ul, 2199023255531ul, 3298534883309ul, 4398046511093ul,
- 6597069766607ul, 8796093022151ul, 13194139533241ul, 17592186044399ul,
- 26388279066581ul, 35184372088777ul, 52776558133177ul, 70368744177643ul,
- 105553116266399ul, 140737488355213ul, 211106232532861ul, 281474976710597ul,
- 562949953421231ul, 1125899906842597ul, 2251799813685119ul,
- 4503599627370449ul, 9007199254740881ul, 18014398509481951ul,
- 36028797018963913ul, 72057594037927931ul, 144115188075855859ul,
- 288230376151711717ul, 576460752303423433ul,
- 1152921504606846883ul, 2305843009213693951ul,
- 4611686018427387847ul, 9223372036854775783ul,
- 18446744073709551557ul
-#endif
-};
-
-/* Return the next number from PRIME_LIST >= N. */
-
-uintptr_t
-__go_map_next_prime (uintptr_t n)
-{
- size_t low;
- size_t high;
-
- low = 0;
- high = sizeof prime_list / sizeof prime_list[0];
- while (low < high)
- {
- size_t mid;
-
- mid = (low + high) / 2;
-
- /* Here LOW <= MID < HIGH. */
-
- if (prime_list[mid] < n)
- low = mid + 1;
- else if (prime_list[mid] > n)
- high = mid;
- else
- return n;
- }
- if (low >= sizeof prime_list / sizeof prime_list[0])
- return n;
- return prime_list[low];
-}
-
-/* Allocate a new map. */
-
-struct __go_map *
-__go_new_map (const struct __go_map_descriptor *descriptor, uintptr_t entries)
-{
- int32 ientries;
- struct __go_map *ret;
-
- /* The master library limits map entries to int32, so we do too. */
- ientries = (int32) entries;
- if (ientries < 0 || (uintptr_t) ientries != entries)
- runtime_panicstring ("map size out of range");
-
- if (entries == 0)
- entries = 5;
- else
- entries = __go_map_next_prime (entries);
- ret = (struct __go_map *) __go_alloc (sizeof (struct __go_map));
- ret->__descriptor = descriptor;
- ret->__element_count = 0;
- ret->__bucket_count = entries;
- ret->__buckets = (void **) __go_alloc (entries * sizeof (void *));
- __builtin_memset (ret->__buckets, 0, entries * sizeof (void *));
- return ret;
-}
-
-/* Allocate a new map when the argument to make is a large type. */
-
-struct __go_map *
-__go_new_map_big (const struct __go_map_descriptor *descriptor,
- uint64_t entries)
-{
- uintptr_t sentries;
-
- sentries = (uintptr_t) entries;
- if ((uint64_t) sentries != entries)
- runtime_panicstring ("map size out of range");
- return __go_new_map (descriptor, sentries);
-}
diff --git a/libgo/runtime/go-new.c b/libgo/runtime/go-new.c
index 01bc2af312..da44074a5d 100644
--- a/libgo/runtime/go-new.c
+++ b/libgo/runtime/go-new.c
@@ -4,7 +4,6 @@
Use of this source code is governed by a BSD-style
license that can be found in the LICENSE file. */
-#include "go-alloc.h"
#include "runtime.h"
#include "arch.h"
#include "malloc.h"
diff --git a/libgo/runtime/go-nosys.c b/libgo/runtime/go-nosys.c
index 0a94de0523..be8fb3ef19 100644
--- a/libgo/runtime/go-nosys.c
+++ b/libgo/runtime/go-nosys.c
@@ -14,11 +14,16 @@
#include <errno.h>
#include <fcntl.h>
#include <math.h>
+#include <pthread.h>
#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
#include <sys/types.h>
+#include <sys/resource.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/time.h>
+#include <sys/wait.h>
#include <time.h>
#include <unistd.h>
@@ -448,3 +453,54 @@ log1pl (long double a)
return (long double) log1p ((double) a);
}
#endif
+
+#ifndef HAVE_STRERROR_R
+
+/* Some old systems do not have strerror_r. This is a replacement.
+ It assumes that nothing else in the program calls strerror. */
+
+static pthread_mutex_t strerror_lock = PTHREAD_MUTEX_INITIALIZER;
+
+int
+strerror_r (int errnum, char *buf, size_t buflen)
+{
+ int i;
+ char *errmsg;
+ size_t len;
+ int ret;
+
+ i = pthread_mutex_lock (&strerror_lock);
+ if (i != 0)
+ abort ();
+
+ errmsg = strerror (errnum);
+ len = strlen (errmsg);
+ if (len >= buflen)
+ ret = ERANGE;
+ else
+ {
+ memcpy (buf, errmsg, len + 1);
+ ret = 0;
+ }
+
+ i = pthread_mutex_unlock (&strerror_lock);
+ if (i != 0)
+ abort ();
+
+ return ret;
+}
+
+#endif /* ! HAVE_STRERROR_R */
+
+#ifndef HAVE_WAIT4
+
+/* Some old systems do not have wait4. This is a replacement that
+ uses waitpid. */
+
+pid_t
+wait4 (pid_t pid, int *status, int options, struct rusage *rusage __attribute__ ((unused)))
+{
+ return waitpid (pid, status, options);
+}
+
+#endif
diff --git a/libgo/runtime/go-panic.c b/libgo/runtime/go-panic.c
deleted file mode 100644
index 77975c6e08..0000000000
--- a/libgo/runtime/go-panic.c
+++ /dev/null
@@ -1,112 +0,0 @@
-/* go-panic.c -- support for the go panic function.
-
- Copyright 2009 The Go Authors. All rights reserved.
- Use of this source code is governed by a BSD-style
- license that can be found in the LICENSE file. */
-
-#include <stdio.h>
-#include <stdlib.h>
-
-#include "runtime.h"
-#include "arch.h"
-#include "malloc.h"
-#include "go-alloc.h"
-#include "go-defer.h"
-#include "go-panic.h"
-#include "interface.h"
-
-/* Print the panic stack. This is used when there is no recover. */
-
-static void
-__printpanics (struct __go_panic_stack *p)
-{
- if (p->__next != NULL)
- {
- __printpanics (p->__next);
- runtime_printf ("\t");
- }
- runtime_printf ("panic: ");
- runtime_printany (p->__arg);
- if (p->__was_recovered)
- runtime_printf (" [recovered]");
- runtime_printf ("\n");
-}
-
-/* This implements __go_panic which is used for the panic
- function. */
-
-void
-__go_panic (struct __go_empty_interface arg)
-{
- G *g;
- struct __go_panic_stack *n;
-
- g = runtime_g ();
-
- n = (struct __go_panic_stack *) __go_alloc (sizeof (struct __go_panic_stack));
- n->__arg = arg;
- n->__next = g->panic;
- g->panic = n;
-
- /* Run all the defer functions. */
-
- while (1)
- {
- struct __go_defer_stack *d;
- void (*pfn) (void *);
-
- d = g->defer;
- if (d == NULL)
- break;
-
- pfn = d->__pfn;
- d->__pfn = NULL;
-
- if (pfn != NULL)
- {
- (*pfn) (d->__arg);
-
- if (n->__was_recovered)
- {
- /* Some defer function called recover. That means that
- we should stop running this panic. */
-
- g->panic = n->__next;
- __go_free (n);
-
- /* Now unwind the stack by throwing an exception. The
- compiler has arranged to create exception handlers in
- each function which uses a defer statement. These
- exception handlers will check whether the entry on
- the top of the defer stack is from the current
- function. If it is, we have unwound the stack far
- enough. */
- __go_unwind_stack ();
-
- /* __go_unwind_stack should not return. */
- abort ();
- }
-
- /* Because we executed that defer function by a panic, and
- it did not call recover, we know that we are not
- returning from the calling function--we are panicing
- through it. */
- *d->__frame = 0;
- }
-
- g->defer = d->__next;
-
- /* This may be called by a cgo callback routine to defer the
- call to syscall.CgocallBackDone, in which case we will not
- have a memory context. Don't try to free anything in that
- case--the GC will release it later. */
- if (runtime_m () != NULL)
- runtime_freedefer (d);
- }
-
- /* The panic was not recovered. */
-
- runtime_startpanic ();
- __printpanics (g->panic);
- runtime_dopanic (0);
-}
diff --git a/libgo/runtime/go-panic.h b/libgo/runtime/go-panic.h
deleted file mode 100644
index d29fe88b57..0000000000
--- a/libgo/runtime/go-panic.h
+++ /dev/null
@@ -1,52 +0,0 @@
-/* go-panic.h -- declare the go panic functions.
-
- Copyright 2009 The Go Authors. All rights reserved.
- Use of this source code is governed by a BSD-style
- license that can be found in the LICENSE file. */
-
-#ifndef LIBGO_GO_PANIC_H
-#define LIBGO_GO_PANIC_H
-
-#include "interface.h"
-
-struct String;
-struct __go_type_descriptor;
-struct __go_defer_stack;
-
-/* The stack of panic calls. */
-
-struct __go_panic_stack
-{
- /* The next entry in the stack. */
- struct __go_panic_stack *__next;
-
- /* The value associated with this panic. */
- struct __go_empty_interface __arg;
-
- /* Whether this panic has been recovered. */
- _Bool __was_recovered;
-
- /* Whether this panic was pushed on the stack because of an
- exception thrown in some other language. */
- _Bool __is_foreign;
-};
-
-extern void __go_panic (struct __go_empty_interface)
- __attribute__ ((noreturn));
-
-extern void __go_print_string (struct String);
-
-extern struct __go_empty_interface __go_recover (void);
-
-extern _Bool __go_can_recover (void *);
-
-extern void __go_makefunc_can_recover (void *retaddr);
-
-struct Location;
-extern void __go_makefunc_ffi_can_recover (struct Location *, int);
-
-extern void __go_makefunc_returning (void);
-
-extern void __go_unwind_stack (void);
-
-#endif /* !defined(LIBGO_GO_PANIC_H) */
diff --git a/libgo/runtime/go-print.c b/libgo/runtime/go-print.c
deleted file mode 100644
index 4c520de3ce..0000000000
--- a/libgo/runtime/go-print.c
+++ /dev/null
@@ -1,36 +0,0 @@
-/* go-print.c -- support for the go print statement.
-
- Copyright 2009 The Go Authors. All rights reserved.
- Use of this source code is governed by a BSD-style
- license that can be found in the LICENSE file. */
-
-#include <math.h>
-#include <stdint.h>
-#include <stdio.h>
-
-#include "runtime.h"
-#include "array.h"
-#include "go-panic.h"
-#include "interface.h"
-
-/* This implements the various little functions which are called by
- the predeclared functions print/println/panic/panicln. */
-
-void
-__go_print_empty_interface (struct __go_empty_interface e)
-{
- runtime_printf ("(%p,%p)", e.__type_descriptor, e.__object);
-}
-
-void
-__go_print_interface (struct __go_interface i)
-{
- runtime_printf ("(%p,%p)", i.__methods, i.__object);
-}
-
-void
-__go_print_slice (struct __go_open_array val)
-{
- runtime_printf ("[%d/%d]", val.__count, val.__capacity);
- runtime_printpointer (val.__values);
-}
diff --git a/libgo/runtime/go-recover.c b/libgo/runtime/go-recover.c
deleted file mode 100644
index fc66f61cab..0000000000
--- a/libgo/runtime/go-recover.c
+++ /dev/null
@@ -1,275 +0,0 @@
-/* go-recover.c -- support for the go recover function.
-
- Copyright 2010 The Go Authors. All rights reserved.
- Use of this source code is governed by a BSD-style
- license that can be found in the LICENSE file. */
-
-#include "runtime.h"
-#include "interface.h"
-#include "go-panic.h"
-#include "go-defer.h"
-
-/* If the top of the defer stack can be recovered, then return it.
- Otherwise return NULL. */
-
-static struct __go_defer_stack *
-current_defer ()
-{
- G *g;
- struct __go_defer_stack *d;
-
- g = runtime_g ();
-
- d = g->defer;
- if (d == NULL)
- return NULL;
-
- /* The panic which would be recovered is the one on the top of the
- panic stack. We do not want to recover it if that panic was on
- the top of the panic stack when this function was deferred. */
- if (d->__panic == g->panic)
- return NULL;
-
- /* The deferred thunk will call _go_set_defer_retaddr. If this has
- not happened, then we have not been called via defer, and we can
- not recover. */
- if (d->__retaddr == NULL)
- return NULL;
-
- return d;
-}
-
-/* This is called by a thunk to see if the real function should be
- permitted to recover a panic value. Recovering a value is
- permitted if the thunk was called directly by defer. RETADDR is
- the return address of the function which is calling
- __go_can_recover--this is, the thunk. */
-
-_Bool
-__go_can_recover (void *retaddr)
-{
- struct __go_defer_stack *d;
- const char* ret;
- const char* dret;
- Location locs[16];
- const byte *name;
- intgo len;
- int n;
- int i;
- _Bool found_ffi_callback;
-
- d = current_defer ();
- if (d == NULL)
- return 0;
-
- ret = (const char *) __builtin_extract_return_addr (retaddr);
-
- dret = (const char *) d->__retaddr;
- if (ret <= dret && ret + 16 >= dret)
- return 1;
-
- /* On some systems, in some cases, the return address does not work
- reliably. See http://gcc.gnu.org/PR60406. If we are permitted
- to call recover, the call stack will look like this:
- __go_panic, __go_undefer, etc.
- thunk to call deferred function (calls __go_set_defer_retaddr)
- function that calls __go_can_recover (passing return address)
- __go_can_recover
- Calling runtime_callers will skip the thunks. So if our caller's
- caller starts with __go, then we are permitted to call
- recover. */
-
- if (runtime_callers (1, &locs[0], 2, false) < 2)
- return 0;
-
- name = locs[1].function.str;
- len = locs[1].function.len;
-
- /* Although locs[1].function is a Go string, we know it is
- NUL-terminated. */
- if (len > 4
- && __builtin_strchr ((const char *) name, '.') == NULL
- && __builtin_strncmp ((const char *) name, "__go_", 4) == 0)
- return 1;
-
- /* If we are called from __go_makefunc_can_recover, then we need to
- look one level higher. */
- if (locs[0].function.len > 0
- && __builtin_strcmp ((const char *) locs[0].function.str,
- "__go_makefunc_can_recover") == 0)
- {
- if (runtime_callers (3, &locs[0], 1, false) < 1)
- return 0;
- name = locs[0].function.str;
- len = locs[0].function.len;
- if (len > 4
- && __builtin_strchr ((const char *) name, '.') == NULL
- && __builtin_strncmp ((const char *) name, "__go_", 4) == 0)
- return 1;
- }
-
- /* If the function calling recover was created by reflect.MakeFunc,
- then __go_makefunc_can_recover or __go_makefunc_ffi_can_recover
- will have set the __makefunc_can_recover field. */
- if (!d->__makefunc_can_recover)
- return 0;
-
- /* We look up the stack, ignoring libffi functions and functions in
- the reflect package, until we find reflect.makeFuncStub or
- reflect.ffi_callback called by FFI functions. Then we check the
- caller of that function. */
-
- n = runtime_callers (2, &locs[0], sizeof locs / sizeof locs[0], false);
- found_ffi_callback = 0;
- for (i = 0; i < n; i++)
- {
- const byte *name;
-
- if (locs[i].function.len == 0)
- {
- /* No function name means this caller isn't Go code. Assume
- that this is libffi. */
- continue;
- }
-
- /* Ignore functions in libffi. */
- name = locs[i].function.str;
- if (__builtin_strncmp ((const char *) name, "ffi_", 4) == 0)
- continue;
-
- if (found_ffi_callback)
- break;
-
- if (__builtin_strcmp ((const char *) name, "reflect.ffi_callback") == 0)
- {
- found_ffi_callback = 1;
- continue;
- }
-
- if (__builtin_strcmp ((const char *) name, "reflect.makeFuncStub") == 0)
- {
- i++;
- break;
- }
-
- /* Ignore other functions in the reflect package. */
- if (__builtin_strncmp ((const char *) name, "reflect.", 8) == 0)
- continue;
-
- /* We should now be looking at the real caller. */
- break;
- }
-
- if (i < n && locs[i].function.len > 0)
- {
- name = locs[i].function.str;
- if (__builtin_strncmp ((const char *) name, "__go_", 4) == 0)
- return 1;
- }
-
- return 0;
-}
-
-/* This function is called when code is about to enter a function
- created by reflect.MakeFunc. It is called by the function stub
- used by MakeFunc. If the stub is permitted to call recover, then a
- real MakeFunc function is permitted to call recover. */
-
-void
-__go_makefunc_can_recover (void *retaddr)
-{
- struct __go_defer_stack *d;
-
- d = current_defer ();
- if (d == NULL)
- return;
-
- /* If we are already in a call stack of MakeFunc functions, there is
- nothing we can usefully check here. */
- if (d->__makefunc_can_recover)
- return;
-
- if (__go_can_recover (retaddr))
- d->__makefunc_can_recover = 1;
-}
-
-/* This function is called when code is about to enter a function
- created by the libffi version of reflect.MakeFunc. This function
- is passed the names of the callers of the libffi code that called
- the stub. It uses to decide whether it is permitted to call
- recover, and sets d->__makefunc_can_recover so that __go_recover
- can make the same decision. */
-
-void
-__go_makefunc_ffi_can_recover (struct Location *loc, int n)
-{
- struct __go_defer_stack *d;
- const byte *name;
- intgo len;
-
- d = current_defer ();
- if (d == NULL)
- return;
-
- /* If we are already in a call stack of MakeFunc functions, there is
- nothing we can usefully check here. */
- if (d->__makefunc_can_recover)
- return;
-
- /* LOC points to the caller of our caller. That will be a thunk.
- If its caller was a runtime function, then it was called directly
- by defer. */
-
- if (n < 2)
- return;
-
- name = (loc + 1)->function.str;
- len = (loc + 1)->function.len;
- if (len > 4
- && __builtin_strchr ((const char *) name, '.') == NULL
- && __builtin_strncmp ((const char *) name, "__go_", 4) == 0)
- d->__makefunc_can_recover = 1;
-}
-
-/* This function is called when code is about to exit a function
- created by reflect.MakeFunc. It is called by the function stub
- used by MakeFunc. It clears the __makefunc_can_recover field.
- It's OK to always clear this field, because __go_can_recover will
- only be called by a stub created for a function that calls recover.
- That stub will not call a function created by reflect.MakeFunc, so
- by the time we get here any caller higher up on the call stack no
- longer needs the information. */
-
-void
-__go_makefunc_returning (void)
-{
- struct __go_defer_stack *d;
-
- d = runtime_g ()->defer;
- if (d != NULL)
- d->__makefunc_can_recover = 0;
-}
-
-/* This is only called when it is valid for the caller to recover the
- value on top of the panic stack, if there is one. */
-
-struct __go_empty_interface
-__go_recover ()
-{
- G *g;
- struct __go_panic_stack *p;
-
- g = runtime_g ();
-
- if (g->panic == NULL || g->panic->__was_recovered)
- {
- struct __go_empty_interface ret;
-
- ret.__type_descriptor = NULL;
- ret.__object = NULL;
- return ret;
- }
- p = g->panic;
- p->__was_recovered = 1;
- return p->__arg;
-}
diff --git a/libgo/runtime/go-reflect-call.c b/libgo/runtime/go-reflect-call.c
index 2a14d6c6ad..6a9a7f35a1 100644
--- a/libgo/runtime/go-reflect-call.c
+++ b/libgo/runtime/go-reflect-call.c
@@ -9,10 +9,12 @@
#include <stdlib.h>
#include "runtime.h"
-#include "go-alloc.h"
#include "go-assert.h"
#include "go-type.h"
-#include "go-ffi.h"
+
+#ifdef USE_LIBFFI
+#include "ffi.h"
+#endif
#if defined(USE_LIBFFI) && FFI_GO_CLOSURES
@@ -197,6 +199,11 @@ go_set_results (const struct __go_func_type *func, unsigned char *call_result,
}
}
+/* The code that converts the Go type to an FFI type is written in Go,
+ so that it can allocate Go heap memory. */
+extern void ffiFuncToCIF(const struct __go_func_type*, _Bool, _Bool, ffi_cif*)
+ __asm__ ("runtime.ffiFuncToCIF");
+
/* Call a function. The type of the function is FUNC_TYPE, and the
closure is FUNC_VAL. PARAMS is an array of parameter addresses.
RESULTS is an array of result addresses.
@@ -218,7 +225,7 @@ reflect_call (const struct __go_func_type *func_type, FuncVal *func_val,
unsigned char *call_result;
__go_assert ((func_type->__common.__code & GO_CODE_MASK) == GO_FUNC);
- __go_func_to_cif (func_type, is_interface, is_method, &cif);
+ ffiFuncToCIF (func_type, is_interface, is_method, &cif);
call_result = (unsigned char *) malloc (go_results_size (func_type));
diff --git a/libgo/runtime/go-reflect-map.c b/libgo/runtime/go-reflect-map.c
deleted file mode 100644
index 36f31025d3..0000000000
--- a/libgo/runtime/go-reflect-map.c
+++ /dev/null
@@ -1,156 +0,0 @@
-/* go-reflect-map.c -- map reflection support for Go.
-
- Copyright 2009, 2010 The Go Authors. All rights reserved.
- Use of this source code is governed by a BSD-style
- license that can be found in the LICENSE file. */
-
-#include <stdlib.h>
-#include <stdint.h>
-
-#include "runtime.h"
-#include "go-alloc.h"
-#include "go-assert.h"
-#include "go-type.h"
-#include "map.h"
-
-/* This file implements support for reflection on maps. These
- functions are called from reflect/value.go. */
-
-extern void *mapaccess (struct __go_map_type *, void *, void *)
- __asm__ (GOSYM_PREFIX "reflect.mapaccess");
-
-void *
-mapaccess (struct __go_map_type *mt, void *m, void *key)
-{
- struct __go_map *map = (struct __go_map *) m;
-
- __go_assert ((mt->__common.__code & GO_CODE_MASK) == GO_MAP);
- if (map == NULL)
- return NULL;
- else
- return __go_map_index (map, key, 0);
-}
-
-extern void mapassign (struct __go_map_type *, void *, void *, void *)
- __asm__ (GOSYM_PREFIX "reflect.mapassign");
-
-void
-mapassign (struct __go_map_type *mt, void *m, void *key, void *val)
-{
- struct __go_map *map = (struct __go_map *) m;
- void *p;
-
- __go_assert ((mt->__common.__code & GO_CODE_MASK) == GO_MAP);
- if (map == NULL)
- runtime_panicstring ("assignment to entry in nil map");
- p = __go_map_index (map, key, 1);
- __builtin_memcpy (p, val, mt->__val_type->__size);
-}
-
-extern void mapdelete (struct __go_map_type *, void *, void *)
- __asm__ (GOSYM_PREFIX "reflect.mapdelete");
-
-void
-mapdelete (struct __go_map_type *mt, void *m, void *key)
-{
- struct __go_map *map = (struct __go_map *) m;
-
- __go_assert ((mt->__common.__code & GO_CODE_MASK) == GO_MAP);
- if (map == NULL)
- return;
- __go_map_delete (map, key);
-}
-
-extern int32_t maplen (void *) __asm__ (GOSYM_PREFIX "reflect.maplen");
-
-int32_t
-maplen (void *m)
-{
- struct __go_map *map = (struct __go_map *) m;
-
- if (map == NULL)
- return 0;
- return (int32_t) map->__element_count;
-}
-
-extern unsigned char *mapiterinit (struct __go_map_type *, void *)
- __asm__ (GOSYM_PREFIX "reflect.mapiterinit");
-
-unsigned char *
-mapiterinit (struct __go_map_type *mt, void *m)
-{
- struct __go_hash_iter *it;
-
- __go_assert ((mt->__common.__code & GO_CODE_MASK) == GO_MAP);
- it = __go_alloc (sizeof (struct __go_hash_iter));
- __go_mapiterinit ((struct __go_map *) m, it);
- return (unsigned char *) it;
-}
-
-extern void mapiternext (void *) __asm__ (GOSYM_PREFIX "reflect.mapiternext");
-
-void
-mapiternext (void *it)
-{
- __go_mapiternext ((struct __go_hash_iter *) it);
-}
-
-extern void *mapiterkey (void *) __asm__ (GOSYM_PREFIX "reflect.mapiterkey");
-
-void *
-mapiterkey (void *ita)
-{
- struct __go_hash_iter *it = (struct __go_hash_iter *) ita;
- const struct __go_type_descriptor *key_descriptor;
- void *key;
-
- if (it->entry == NULL)
- return NULL;
-
- key_descriptor = it->map->__descriptor->__map_descriptor->__key_type;
- key = __go_alloc (key_descriptor->__size);
- __go_mapiter1 (it, key);
- return key;
-}
-
-/* Make a new map. We have to build our own map descriptor. */
-
-extern struct __go_map *makemap (const struct __go_map_type *)
- __asm__ (GOSYM_PREFIX "reflect.makemap");
-
-struct __go_map *
-makemap (const struct __go_map_type *t)
-{
- struct __go_map_descriptor *md;
- unsigned int o;
- const struct __go_type_descriptor *kt;
- const struct __go_type_descriptor *vt;
-
- md = (struct __go_map_descriptor *) __go_alloc (sizeof (*md));
- md->__map_descriptor = t;
- o = sizeof (void *);
- kt = t->__key_type;
- o = (o + kt->__field_align - 1) & ~ (kt->__field_align - 1);
- md->__key_offset = o;
- o += kt->__size;
- vt = t->__val_type;
- o = (o + vt->__field_align - 1) & ~ (vt->__field_align - 1);
- md->__val_offset = o;
- o += vt->__size;
- o = (o + sizeof (void *) - 1) & ~ (sizeof (void *) - 1);
- o = (o + kt->__field_align - 1) & ~ (kt->__field_align - 1);
- o = (o + vt->__field_align - 1) & ~ (vt->__field_align - 1);
- md->__entry_size = o;
-
- return __go_new_map (md, 0);
-}
-
-extern _Bool ismapkey (const struct __go_type_descriptor *)
- __asm__ (GOSYM_PREFIX "reflect.ismapkey");
-
-_Bool
-ismapkey (const struct __go_type_descriptor *typ)
-{
- return (typ != NULL
- && (void *) typ->__hashfn->fn != (void *) __go_type_hash_error);
-}
diff --git a/libgo/runtime/go-rune.c b/libgo/runtime/go-rune.c
deleted file mode 100644
index 4c65e21516..0000000000
--- a/libgo/runtime/go-rune.c
+++ /dev/null
@@ -1,97 +0,0 @@
-/* go-rune.c -- rune functions for Go.
-
- Copyright 2009, 2010 The Go Authors. All rights reserved.
- Use of this source code is governed by a BSD-style
- license that can be found in the LICENSE file. */
-
-#include <stddef.h>
-
-#include "runtime.h"
-#include "go-string.h"
-
-/* Get a character from the UTF-8 string STR, of length LEN. Store
- the Unicode character, if any, in *RUNE. Return the number of
- characters used from STR. */
-
-int
-__go_get_rune (const unsigned char *str, size_t len, int32 *rune)
-{
- int c, c1, c2, c3, l;
-
- /* Default to the "replacement character". */
- *rune = 0xfffd;
-
- if (len <= 0)
- return 1;
-
- c = *str;
- if (c <= 0x7f)
- {
- *rune = c;
- return 1;
- }
-
- if (len <= 1)
- return 1;
-
- c1 = str[1];
- if ((c & 0xe0) == 0xc0
- && (c1 & 0xc0) == 0x80)
- {
- l = (((c & 0x1f) << 6) + (c1 & 0x3f));
- if (l <= 0x7f)
- return 1;
- *rune = l;
- return 2;
- }
-
- if (len <= 2)
- return 1;
-
- c2 = str[2];
- if ((c & 0xf0) == 0xe0
- && (c1 & 0xc0) == 0x80
- && (c2 & 0xc0) == 0x80)
- {
- l = (((c & 0xf) << 12)
- + ((c1 & 0x3f) << 6)
- + (c2 & 0x3f));
-
- if (l <= 0x7ff)
- return 1;
-
- if (l >= 0xd800 && l < 0xe000)
- {
- /* Invalid surrogate half; return replace character. */
- return 1;
- }
-
- *rune = l;
-
- return 3;
- }
-
- if (len <= 3)
- return 1;
-
- c3 = str[3];
- if ((c & 0xf8) == 0xf0
- && (c1 & 0xc0) == 0x80
- && (c2 & 0xc0) == 0x80
- && (c3 & 0xc0) == 0x80)
- {
- l = (((c & 0x7) << 18)
- + ((c1 & 0x3f) << 12)
- + ((c2 & 0x3f) << 6)
- + (c3 & 0x3f));
-
- if (l <= 0xffff || l > 0x10ffff)
- return 1;
-
- *rune = l;
- return 4;
- }
-
- /* Invalid encoding. Return 1 so that we advance. */
- return 1;
-}
diff --git a/libgo/runtime/go-setenv.c b/libgo/runtime/go-setenv.c
index a75d7c4127..81b1775d2c 100644
--- a/libgo/runtime/go-setenv.c
+++ b/libgo/runtime/go-setenv.c
@@ -9,10 +9,7 @@
#include <stddef.h>
#include <stdlib.h>
-#include "go-alloc.h"
#include "runtime.h"
-#include "arch.h"
-#include "malloc.h"
/* Set the C environment from Go. This is called by syscall.Setenv. */
@@ -25,7 +22,6 @@ setenv_c (String k, String v)
unsigned char *kn;
const byte *vs;
unsigned char *vn;
- intgo len;
ks = k.str;
if (ks == NULL)
@@ -39,25 +35,23 @@ setenv_c (String k, String v)
#ifdef HAVE_SETENV
- if (ks != NULL && ks[k.len] != 0)
+ if (ks[k.len] != 0)
{
- // Objects that are explicitly freed must be at least 16 bytes in size,
- // so that they are not allocated using tiny alloc.
- len = k.len + 1;
- if (len < TinySize)
- len = TinySize;
- kn = __go_alloc (len);
+ kn = malloc (k.len + 1);
+ if (kn == NULL)
+ runtime_throw ("out of malloc memory");
__builtin_memcpy (kn, ks, k.len);
+ kn[k.len] = '\0';
ks = kn;
}
- if (vs != NULL && vs[v.len] != 0)
+ if (vs[v.len] != 0)
{
- len = v.len + 1;
- if (len < TinySize)
- len = TinySize;
- vn = __go_alloc (len);
+ vn = malloc (v.len + 1);
+ if (vn == NULL)
+ runtime_throw ("out of malloc memory");
__builtin_memcpy (vn, vs, v.len);
+ vn[v.len] = '\0';
vs = vn;
}
@@ -66,19 +60,20 @@ setenv_c (String k, String v)
#else /* !defined(HAVE_SETENV) */
len = k.len + v.len + 2;
- if (len < TinySize)
- len = TinySize;
- kn = __go_alloc (len);
+ kn = malloc (len);
+ if (kn == NULL)
+ runtime_throw ("out of malloc memory");
__builtin_memcpy (kn, ks, k.len);
kn[k.len] = '=';
__builtin_memcpy (kn + k.len + 1, vs, v.len);
kn[k.len + v.len + 1] = '\0';
putenv ((char *) kn);
+ kn = NULL; /* putenv takes ownership of the string. */
#endif /* !defined(HAVE_SETENV) */
if (kn != NULL)
- __go_free (kn);
+ free (kn);
if (vn != NULL)
- __go_free (vn);
+ free (vn);
}
diff --git a/libgo/runtime/go-signal.c b/libgo/runtime/go-signal.c
index a948c31cca..711f71e873 100644
--- a/libgo/runtime/go-signal.c
+++ b/libgo/runtime/go-signal.c
@@ -8,11 +8,9 @@
#include <stdlib.h>
#include <unistd.h>
#include <sys/time.h>
+#include <ucontext.h>
#include "runtime.h"
-#include "go-assert.h"
-#include "go-panic.h"
-#include "signal_unix.h"
#ifndef SA_RESTART
#define SA_RESTART 0
@@ -24,528 +22,270 @@ extern void __splitstack_getcontext(void *context[10]);
extern void __splitstack_setcontext(void *context[10]);
-#endif
-
-#define N SigNotify
-#define K SigKill
-#define T SigThrow
-#define P SigPanic
-#define D SigDefault
-
-/* Signal actions. This collects the sigtab tables for several
- different targets from the master library. SIGKILL and SIGSTOP are
- not listed, as we don't want to set signal handlers for them. */
-
-SigTab runtime_sigtab[] = {
-#ifdef SIGHUP
- { SIGHUP, N + K, NULL },
-#endif
-#ifdef SIGINT
- { SIGINT, N + K, NULL },
-#endif
-#ifdef SIGQUIT
- { SIGQUIT, N + T, NULL },
-#endif
-#ifdef SIGILL
- { SIGILL, T, NULL },
-#endif
-#ifdef SIGTRAP
- { SIGTRAP, T, NULL },
-#endif
-#ifdef SIGABRT
- { SIGABRT, N + T, NULL },
-#endif
-#ifdef SIGBUS
- { SIGBUS, P, NULL },
-#endif
-#ifdef SIGFPE
- { SIGFPE, P, NULL },
-#endif
-#ifdef SIGUSR1
- { SIGUSR1, N, NULL },
-#endif
-#ifdef SIGSEGV
- { SIGSEGV, P, NULL },
-#endif
-#ifdef SIGUSR2
- { SIGUSR2, N, NULL },
-#endif
-#ifdef SIGPIPE
- { SIGPIPE, N, NULL },
-#endif
-#ifdef SIGALRM
- { SIGALRM, N, NULL },
-#endif
-#ifdef SIGTERM
- { SIGTERM, N + K, NULL },
-#endif
-#ifdef SIGSTKFLT
- { SIGSTKFLT, T, NULL },
-#endif
-#ifdef SIGCHLD
- { SIGCHLD, N, NULL },
-#endif
-#ifdef SIGCONT
- { SIGCONT, N + D, NULL },
-#endif
-#ifdef SIGTSTP
- { SIGTSTP, N + D, NULL },
-#endif
-#ifdef SIGTTIN
- { SIGTTIN, N + D, NULL },
-#endif
-#ifdef SIGTTOU
- { SIGTTOU, N + D, NULL },
-#endif
-#ifdef SIGURG
- { SIGURG, N, NULL },
-#endif
-#ifdef SIGXCPU
- { SIGXCPU, N, NULL },
-#endif
-#ifdef SIGXFSZ
- { SIGXFSZ, N, NULL },
-#endif
-#ifdef SIGVTALRM
- { SIGVTALRM, N, NULL },
-#endif
-#ifdef SIGPROF
- { SIGPROF, N, NULL },
-#endif
-#ifdef SIGWINCH
- { SIGWINCH, N, NULL },
-#endif
-#ifdef SIGIO
- { SIGIO, N, NULL },
-#endif
-#ifdef SIGPWR
- { SIGPWR, N, NULL },
-#endif
-#ifdef SIGSYS
- { SIGSYS, N, NULL },
-#endif
-#ifdef SIGEMT
- { SIGEMT, T, NULL },
-#endif
-#ifdef SIGINFO
- { SIGINFO, N, NULL },
-#endif
-#ifdef SIGTHR
- { SIGTHR, N, NULL },
-#endif
- { -1, 0, NULL }
-};
-#undef N
-#undef K
-#undef T
-#undef P
-#undef D
-
-/* Handle a signal, for cases where we don't panic. We can split the
- stack here. */
-
-void
-runtime_sighandler (int sig, Siginfo *info,
- void *context __attribute__ ((unused)), G *gp)
-{
- M *m;
- int i;
-
- m = runtime_m ();
-
-#ifdef SIGPROF
- if (sig == SIGPROF)
- {
- if (m != NULL && gp != m->g0 && gp != m->gsignal)
- runtime_sigprof ();
- return;
- }
-#endif
-
- if (m == NULL)
- {
- runtime_badsignal (sig);
- return;
- }
-
- for (i = 0; runtime_sigtab[i].sig != -1; ++i)
- {
- SigTab *t;
- bool notify, crash;
-
- t = &runtime_sigtab[i];
+extern void *__splitstack_find_context(void *context[10], size_t *,
+ void **, void **, void **);
- if (t->sig != sig)
- continue;
-
- notify = false;
-#ifdef SA_SIGINFO
- notify = info != NULL && info->si_code == SI_USER;
#endif
- if (notify || (t->flags & SigNotify) != 0)
- {
- if (__go_sigsend (sig))
- return;
- }
- if ((t->flags & SigKill) != 0)
- runtime_exit (2);
- if ((t->flags & SigThrow) == 0)
- return;
-
- runtime_startpanic ();
- {
- const char *name = NULL;
+// The rest of the signal handler, written in Go.
-#ifdef HAVE_STRSIGNAL
- name = strsignal (sig);
-#endif
+extern void sigtrampgo(uint32, siginfo_t *, void *)
+ __asm__(GOSYM_PREFIX "runtime.sigtrampgo");
- if (name == NULL)
- runtime_printf ("Signal %d\n", sig);
- else
- runtime_printf ("%s\n", name);
- }
+// The Go signal handler, written in C. This should be running on the
+// alternate signal stack. This is responsible for setting up the
+// split stack context so that stack guard checks will work as
+// expected.
- if (m->lockedg != NULL && m->ncgo > 0 && gp == m->g0)
- {
- runtime_printf("signal arrived during cgo execution\n");
- gp = m->lockedg;
- }
+void sigtramp(int, siginfo_t *, void *)
+ __attribute__ ((no_split_stack));
- runtime_printf ("\n");
+void sigtramp(int, siginfo_t *, void *)
+ __asm__ (GOSYM_PREFIX "runtime.sigtramp");
- if (runtime_gotraceback (&crash))
- {
- G *g;
-
- g = runtime_g ();
- runtime_traceback ();
- runtime_tracebackothers (g);
-
- /* The gc library calls runtime_dumpregs here, and provides
- a function that prints the registers saved in context in
- a readable form. */
- }
-
- if (crash)
- runtime_crash ();
-
- runtime_exit (2);
- }
-
- __builtin_unreachable ();
-}
+#ifndef USING_SPLIT_STACK
-/* The start of handling a signal which panics. */
+// When not using split stacks, there are no stack checks, and there
+// is nothing special for this function to do.
-static void
-sig_panic_leadin (G *gp)
+void
+sigtramp(int sig, siginfo_t *info, void *context)
{
- int i;
- sigset_t clear;
-
- if (!runtime_canpanic (gp))
- runtime_throw ("unexpected signal during runtime execution");
-
- /* The signal handler blocked signals; unblock them. */
- i = sigfillset (&clear);
- __go_assert (i == 0);
- i = pthread_sigmask (SIG_UNBLOCK, &clear, NULL);
- __go_assert (i == 0);
+ sigtrampgo(sig, info, context);
}
-#ifdef SA_SIGINFO
-
-/* Signal dispatch for signals which panic, on systems which support
- SA_SIGINFO. This is called on the thread stack, and as such it is
- permitted to split the stack. */
+#else // USING_SPLIT_STACK
-static void
-sig_panic_info_handler (int sig, Siginfo *info, void *context)
+void
+sigtramp(int sig, siginfo_t *info, void *context)
{
- G *g;
-
- g = runtime_g ();
- if (g == NULL || info->si_code == SI_USER)
- {
- runtime_sighandler (sig, info, context, g);
- return;
- }
-
- g->sig = sig;
- g->sigcode0 = info->si_code;
- g->sigcode1 = (uintptr_t) info->si_addr;
-
- /* It would be nice to set g->sigpc here as the gc library does, but
- I don't know how to get it portably. */
-
- sig_panic_leadin (g);
-
- switch (sig)
- {
-#ifdef SIGBUS
- case SIGBUS:
- if ((info->si_code == BUS_ADRERR && (uintptr_t) info->si_addr < 0x1000)
- || g->paniconfault)
- runtime_panicstring ("invalid memory address or "
- "nil pointer dereference");
- runtime_printf ("unexpected fault address %p\n", info->si_addr);
- runtime_throw ("fault");
-#endif
-
-#ifdef SIGSEGV
- case SIGSEGV:
- if (((info->si_code == 0
- || info->si_code == SEGV_MAPERR
- || info->si_code == SEGV_ACCERR)
- && (uintptr_t) info->si_addr < 0x1000)
- || g->paniconfault)
- runtime_panicstring ("invalid memory address or "
- "nil pointer dereference");
- runtime_printf ("unexpected fault address %p\n", info->si_addr);
- runtime_throw ("fault");
-#endif
+ G *gp;
+ void *stack_context[10];
+ void *stack;
+ size_t stack_size;
+ void *next_segment;
+ void *next_sp;
+ void *initial_sp;
+ uintptr sp;
+ stack_t st;
+ uintptr stsp;
+
+ gp = runtime_g();
+
+ if (gp == nil) {
+ // Let the Go code handle this case.
+ // It should only call nosplit functions in this case.
+ sigtrampgo(sig, info, context);
+ return;
+ }
-#ifdef SIGFPE
- case SIGFPE:
- switch (info->si_code)
- {
- case FPE_INTDIV:
- runtime_panicstring ("integer divide by zero");
- case FPE_INTOVF:
- runtime_panicstring ("integer overflow");
+ // If this signal is one for which we will panic, we are not
+ // on the alternate signal stack. It's OK to call split-stack
+ // functions here.
+ if (sig == SIGBUS || sig == SIGFPE || sig == SIGSEGV) {
+ sigtrampgo(sig, info, context);
+ return;
}
- runtime_panicstring ("floating point error");
-#endif
- }
- /* All signals with SigPanic should be in cases above, and this
- handler should only be invoked for those signals. */
- __builtin_unreachable ();
-}
+ // We are running on the alternate signal stack.
+
+ __splitstack_getcontext(&stack_context[0]);
+
+ stack = __splitstack_find_context(&gp->m->gsignal->stackcontext[0],
+ &stack_size, &next_segment,
+ &next_sp, &initial_sp);
+
+ // If some non-Go code called sigaltstack, adjust.
+ sp = (uintptr)(&stack_size);
+ if (sp < (uintptr)(stack) || sp >= (uintptr)(stack) + stack_size) {
+ sigaltstack(nil, &st);
+ if ((st.ss_flags & SS_DISABLE) != 0) {
+ runtime_printf("signal %d received on thread with no signal stack\n", (int32)(sig));
+ runtime_throw("non-Go code disabled sigaltstack");
+ }
+
+ stsp = (uintptr)(st.ss_sp);
+ if (sp < stsp || sp >= stsp + st.ss_size) {
+ runtime_printf("signal %d received but handler not on signal stack\n", (int32)(sig));
+ runtime_throw("non-Go code set up signal handler without SA_ONSTACK flag");
+ }
+
+ // Unfortunately __splitstack_find_context will return NULL
+ // when it is called on a context that has never been used.
+ // There isn't much we can do but assume all is well.
+ if (stack != NULL) {
+ // Here the gc runtime adjusts the gsignal
+ // stack guard to match the values returned by
+ // sigaltstack. Unfortunately we have no way
+ // to do that.
+ runtime_printf("signal %d received on unknown signal stack\n", (int32)(sig));
+ runtime_throw("non-Go code changed signal stack");
+ }
+ }
-#else /* !defined (SA_SIGINFO) */
+ // Set the split stack context so that the stack guards are
+ // checked correctly.
-static void
-sig_panic_handler (int sig)
-{
- G *g;
-
- g = runtime_g ();
- if (g == NULL)
- {
- runtime_sighandler (sig, NULL, NULL, g);
- return;
- }
-
- g->sig = sig;
- g->sigcode0 = 0;
- g->sigcode1 = 0;
-
- sig_panic_leadin (g);
-
- switch (sig)
- {
-#ifdef SIGBUS
- case SIGBUS:
- runtime_panicstring ("invalid memory address or "
- "nil pointer dereference");
-#endif
+ __splitstack_setcontext(&gp->m->gsignal->stackcontext[0]);
-#ifdef SIGSEGV
- case SIGSEGV:
- runtime_panicstring ("invalid memory address or "
- "nil pointer dereference");
-#endif
+ sigtrampgo(sig, info, context);
-#ifdef SIGFPE
- case SIGFPE:
- runtime_panicstring ("integer divide by zero or floating point error");
-#endif
- }
+ // We are going to return back to the signal trampoline and
+ // then to whatever we were doing before we got the signal.
+ // Restore the split stack context so that stack guards are
+ // checked correctly.
- /* All signals with SigPanic should be in cases above, and this
- handler should only be invoked for those signals. */
- __builtin_unreachable ();
+ __splitstack_setcontext(&stack_context[0]);
}
-#endif /* !defined (SA_SIGINFO) */
+#endif // USING_SPLIT_STACK
-/* A signal handler used for signals which are not going to panic.
- This is called on the alternate signal stack so it may not split
- the stack. */
+// C function to return the address of the sigtramp function.
+uintptr getSigtramp(void) __asm__ (GOSYM_PREFIX "runtime.getSigtramp");
-static void
-sig_tramp_info (int, Siginfo *, void *) __attribute__ ((no_split_stack));
-
-static void
-sig_tramp_info (int sig, Siginfo *info, void *context)
+uintptr
+getSigtramp()
{
- G *gp;
- M *mp;
-#ifdef USING_SPLIT_STACK
- void *stack_context[10];
-#endif
-
- /* We are now running on the stack registered via sigaltstack.
- (Actually there is a small span of time between runtime_siginit
- and sigaltstack when the program starts.) */
- gp = runtime_g ();
- mp = runtime_m ();
-
- if (gp != NULL)
- {
-#ifdef USING_SPLIT_STACK
- __splitstack_getcontext (&stack_context[0]);
-#endif
- }
-
- if (gp != NULL && mp->gsignal != NULL)
- {
- /* We are running on the signal stack. Set the split stack
- context so that the stack guards are checked correctly. */
-#ifdef USING_SPLIT_STACK
- __splitstack_setcontext (&mp->gsignal->stack_context[0]);
-#endif
- }
-
- runtime_sighandler (sig, info, context, gp);
-
- /* We are going to return back to the signal trampoline and then to
- whatever we were doing before we got the signal. Restore the
- split stack context so that stack guards are checked
- correctly. */
-
- if (gp != NULL)
- {
-#ifdef USING_SPLIT_STACK
- __splitstack_setcontext (&stack_context[0]);
-#endif
- }
+ return (uintptr)(void*)sigtramp;
}
-#ifndef SA_SIGINFO
+// C code to manage the sigaction sa_sigaction field, which is
+// typically a union and so hard for mksysinfo.sh to handle.
-static void sig_tramp (int sig) __attribute__ ((no_split_stack));
+uintptr getSigactionHandler(struct sigaction*)
+ __attribute__ ((no_split_stack));
-static void
-sig_tramp (int sig)
+uintptr getSigactionHandler(struct sigaction*)
+ __asm__ (GOSYM_PREFIX "runtime.getSigactionHandler");
+
+uintptr
+getSigactionHandler(struct sigaction* sa)
{
- sig_tramp_info (sig, NULL, NULL);
+ return (uintptr)(sa->sa_sigaction);
}
-#endif
+void setSigactionHandler(struct sigaction*, uintptr)
+ __attribute__ ((no_split_stack));
+
+void setSigactionHandler(struct sigaction*, uintptr)
+ __asm__ (GOSYM_PREFIX "runtime.setSigactionHandler");
void
-runtime_setsig (int32 i, GoSighandler *fn, bool restart)
+setSigactionHandler(struct sigaction* sa, uintptr handler)
{
- struct sigaction sa;
- int r;
- SigTab *t;
-
- memset (&sa, 0, sizeof sa);
-
- r = sigfillset (&sa.sa_mask);
- __go_assert (r == 0);
-
- t = &runtime_sigtab[i];
-
- if ((t->flags & SigPanic) == 0)
- {
-#ifdef SA_SIGINFO
- sa.sa_flags = SA_ONSTACK | SA_SIGINFO;
- if (fn == runtime_sighandler)
- fn = (void *) sig_tramp_info;
- sa.sa_sigaction = (void *) fn;
-#else
- sa.sa_flags = SA_ONSTACK;
- if (fn == runtime_sighandler)
- fn = (void *) sig_tramp;
- sa.sa_handler = (void *) fn;
-#endif
- }
- else
- {
-#ifdef SA_SIGINFO
- sa.sa_flags = SA_SIGINFO;
- if (fn == runtime_sighandler)
- fn = (void *) sig_panic_info_handler;
- sa.sa_sigaction = (void *) fn;
-#else
- sa.sa_flags = 0;
- if (fn == runtime_sighandler)
- fn = (void *) sig_panic_handler;
- sa.sa_handler = (void *) fn;
-#endif
- }
-
- if (restart)
- sa.sa_flags |= SA_RESTART;
-
- if (sigaction (t->sig, &sa, NULL) != 0)
- __go_assert (0);
+ sa->sa_sigaction = (void*)(handler);
}
-GoSighandler*
-runtime_getsig (int32 i)
-{
- struct sigaction sa;
- int r;
- SigTab *t;
-
- memset (&sa, 0, sizeof sa);
-
- r = sigemptyset (&sa.sa_mask);
- __go_assert (r == 0);
+// C code to fetch values from the siginfo_t and ucontext_t pointers
+// passed to a signal handler.
- t = &runtime_sigtab[i];
-
- if (sigaction (t->sig, NULL, &sa) != 0)
- runtime_throw ("sigaction read failure");
-
- if ((void *) sa.sa_handler == sig_tramp_info)
- return runtime_sighandler;
-#ifdef SA_SIGINFO
- if ((void *) sa.sa_handler == sig_panic_info_handler)
- return runtime_sighandler;
-#else
- if ((void *) sa.sa_handler == sig_tramp
- || (void *) sa.sa_handler == sig_panic_handler)
- return runtime_sighandler;
-#endif
-
- return (void *) sa.sa_handler;
-}
-
-/* Used by the os package to raise SIGPIPE. */
+struct getSiginfoRet {
+ uintptr sigaddr;
+ uintptr sigpc;
+};
-void os_sigpipe (void) __asm__ (GOSYM_PREFIX "os.sigpipe");
+struct getSiginfoRet getSiginfo(siginfo_t *, void *)
+ __asm__(GOSYM_PREFIX "runtime.getSiginfo");
-void
-os_sigpipe (void)
+struct getSiginfoRet
+getSiginfo(siginfo_t *info, void *context __attribute__((unused)))
{
- struct sigaction sa;
- int i;
+ struct getSiginfoRet ret;
+ Location loc[1];
+ int32 n;
+
+ if (info == nil) {
+ ret.sigaddr = 0;
+ } else {
+ ret.sigaddr = (uintptr)(info->si_addr);
+ }
+ ret.sigpc = 0;
- if (__go_sigsend (SIGPIPE))
- return;
+ // There doesn't seem to be a portable way to get the PC.
+ // Use unportable code to pull it from context, and if that fails
+ // try a stack backtrace across the signal handler.
- memset (&sa, 0, sizeof sa);
+#ifdef __x86_64__
+ #ifdef __linux__
+ ret.sigpc = ((ucontext_t*)(context))->uc_mcontext.gregs[REG_RIP];
+ #endif
+#endif
+#ifdef __i386__
+ #ifdef __linux__
+ ret.sigpc = ((ucontext_t*)(context))->uc_mcontext.gregs[REG_EIP];
+ #endif
+#endif
- sa.sa_handler = SIG_DFL;
+ if (ret.sigpc == 0) {
+ // Skip getSiginfo/sighandler/sigtrampgo/sigtramp/handler.
+ n = runtime_callers(5, &loc[0], 1, false);
+ if (n > 0) {
+ ret.sigpc = loc[0].pc;
+ }
+ }
- i = sigemptyset (&sa.sa_mask);
- __go_assert (i == 0);
+ return ret;
+}
- if (sigaction (SIGPIPE, &sa, NULL) != 0)
- abort ();
+// Dump registers when crashing in a signal.
+// There is no portable way to write this,
+// so we just have some CPU/OS specific implementations.
- raise (SIGPIPE);
-}
+void dumpregs(siginfo_t *, void *)
+ __asm__(GOSYM_PREFIX "runtime.dumpregs");
void
-runtime_setprof(bool on)
+dumpregs(siginfo_t *info __attribute__((unused)), void *context __attribute__((unused)))
{
- USED(on);
+#ifdef __x86_64__
+ #ifdef __linux__
+ {
+ mcontext_t *m = &((ucontext_t*)(context))->uc_mcontext;
+
+ runtime_printf("rax %X\n", m->gregs[REG_RAX]);
+ runtime_printf("rbx %X\n", m->gregs[REG_RBX]);
+ runtime_printf("rcx %X\n", m->gregs[REG_RCX]);
+ runtime_printf("rdx %X\n", m->gregs[REG_RDX]);
+ runtime_printf("rdi %X\n", m->gregs[REG_RDI]);
+ runtime_printf("rsi %X\n", m->gregs[REG_RSI]);
+ runtime_printf("rbp %X\n", m->gregs[REG_RBP]);
+ runtime_printf("rsp %X\n", m->gregs[REG_RSP]);
+ runtime_printf("r8 %X\n", m->gregs[REG_R8]);
+ runtime_printf("r9 %X\n", m->gregs[REG_R9]);
+ runtime_printf("r10 %X\n", m->gregs[REG_R10]);
+ runtime_printf("r11 %X\n", m->gregs[REG_R11]);
+ runtime_printf("r12 %X\n", m->gregs[REG_R12]);
+ runtime_printf("r13 %X\n", m->gregs[REG_R13]);
+ runtime_printf("r14 %X\n", m->gregs[REG_R14]);
+ runtime_printf("r15 %X\n", m->gregs[REG_R15]);
+ runtime_printf("rip %X\n", m->gregs[REG_RIP]);
+ runtime_printf("rflags %X\n", m->gregs[REG_EFL]);
+ runtime_printf("cs %X\n", m->gregs[REG_CSGSFS] & 0xffff);
+ runtime_printf("fs %X\n", (m->gregs[REG_CSGSFS] >> 16) & 0xffff);
+ runtime_printf("gs %X\n", (m->gregs[REG_CSGSFS] >> 32) & 0xffff);
+ }
+ #endif
+#endif
+
+#ifdef __i386__
+ #ifdef __linux__
+ {
+ mcontext_t *m = &((ucontext_t*)(context))->uc_mcontext;
+
+ runtime_printf("eax %X\n", m->gregs[REG_EAX]);
+ runtime_printf("ebx %X\n", m->gregs[REG_EBX]);
+ runtime_printf("ecx %X\n", m->gregs[REG_ECX]);
+ runtime_printf("edx %X\n", m->gregs[REG_EDX]);
+ runtime_printf("edi %X\n", m->gregs[REG_EDI]);
+ runtime_printf("esi %X\n", m->gregs[REG_ESI]);
+ runtime_printf("ebp %X\n", m->gregs[REG_EBP]);
+ runtime_printf("esp %X\n", m->gregs[REG_ESP]);
+ runtime_printf("eip %X\n", m->gregs[REG_EIP]);
+ runtime_printf("eflags %X\n", m->gregs[REG_EFL]);
+ runtime_printf("cs %X\n", m->gregs[REG_CS]);
+ runtime_printf("fs %X\n", m->gregs[REG_FS]);
+ runtime_printf("gs %X\n", m->gregs[REG_GS]);
+ }
+ #endif
+#endif
}
diff --git a/libgo/runtime/go-strcmp.c b/libgo/runtime/go-strcmp.c
deleted file mode 100644
index bcc270bf8a..0000000000
--- a/libgo/runtime/go-strcmp.c
+++ /dev/null
@@ -1,25 +0,0 @@
-/* go-strcmp.c -- the go string comparison function.
-
- Copyright 2009 The Go Authors. All rights reserved.
- Use of this source code is governed by a BSD-style
- license that can be found in the LICENSE file. */
-
-#include "runtime.h"
-
-intgo
-__go_strcmp(String s1, String s2)
-{
- int i;
-
- i = __builtin_memcmp(s1.str, s2.str,
- (s1.len < s2.len ? s1.len : s2.len));
- if (i != 0)
- return i;
-
- if (s1.len < s2.len)
- return -1;
- else if (s1.len > s2.len)
- return 1;
- else
- return 0;
-}
diff --git a/libgo/runtime/go-string-to-byte-array.c b/libgo/runtime/go-string-to-byte-array.c
deleted file mode 100644
index 61591eb975..0000000000
--- a/libgo/runtime/go-string-to-byte-array.c
+++ /dev/null
@@ -1,28 +0,0 @@
-/* go-string-to-byte-array.c -- convert a string to an array of bytes in Go.
-
- Copyright 2010 The Go Authors. All rights reserved.
- Use of this source code is governed by a BSD-style
- license that can be found in the LICENSE file. */
-
-#include "runtime.h"
-#include "array.h"
-#include "arch.h"
-#include "malloc.h"
-
-struct __go_open_array
-__go_string_to_byte_array (String str)
-{
- uintptr cap;
- unsigned char *data;
- struct __go_open_array ret;
-
- cap = runtime_roundupsize (str.len);
- data = (unsigned char *) runtime_mallocgc (cap, 0, FlagNoScan | FlagNoZero);
- __builtin_memcpy (data, str.str, str.len);
- if (cap != (uintptr) str.len)
- __builtin_memset (data + str.len, 0, cap - (uintptr) str.len);
- ret.__values = (void *) data;
- ret.__count = str.len;
- ret.__capacity = str.len;
- return ret;
-}
diff --git a/libgo/runtime/go-string-to-int-array.c b/libgo/runtime/go-string-to-int-array.c
deleted file mode 100644
index 5546889131..0000000000
--- a/libgo/runtime/go-string-to-int-array.c
+++ /dev/null
@@ -1,56 +0,0 @@
-/* go-string-to-int-array.c -- convert a string to an array of ints in Go.
-
- Copyright 2010 The Go Authors. All rights reserved.
- Use of this source code is governed by a BSD-style
- license that can be found in the LICENSE file. */
-
-#include "runtime.h"
-#include "go-alloc.h"
-#include "go-string.h"
-#include "array.h"
-#include "arch.h"
-#include "malloc.h"
-
-struct __go_open_array
-__go_string_to_int_array (String str)
-{
- size_t c;
- const unsigned char *p;
- const unsigned char *pend;
- uintptr mem;
- uint32_t *data;
- uint32_t *pd;
- struct __go_open_array ret;
-
- c = 0;
- p = str.str;
- pend = p + str.len;
- while (p < pend)
- {
- int rune;
-
- ++c;
- p += __go_get_rune (p, pend - p, &rune);
- }
-
- if (c > MaxMem / sizeof (uint32_t))
- runtime_throw ("out of memory");
-
- mem = runtime_roundupsize (c * sizeof (uint32_t));
- data = (uint32_t *) runtime_mallocgc (mem, 0, FlagNoScan | FlagNoZero);
- p = str.str;
- pd = data;
- while (p < pend)
- {
- int rune;
-
- p += __go_get_rune (p, pend - p, &rune);
- *pd++ = rune;
- }
- if (mem > (uintptr) c * sizeof (uint32_t))
- __builtin_memset (data + c, 0, mem - (uintptr) c * sizeof (uint32_t));
- ret.__values = (void *) data;
- ret.__count = c;
- ret.__capacity = (intgo) (mem / sizeof (uint32_t));
- return ret;
-}
diff --git a/libgo/runtime/go-strplus.c b/libgo/runtime/go-strplus.c
deleted file mode 100644
index 13915e3e67..0000000000
--- a/libgo/runtime/go-strplus.c
+++ /dev/null
@@ -1,30 +0,0 @@
-/* go-strplus.c -- the go string append function.
-
- Copyright 2009 The Go Authors. All rights reserved.
- Use of this source code is governed by a BSD-style
- license that can be found in the LICENSE file. */
-
-#include "runtime.h"
-#include "arch.h"
-#include "malloc.h"
-
-String
-__go_string_plus (String s1, String s2)
-{
- int len;
- byte *retdata;
- String ret;
-
- if (s1.len == 0)
- return s2;
- else if (s2.len == 0)
- return s1;
-
- len = s1.len + s2.len;
- retdata = runtime_mallocgc (len, 0, FlagNoScan | FlagNoZero);
- __builtin_memcpy (retdata, s1.str, s1.len);
- __builtin_memcpy (retdata + s1.len, s2.str, s2.len);
- ret.str = retdata;
- ret.len = len;
- return ret;
-}
diff --git a/libgo/runtime/go-strslice.c b/libgo/runtime/go-strslice.c
index 21e1bc031d..c9f196bc9c 100644
--- a/libgo/runtime/go-strslice.c
+++ b/libgo/runtime/go-strslice.c
@@ -4,10 +4,7 @@
Use of this source code is governed by a BSD-style
license that can be found in the LICENSE file. */
-#include "go-panic.h"
#include "runtime.h"
-#include "arch.h"
-#include "malloc.h"
String
__go_string_slice (String s, intgo start, intgo end)
diff --git a/libgo/runtime/go-traceback.c b/libgo/runtime/go-traceback.c
deleted file mode 100644
index 7b33cca868..0000000000
--- a/libgo/runtime/go-traceback.c
+++ /dev/null
@@ -1,37 +0,0 @@
-/* go-traceback.c -- stack backtrace for Go.
-
- Copyright 2012 The Go Authors. All rights reserved.
- Use of this source code is governed by a BSD-style
- license that can be found in the LICENSE file. */
-
-#include "config.h"
-
-#include "runtime.h"
-
-/* Print a stack trace for the current goroutine. */
-
-void
-runtime_traceback ()
-{
- Location locbuf[100];
- int32 c;
-
- c = runtime_callers (1, locbuf, nelem (locbuf), false);
- runtime_printtrace (locbuf, c, true);
-}
-
-void
-runtime_printtrace (Location *locbuf, int32 c, bool current)
-{
- int32 i;
-
- for (i = 0; i < c; ++i)
- {
- if (runtime_showframe (locbuf[i].function, current))
- {
- runtime_printf ("%S\n", locbuf[i].function);
- runtime_printf ("\t%S:%D\n", locbuf[i].filename,
- (int64) locbuf[i].lineno);
- }
- }
-}
diff --git a/libgo/runtime/go-trampoline.c b/libgo/runtime/go-trampoline.c
deleted file mode 100644
index 17f73d4f56..0000000000
--- a/libgo/runtime/go-trampoline.c
+++ /dev/null
@@ -1,113 +0,0 @@
-/* go-trampoline.c -- allocate a trampoline for a nested function.
-
- Copyright 2009 The Go Authors. All rights reserved.
- Use of this source code is governed by a BSD-style
- license that can be found in the LICENSE file. */
-
-#include "config.h"
-
-#include <stddef.h>
-#include <stdint.h>
-#include <unistd.h>
-
-#ifdef HAVE_SYS_MMAN_H
-#include <sys/mman.h>
-#endif
-
-#include "runtime.h"
-#include "arch.h"
-#include "malloc.h"
-#include "go-assert.h"
-
-/* Trampolines need to run in memory that is both writable and
- executable. In order to implement them, we grab a page of memory
- and mprotect it. We fill in the page with trampolines as they are
- required. When we run out of space, we drop the pointer to the
- page and allocate a new one. The page will be freed by the garbage
- collector when there are no more variables of type func pointing to
- it. */
-
-/* A lock to control access to the page of closures. */
-
-static Lock trampoline_lock;
-
-/* The page of closures. */
-
-static unsigned char *trampoline_page;
-
-/* The size of trampoline_page. */
-
-static uintptr_t trampoline_page_size;
-
-/* The number of bytes we have used on trampoline_page. */
-
-static uintptr_t trampoline_page_used;
-
-/* Allocate a trampoline of SIZE bytes that will use the closure in
- CLOSURE. */
-
-void *
-__go_allocate_trampoline (uintptr_t size, void *closure)
-{
- uintptr_t ptr_size;
- uintptr_t full_size;
- unsigned char *ret;
-
- /* Because the garbage collector only looks at aligned addresses, we
- need to store the closure at an aligned address to ensure that it
- sees it. */
- ptr_size = sizeof (void *);
- full_size = (((size + ptr_size - 1) / ptr_size) * ptr_size);
- full_size += ptr_size;
-
- runtime_lock (&trampoline_lock);
-
- if (full_size < trampoline_page_size - trampoline_page_used)
- trampoline_page = NULL;
-
- if (trampoline_page == NULL)
- {
- uintptr_t page_size;
- unsigned char *page;
-
- page_size = getpagesize ();
- __go_assert (page_size >= full_size);
- page = (unsigned char *) runtime_mallocgc (2 * page_size - 1, 0, 0, 0);
- page = (unsigned char *) (((uintptr_t) page + page_size - 1)
- & ~ (page_size - 1));
-
-#ifdef HAVE_SYS_MMAN_H
- {
- int i;
-
- i = mprotect (page, page_size, PROT_READ | PROT_WRITE | PROT_EXEC);
- __go_assert (i == 0);
- }
-#endif
-
- trampoline_page = page;
- trampoline_page_size = page_size;
- trampoline_page_used = 0;
- }
-
- ret = trampoline_page + trampoline_page_used;
- trampoline_page_used += full_size;
-
- runtime_unlock (&trampoline_lock);
-
- __builtin_memcpy (ret + full_size - ptr_size, &closure, ptr_size);
-
- return (void *) ret;
-}
-
-/* Scan the trampoline page when running the garbage collector. This
- just makes sure that the garbage collector sees the pointer in
- trampoline_page, so that the page itself is not freed if there are
- no other references to it. */
-
-void
-runtime_trampoline_scan (void (*addroot) (Obj))
-{
- if (trampoline_page != NULL)
- addroot ((Obj){(byte *) &trampoline_page, sizeof trampoline_page, 0});
-}
diff --git a/libgo/runtime/go-type-complex.c b/libgo/runtime/go-type-complex.c
deleted file mode 100644
index 585984e9fe..0000000000
--- a/libgo/runtime/go-type-complex.c
+++ /dev/null
@@ -1,120 +0,0 @@
-/* go-type-complex.c -- hash and equality complex functions.
-
- Copyright 2012 The Go Authors. All rights reserved.
- Use of this source code is governed by a BSD-style
- license that can be found in the LICENSE file. */
-
-#include <complex.h>
-#include <math.h>
-#include <stdint.h>
-#include <string.h>
-#include "runtime.h"
-#include "go-type.h"
-
-/* Hash function for float types. */
-
-uintptr_t
-__go_type_hash_complex (const void *vkey, uintptr_t key_size)
-{
- if (key_size == 8)
- {
- const complex float *cfp;
- complex float cf;
- float cfr;
- float cfi;
- uint64_t fi;
-
- cfp = (const complex float *) vkey;
- cf = *cfp;
-
- cfr = crealf (cf);
- cfi = cimagf (cf);
-
- if (isinf (cfr) || isinf (cfi))
- return 0;
-
- /* NaN != NaN, so the hash code of a NaN is irrelevant. Make it
- random so that not all NaNs wind up in the same place. */
- if (isnan (cfr) || isnan (cfi))
- return runtime_fastrand1 ();
-
- /* Avoid negative zero. */
- if (cfr == 0 && cfi == 0)
- return 0;
- else if (cfr == 0)
- cf = cfi * I;
- else if (cfi == 0)
- cf = cfr;
-
- memcpy (&fi, &cf, 8);
- return (uintptr_t) cfi;
- }
- else if (key_size == 16)
- {
- const complex double *cdp;
- complex double cd;
- double cdr;
- double cdi;
- uint64_t di[2];
-
- cdp = (const complex double *) vkey;
- cd = *cdp;
-
- cdr = creal (cd);
- cdi = cimag (cd);
-
- if (isinf (cdr) || isinf (cdi))
- return 0;
-
- if (isnan (cdr) || isnan (cdi))
- return runtime_fastrand1 ();
-
- /* Avoid negative zero. */
- if (cdr == 0 && cdi == 0)
- return 0;
- else if (cdr == 0)
- cd = cdi * I;
- else if (cdi == 0)
- cd = cdr;
-
- memcpy (&di, &cd, 16);
- return di[0] ^ di[1];
- }
- else
- runtime_throw ("__go_type_hash_complex: invalid complex size");
-}
-
-const FuncVal __go_type_hash_complex_descriptor =
- { (void *) __go_type_hash_complex };
-
-/* Equality function for complex types. */
-
-_Bool
-__go_type_equal_complex (const void *vk1, const void *vk2, uintptr_t key_size)
-{
- if (key_size == 8)
- {
- const complex float *cfp1;
- const complex float *cfp2;
-
- cfp1 = (const complex float *) vk1;
- cfp2 = (const complex float *) vk2;
-
- return *cfp1 == *cfp2;
- }
- else if (key_size == 16)
- {
- const complex double *cdp1;
- const complex double *cdp2;
-
- cdp1 = (const complex double *) vk1;
- cdp2 = (const complex double *) vk2;
-
- return *cdp1 == *cdp2;
- }
- else
- runtime_throw ("__go_type_equal_complex: invalid complex size");
-}
-
-const FuncVal __go_type_equal_complex_descriptor =
- { (void *) __go_type_equal_complex };
diff --git a/libgo/runtime/go-type-eface.c b/libgo/runtime/go-type-eface.c
deleted file mode 100644
index 315c30efb7..0000000000
--- a/libgo/runtime/go-type-eface.c
+++ /dev/null
@@ -1,62 +0,0 @@
-/* go-type-eface.c -- hash and equality empty interface functions.
-
- Copyright 2010 The Go Authors. All rights reserved.
- Use of this source code is governed by a BSD-style
- license that can be found in the LICENSE file. */
-
-#include "runtime.h"
-#include "interface.h"
-#include "go-type.h"
-
-/* A hash function for an empty interface. */
-
-uintptr_t
-__go_type_hash_empty_interface (const void *vval,
- uintptr_t key_size __attribute__ ((unused)))
-{
- const struct __go_empty_interface *val;
- const struct __go_type_descriptor *descriptor;
- uintptr_t size;
-
- val = (const struct __go_empty_interface *) vval;
- descriptor = val->__type_descriptor;
- if (descriptor == NULL)
- return 0;
- size = descriptor->__size;
- if (__go_is_pointer_type (descriptor))
- return __go_call_hashfn (descriptor->__hashfn, &val->__object, size);
- else
- return __go_call_hashfn (descriptor->__hashfn, val->__object, size);
-}
-
-const FuncVal __go_type_hash_empty_interface_descriptor =
- { (void *) __go_type_hash_empty_interface };
-
-/* An equality function for an empty interface. */
-
-_Bool
-__go_type_equal_empty_interface (const void *vv1, const void *vv2,
- uintptr_t key_size __attribute__ ((unused)))
-{
- const struct __go_empty_interface *v1;
- const struct __go_empty_interface *v2;
- const struct __go_type_descriptor* v1_descriptor;
- const struct __go_type_descriptor* v2_descriptor;
-
- v1 = (const struct __go_empty_interface *) vv1;
- v2 = (const struct __go_empty_interface *) vv2;
- v1_descriptor = v1->__type_descriptor;
- v2_descriptor = v2->__type_descriptor;
- if (v1_descriptor == NULL || v2_descriptor == NULL)
- return v1_descriptor == v2_descriptor;
- if (!__go_type_descriptors_equal (v1_descriptor, v2_descriptor))
- return 0;
- if (__go_is_pointer_type (v1_descriptor))
- return v1->__object == v2->__object;
- else
- return __go_call_equalfn (v1_descriptor->__equalfn, v1->__object,
- v2->__object, v1_descriptor->__size);
-}
-
-const FuncVal __go_type_equal_empty_interface_descriptor =
- { (void *) __go_type_equal_empty_interface };
diff --git a/libgo/runtime/go-type-error.c b/libgo/runtime/go-type-error.c
deleted file mode 100644
index 8881a86f6e..0000000000
--- a/libgo/runtime/go-type-error.c
+++ /dev/null
@@ -1,34 +0,0 @@
-/* go-type-error.c -- invalid hash and equality functions.
-
- Copyright 2009 The Go Authors. All rights reserved.
- Use of this source code is governed by a BSD-style
- license that can be found in the LICENSE file. */
-
-#include "runtime.h"
-#include "go-type.h"
-
-/* A hash function used for a type which does not support hash
- functions. */
-
-uintptr_t
-__go_type_hash_error (const void *val __attribute__ ((unused)),
- uintptr_t key_size __attribute__ ((unused)))
-{
- runtime_panicstring ("hash of unhashable type");
-}
-
-const FuncVal __go_type_hash_error_descriptor =
- { (void *) __go_type_hash_error };
-
-/* An equality function for an interface. */
-
-_Bool
-__go_type_equal_error (const void *v1 __attribute__ ((unused)),
- const void *v2 __attribute__ ((unused)),
- uintptr_t key_size __attribute__ ((unused)))
-{
- runtime_panicstring ("comparing uncomparable types");
-}
-
-const FuncVal __go_type_equal_error_descriptor =
- { (void *) __go_type_equal_error };
diff --git a/libgo/runtime/go-type-float.c b/libgo/runtime/go-type-float.c
deleted file mode 100644
index 39f9b29ae7..0000000000
--- a/libgo/runtime/go-type-float.c
+++ /dev/null
@@ -1,92 +0,0 @@
-/* go-type-float.c -- hash and equality float functions.
-
- Copyright 2012 The Go Authors. All rights reserved.
- Use of this source code is governed by a BSD-style
- license that can be found in the LICENSE file. */
-
-#include <math.h>
-#include <stdint.h>
-#include "runtime.h"
-#include "go-type.h"
-
-/* Hash function for float types. */
-
-uintptr_t
-__go_type_hash_float (const void *vkey, uintptr_t key_size)
-{
- if (key_size == 4)
- {
- const float *fp;
- float f;
- uint32_t si;
-
- fp = (const float *) vkey;
- f = *fp;
-
- if (isinf (f) || f == 0)
- return 0;
-
- /* NaN != NaN, so the hash code of a NaN is irrelevant. Make it
- random so that not all NaNs wind up in the same place. */
- if (isnan (f))
- return runtime_fastrand1 ();
-
- memcpy (&si, vkey, 4);
- return (uintptr_t) si;
- }
- else if (key_size == 8)
- {
- const double *dp;
- double d;
- uint64_t di;
-
- dp = (const double *) vkey;
- d = *dp;
-
- if (isinf (d) || d == 0)
- return 0;
-
- if (isnan (d))
- return runtime_fastrand1 ();
-
- memcpy (&di, vkey, 8);
- return (uintptr_t) di;
- }
- else
- runtime_throw ("__go_type_hash_float: invalid float size");
-}
-
-const FuncVal __go_type_hash_float_descriptor =
- { (void *) __go_type_hash_float };
-
-/* Equality function for float types. */
-
-_Bool
-__go_type_equal_float (const void *vk1, const void *vk2, uintptr_t key_size)
-{
- if (key_size == 4)
- {
- const float *fp1;
- const float *fp2;
-
- fp1 = (const float *) vk1;
- fp2 = (const float *) vk2;
-
- return *fp1 == *fp2;
- }
- else if (key_size == 8)
- {
- const double *dp1;
- const double *dp2;
-
- dp1 = (const double *) vk1;
- dp2 = (const double *) vk2;
-
- return *dp1 == *dp2;
- }
- else
- runtime_throw ("__go_type_equal_float: invalid float size");
-}
-
-const FuncVal __go_type_equal_float_descriptor =
- { (void *) __go_type_equal_float };
diff --git a/libgo/runtime/go-type-identity.c b/libgo/runtime/go-type-identity.c
deleted file mode 100644
index a334d56cbe..0000000000
--- a/libgo/runtime/go-type-identity.c
+++ /dev/null
@@ -1,62 +0,0 @@
-/* go-type-identity.c -- hash and equality identity functions.
-
- Copyright 2009 The Go Authors. All rights reserved.
- Use of this source code is governed by a BSD-style
- license that can be found in the LICENSE file. */
-
-#include <stddef.h>
-
-#include "runtime.h"
-#include "go-type.h"
-
-/* An identity hash function for a type. This is used for types where
- we can simply use the type value itself as a hash code. This is
- true of, e.g., integers and pointers. */
-
-uintptr_t
-__go_type_hash_identity (const void *key, uintptr_t key_size)
-{
- uintptr_t ret;
- uintptr_t i;
- const unsigned char *p;
-
- if (key_size <= 8)
- {
- union
- {
- uint64 v;
- unsigned char a[8];
- } u;
- u.v = 0;
-#ifdef WORDS_BIGENDIAN
- __builtin_memcpy (&u.a[8 - key_size], key, key_size);
-#else
- __builtin_memcpy (&u.a[0], key, key_size);
-#endif
- if (sizeof (uintptr_t) >= 8)
- return (uintptr_t) u.v;
- else
- return (uintptr_t) ((u.v >> 32) ^ (u.v & 0xffffffff));
- }
-
- ret = 5381;
- for (i = 0, p = (const unsigned char *) key; i < key_size; i++, p++)
- ret = ret * 33 + *p;
- return ret;
-}
-
-const FuncVal __go_type_hash_identity_descriptor =
- { (void *) __go_type_hash_identity };
-
-/* An identity equality function for a type. This is used for types
- where we can check for equality by checking that the values have
- the same bits. */
-
-_Bool
-__go_type_equal_identity (const void *k1, const void *k2, uintptr_t key_size)
-{
- return __builtin_memcmp (k1, k2, key_size) == 0;
-}
-
-const FuncVal __go_type_equal_identity_descriptor =
- { (void *) __go_type_equal_identity };
diff --git a/libgo/runtime/go-type-interface.c b/libgo/runtime/go-type-interface.c
deleted file mode 100644
index e9e577956e..0000000000
--- a/libgo/runtime/go-type-interface.c
+++ /dev/null
@@ -1,62 +0,0 @@
-/* go-type-interface.c -- hash and equality interface functions.
-
- Copyright 2009 The Go Authors. All rights reserved.
- Use of this source code is governed by a BSD-style
- license that can be found in the LICENSE file. */
-
-#include "runtime.h"
-#include "interface.h"
-#include "go-type.h"
-
-/* A hash function for an interface. */
-
-uintptr_t
-__go_type_hash_interface (const void *vval,
- uintptr_t key_size __attribute__ ((unused)))
-{
- const struct __go_interface *val;
- const struct __go_type_descriptor *descriptor;
- uintptr_t size;
-
- val = (const struct __go_interface *) vval;
- if (val->__methods == NULL)
- return 0;
- descriptor = (const struct __go_type_descriptor *) val->__methods[0];
- size = descriptor->__size;
- if (__go_is_pointer_type (descriptor))
- return __go_call_hashfn (descriptor->__hashfn, &val->__object, size);
- else
- return __go_call_hashfn (descriptor->__hashfn, val->__object, size);
-}
-
-const FuncVal __go_type_hash_interface_descriptor =
- { (void *) __go_type_hash_interface };
-
-/* An equality function for an interface. */
-
-_Bool
-__go_type_equal_interface (const void *vv1, const void *vv2,
- uintptr_t key_size __attribute__ ((unused)))
-{
- const struct __go_interface *v1;
- const struct __go_interface *v2;
- const struct __go_type_descriptor* v1_descriptor;
- const struct __go_type_descriptor* v2_descriptor;
-
- v1 = (const struct __go_interface *) vv1;
- v2 = (const struct __go_interface *) vv2;
- if (v1->__methods == NULL || v2->__methods == NULL)
- return v1->__methods == v2->__methods;
- v1_descriptor = (const struct __go_type_descriptor *) v1->__methods[0];
- v2_descriptor = (const struct __go_type_descriptor *) v2->__methods[0];
- if (!__go_type_descriptors_equal (v1_descriptor, v2_descriptor))
- return 0;
- if (__go_is_pointer_type (v1_descriptor))
- return v1->__object == v2->__object;
- else
- return __go_call_equalfn (v1_descriptor->__equalfn, v1->__object,
- v2->__object, v1_descriptor->__size);
-}
-
-const FuncVal __go_type_equal_interface_descriptor =
- { (void *) __go_type_equal_interface };
diff --git a/libgo/runtime/go-type-string.c b/libgo/runtime/go-type-string.c
deleted file mode 100644
index 3d33d6ee51..0000000000
--- a/libgo/runtime/go-type-string.c
+++ /dev/null
@@ -1,49 +0,0 @@
-/* go-type-string.c -- hash and equality string functions.
-
- Copyright 2009 The Go Authors. All rights reserved.
- Use of this source code is governed by a BSD-style
- license that can be found in the LICENSE file. */
-
-#include "runtime.h"
-#include "go-type.h"
-#include "go-string.h"
-
-/* A string hash function for a map. */
-
-uintptr_t
-__go_type_hash_string (const void *vkey,
- uintptr_t key_size __attribute__ ((unused)))
-{
- uintptr_t ret;
- const String *key;
- intgo len;
- intgo i;
- const byte *p;
-
- ret = 5381;
- key = (const String *) vkey;
- len = key->len;
- for (i = 0, p = key->str; i < len; i++, p++)
- ret = ret * 33 + *p;
- return ret;
-}
-
-const FuncVal __go_type_hash_string_descriptor =
- { (void *) __go_type_hash_string };
-
-/* A string equality function for a map. */
-
-_Bool
-__go_type_equal_string (const void *vk1, const void *vk2,
- uintptr_t key_size __attribute__ ((unused)))
-{
- const String *k1;
- const String *k2;
-
- k1 = (const String *) vk1;
- k2 = (const String *) vk2;
- return __go_ptr_strings_equal (k1, k2);
-}
-
-const FuncVal __go_type_equal_string_descriptor =
- { (void *) __go_type_equal_string };
diff --git a/libgo/runtime/go-type.h b/libgo/runtime/go-type.h
index eb063ec678..e1552548d1 100644
--- a/libgo/runtime/go-type.h
+++ b/libgo/runtime/go-type.h
@@ -257,6 +257,33 @@ struct __go_map_type
/* The map value type. */
const struct __go_type_descriptor *__val_type;
+
+ /* The map bucket type. */
+ const struct __go_type_descriptor *__bucket_type;
+
+ /* The map header type. */
+ const struct __go_type_descriptor *__hmap_type;
+
+ /* The size of the key slot. */
+ uint8_t __key_size;
+
+ /* Whether to store a pointer to key rather than the key itself. */
+ uint8_t __indirect_key;
+
+ /* The size of the value slot. */
+ uint8_t __value_size;
+
+ /* Whether to store a pointer to value rather than the value itself. */
+ uint8_t __indirect_value;
+
+ /* The size of a bucket. */
+ uint16_t __bucket_size;
+
+ /* Whether the key type is reflexive--whether k==k for all keys. */
+ _Bool __reflexive_key;
+
+ /* Whether we should update the key when overwriting an entry. */
+ _Bool __need_key_update;
};
/* A pointer type. */
@@ -314,10 +341,11 @@ __go_is_pointer_type (const struct __go_type_descriptor *td)
/* Call a type hash function, given the __hashfn value. */
static inline uintptr_t
-__go_call_hashfn (const FuncVal *hashfn, const void *p, uintptr_t size)
+__go_call_hashfn (const FuncVal *hashfn, const void *p, uintptr_t seed,
+ uintptr_t size)
{
- uintptr_t (*h) (const void *, uintptr_t) = (void *) hashfn->fn;
- return __builtin_call_with_static_chain (h (p, size), hashfn);
+ uintptr_t (*h) (const void *, uintptr_t, uintptr_t) = (void *) hashfn->fn;
+ return __builtin_call_with_static_chain (h (p, seed, size), hashfn);
}
/* Call a type equality function, given the __equalfn value. */
@@ -334,29 +362,4 @@ extern _Bool
__go_type_descriptors_equal(const struct __go_type_descriptor*,
const struct __go_type_descriptor*);
-extern uintptr_t __go_type_hash_identity (const void *, uintptr_t);
-extern const FuncVal __go_type_hash_identity_descriptor;
-extern _Bool __go_type_equal_identity (const void *, const void *, uintptr_t);
-extern const FuncVal __go_type_equal_identity_descriptor;
-extern uintptr_t __go_type_hash_string (const void *, uintptr_t);
-extern const FuncVal __go_type_hash_string_descriptor;
-extern _Bool __go_type_equal_string (const void *, const void *, uintptr_t);
-extern const FuncVal __go_type_equal_string_descriptor;
-extern uintptr_t __go_type_hash_float (const void *, uintptr_t);
-extern const FuncVal __go_type_hash_float_descriptor;
-extern _Bool __go_type_equal_float (const void *, const void *, uintptr_t);
-extern const FuncVal __go_type_equal_float_descriptor;
-extern uintptr_t __go_type_hash_complex (const void *, uintptr_t);
-extern const FuncVal __go_type_hash_complex_descriptor;
-extern _Bool __go_type_equal_complex (const void *, const void *, uintptr_t);
-extern const FuncVal __go_type_equal_complex_descriptor;
-extern uintptr_t __go_type_hash_interface (const void *, uintptr_t);
-extern const FuncVal __go_type_hash_interface_descriptor;
-extern _Bool __go_type_equal_interface (const void *, const void *, uintptr_t);
-extern const FuncVal __go_type_equal_interface_descriptor;
-extern uintptr_t __go_type_hash_error (const void *, uintptr_t);
-extern const FuncVal __go_type_hash_error_descriptor;
-extern _Bool __go_type_equal_error (const void *, const void *, uintptr_t);
-extern const FuncVal __go_type_equal_error_descriptor;
-
#endif /* !defined(LIBGO_GO_TYPE_H) */
diff --git a/libgo/runtime/go-unsafe-new.c b/libgo/runtime/go-unsafe-new.c
index 7848642624..07f274f99a 100644
--- a/libgo/runtime/go-unsafe-new.c
+++ b/libgo/runtime/go-unsafe-new.c
@@ -8,7 +8,6 @@
#include "arch.h"
#include "malloc.h"
#include "go-type.h"
-#include "interface.h"
/* Implement unsafe_New, called from the reflect package. */
diff --git a/libgo/runtime/go-unsafe-newarray.c b/libgo/runtime/go-unsafe-newarray.c
index f5c5efce78..409ddd95dc 100644
--- a/libgo/runtime/go-unsafe-newarray.c
+++ b/libgo/runtime/go-unsafe-newarray.c
@@ -8,7 +8,6 @@
#include "arch.h"
#include "malloc.h"
#include "go-type.h"
-#include "interface.h"
/* Implement unsafe_NewArray, called from the reflect package. */
diff --git a/libgo/runtime/go-unsafe-pointer.c b/libgo/runtime/go-unsafe-pointer.c
index ce82fcd407..3a97ee1d4a 100644
--- a/libgo/runtime/go-unsafe-pointer.c
+++ b/libgo/runtime/go-unsafe-pointer.c
@@ -36,7 +36,13 @@ static const String reflection_string =
sizeof REFLECTION - 1
};
-const uintptr unsafe_Pointer_gc[] = {sizeof(void*), GC_APTR, 0, GC_END};
+const uintptr unsafe_Pointer_gc[] __attribute__((aligned(4))) =
+ {sizeof(void*), GC_APTR, 0, GC_END};
+
+extern const FuncVal runtime_pointerhash_descriptor
+ __asm__ (GOSYM_PREFIX "runtime.pointerhash$descriptor");
+extern const FuncVal runtime_pointerequal_descriptor
+ __asm__ (GOSYM_PREFIX "runtime.pointerequal$descriptor");
const struct __go_type_descriptor unsafe_Pointer =
{
@@ -51,9 +57,9 @@ const struct __go_type_descriptor unsafe_Pointer =
/* __hash */
78501163U,
/* __hashfn */
- &__go_type_hash_identity_descriptor,
+ &runtime_pointerhash_descriptor,
/* __equalfn */
- &__go_type_equal_identity_descriptor,
+ &runtime_pointerequal_descriptor,
/* __gc */
unsafe_Pointer_gc,
/* __reflection */
@@ -79,6 +85,12 @@ static const String preflection_string =
sizeof PREFLECTION - 1,
};
+extern const uintptr pointer_unsafe_Pointer_gc[]
+ __asm__ (GOSYM_PREFIX "__go_td_pN14_unsafe.Pointer$gc");
+
+const uintptr pointer_unsafe_Pointer_gc[] __attribute__((aligned(4))) =
+ {sizeof(void*), GC_APTR, 0, GC_END};
+
const struct __go_ptr_type pointer_unsafe_Pointer =
{
/* __common */
@@ -94,11 +106,11 @@ const struct __go_ptr_type pointer_unsafe_Pointer =
/* __hash */
1256018616U,
/* __hashfn */
- &__go_type_hash_identity_descriptor,
+ &runtime_pointerhash_descriptor,
/* __equalfn */
- &__go_type_equal_identity_descriptor,
+ &runtime_pointerequal_descriptor,
/* __gc */
- unsafe_Pointer_gc,
+ pointer_unsafe_Pointer_gc,
/* __reflection */
&preflection_string,
/* __uncommon */
diff --git a/libgo/runtime/go-unsetenv.c b/libgo/runtime/go-unsetenv.c
index 409436a0d3..21359975f2 100644
--- a/libgo/runtime/go-unsetenv.c
+++ b/libgo/runtime/go-unsetenv.c
@@ -9,10 +9,7 @@
#include <stddef.h>
#include <stdlib.h>
-#include "go-alloc.h"
#include "runtime.h"
-#include "arch.h"
-#include "malloc.h"
/* Unset an environment variable from Go. This is called by
syscall.Unsetenv. */
@@ -24,7 +21,6 @@ unsetenv_c (String k)
{
const byte *ks;
unsigned char *kn;
- intgo len;
ks = k.str;
if (ks == NULL)
@@ -33,14 +29,11 @@ unsetenv_c (String k)
#ifdef HAVE_UNSETENV
- if (ks != NULL && ks[k.len] != 0)
+ if (ks[k.len] != 0)
{
- // Objects that are explicitly freed must be at least 16 bytes in size,
- // so that they are not allocated using tiny alloc.
- len = k.len + 1;
- if (len < TinySize)
- len = TinySize;
- kn = __go_alloc (len);
+ kn = malloc (k.len + 1);
+ if (kn == NULL)
+ runtime_throw ("out of malloc memory");
__builtin_memcpy (kn, ks, k.len);
ks = kn;
}
@@ -50,5 +43,5 @@ unsetenv_c (String k)
#endif /* !defined(HAVE_UNSETENV) */
if (kn != NULL)
- __go_free (kn);
+ free (kn);
}
diff --git a/libgo/runtime/go-unwind.c b/libgo/runtime/go-unwind.c
index 87d9eb3ef4..4c9fb49c99 100644
--- a/libgo/runtime/go-unwind.c
+++ b/libgo/runtime/go-unwind.c
@@ -14,9 +14,6 @@
#include "unwind-pe.h"
#include "runtime.h"
-#include "go-alloc.h"
-#include "go-defer.h"
-#include "go-panic.h"
/* The code for a Go exception. */
@@ -35,111 +32,16 @@ static const _Unwind_Exception_Class __go_exception_class =
<< 8 | (_Unwind_Exception_Class) '\0');
#endif
+/* Rethrow an exception. */
-/* This function is called by exception handlers used when unwinding
- the stack after a recovered panic. The exception handler looks
- like this:
- __go_check_defer (frame);
- return;
- If we have not yet reached the frame we are looking for, we
- continue unwinding. */
+void rethrowException (void) __asm__(GOSYM_PREFIX "runtime.rethrowException");
void
-__go_check_defer (_Bool *frame)
+rethrowException ()
{
- G *g;
struct _Unwind_Exception *hdr;
- g = runtime_g ();
-
- if (g == NULL)
- {
- /* Some other language has thrown an exception. We know there
- are no defer handlers, so there is nothing to do. */
- }
- else if (g->is_foreign)
- {
- struct __go_panic_stack *n;
- _Bool was_recovered;
-
- /* Some other language has thrown an exception. We need to run
- the local defer handlers. If they call recover, we stop
- unwinding the stack here. */
-
- n = ((struct __go_panic_stack *)
- __go_alloc (sizeof (struct __go_panic_stack)));
-
- n->__arg.__type_descriptor = NULL;
- n->__arg.__object = NULL;
- n->__was_recovered = 0;
- n->__is_foreign = 1;
- n->__next = g->panic;
- g->panic = n;
-
- while (1)
- {
- struct __go_defer_stack *d;
- void (*pfn) (void *);
-
- d = g->defer;
- if (d == NULL || d->__frame != frame || d->__pfn == NULL)
- break;
-
- pfn = d->__pfn;
- g->defer = d->__next;
-
- (*pfn) (d->__arg);
-
- if (runtime_m () != NULL)
- runtime_freedefer (d);
-
- if (n->__was_recovered)
- {
- /* The recover function caught the panic thrown by some
- other language. */
- break;
- }
- }
-
- was_recovered = n->__was_recovered;
- g->panic = n->__next;
- __go_free (n);
-
- if (was_recovered)
- {
- /* Just return and continue executing Go code. */
- *frame = 1;
- return;
- }
-
- /* We are panicing through this function. */
- *frame = 0;
- }
- else if (g->defer != NULL
- && g->defer->__pfn == NULL
- && g->defer->__frame == frame)
- {
- struct __go_defer_stack *d;
-
- /* This is the defer function which called recover. Simply
- return to stop the stack unwind, and let the Go code continue
- to execute. */
- d = g->defer;
- g->defer = d->__next;
-
- if (runtime_m () != NULL)
- runtime_freedefer (d);
-
- /* We are returning from this function. */
- *frame = 1;
-
- return;
- }
-
- /* This is some other defer function. It was already run by the
- call to panic, or just above. Rethrow the exception. */
-
- hdr = (struct _Unwind_Exception *) g->exception;
+ hdr = (struct _Unwind_Exception *) runtime_g()->exception;
#ifdef __USING_SJLJ_EXCEPTIONS__
_Unwind_SjLj_Resume_or_Rethrow (hdr);
@@ -155,23 +57,48 @@ __go_check_defer (_Bool *frame)
abort();
}
-/* Unwind function calls until we reach the one which used a defer
- function which called recover. Each function which uses a defer
- statement will have an exception handler, as shown above. */
+/* Return the size of the type that holds an exception header, so that
+ it can be allocated by Go code. */
+
+uintptr unwindExceptionSize(void)
+ __asm__ (GOSYM_PREFIX "runtime.unwindExceptionSize");
+
+uintptr
+unwindExceptionSize ()
+{
+ uintptr ret, align;
+
+ ret = sizeof (struct _Unwind_Exception);
+ /* Adjust the size fo make sure that we can get an aligned value. */
+ align = __alignof__ (struct _Unwind_Exception);
+ if (align > __alignof__ (uintptr))
+ ret += align - __alignof__ (uintptr);
+ return ret;
+}
+
+/* Throw an exception. This is called with g->exception pointing to
+ an uninitialized _Unwind_Exception instance. */
+
+void throwException (void) __asm__(GOSYM_PREFIX "runtime.throwException");
void
-__go_unwind_stack ()
+throwException ()
{
struct _Unwind_Exception *hdr;
+ uintptr align;
+
+ hdr = (struct _Unwind_Exception *)runtime_g ()->exception;
+ /* Make sure the value is correctly aligned. It will be large
+ enough, because of unwindExceptionSize. */
+ align = __alignof__ (struct _Unwind_Exception);
hdr = ((struct _Unwind_Exception *)
- __go_alloc (sizeof (struct _Unwind_Exception)));
+ (((uintptr) hdr + align - 1) &~ (align - 1)));
+
__builtin_memcpy (&hdr->exception_class, &__go_exception_class,
sizeof hdr->exception_class);
hdr->exception_cleanup = NULL;
- runtime_g ()->exception = hdr;
-
#ifdef __USING_SJLJ_EXCEPTIONS__
_Unwind_SjLj_RaiseException (hdr);
#else
@@ -432,7 +359,7 @@ PERSONALITY_FUNCTION (int version,
else
{
g->exception = ue_header;
- g->is_foreign = is_foreign;
+ g->isforeign = is_foreign;
}
_Unwind_SetGR (context, __builtin_eh_return_data_regno (0),
diff --git a/libgo/runtime/heapdump.c b/libgo/runtime/heapdump.c
index d0cfb01478..4c673f4182 100644
--- a/libgo/runtime/heapdump.c
+++ b/libgo/runtime/heapdump.c
@@ -14,8 +14,6 @@
#include "malloc.h"
#include "mgc0.h"
#include "go-type.h"
-#include "go-defer.h"
-#include "go-panic.h"
#define hash __hash
#define KindNoPointers GO_NO_POINTERS
@@ -265,15 +263,15 @@ dumpgoroutine(G *gp)
dumpint((uintptr)0);
dumpint(gp->goid);
dumpint(gp->gopc);
- dumpint(gp->status);
+ dumpint(gp->atomicstatus);
dumpbool(gp->issystem);
dumpbool(gp->isbackground);
dumpint(gp->waitsince);
- dumpcstr((const int8 *)gp->waitreason);
+ dumpstr(gp->waitreason);
dumpint((uintptr)0);
dumpint((uintptr)gp->m);
- dumpint((uintptr)gp->defer);
- dumpint((uintptr)gp->panic);
+ dumpint((uintptr)gp->_defer);
+ dumpint((uintptr)gp->_panic);
// dump stack
// child.args.n = -1;
@@ -285,24 +283,24 @@ dumpgoroutine(G *gp)
// runtime_gentraceback(pc, sp, lr, gp, 0, nil, 0x7fffffff, dumpframe, &child, false);
// dump defer & panic records
- for(d = gp->defer; d != nil; d = d->__next) {
+ for(d = gp->_defer; d != nil; d = d->link) {
dumpint(TagDefer);
dumpint((uintptr)d);
dumpint((uintptr)gp);
- dumpint((uintptr)d->__arg);
- dumpint((uintptr)d->__frame);
- dumpint((uintptr)d->__pfn);
+ dumpint((uintptr)d->arg);
+ dumpint((uintptr)d->frame);
+ dumpint((uintptr)d->pfn);
dumpint((uintptr)0);
- dumpint((uintptr)d->__next);
+ dumpint((uintptr)d->link);
}
- for (p = gp->panic; p != nil; p = p->__next) {
+ for (p = gp->_panic; p != nil; p = p->link) {
dumpint(TagPanic);
dumpint((uintptr)p);
dumpint((uintptr)gp);
- dumpint((uintptr)p->__arg.__type_descriptor);
- dumpint((uintptr)p->__arg.__object);
+ dumpint((uintptr)p->arg._type);
+ dumpint((uintptr)p->arg.data);
dumpint((uintptr)0);
- dumpint((uintptr)p->__next);
+ dumpint((uintptr)p->link);
}
}
@@ -313,17 +311,17 @@ dumpgs(void)
uint32 i;
// goroutines & stacks
- for(i = 0; i < runtime_allglen; i++) {
- gp = runtime_allg[i];
- switch(gp->status){
+ for(i = 0; i < runtime_getallglen(); i++) {
+ gp = runtime_getallg(i);
+ switch(gp->atomicstatus){
default:
- runtime_printf("unexpected G.status %d\n", gp->status);
+ runtime_printf("unexpected G.status %d\n", gp->atomicstatus);
runtime_throw("mark - bad status");
- case Gdead:
+ case _Gdead:
break;
- case Grunnable:
- case Gsyscall:
- case Gwaiting:
+ case _Grunnable:
+ case _Gsyscall:
+ case _Gwaiting:
dumpgoroutine(gp);
break;
}
@@ -463,7 +461,7 @@ dumpparams(void)
else
dumpbool(true); // big-endian ptrs
dumpint(PtrSize);
- dumpint(runtime_Hchansize);
+ dumpint(hchanSize);
dumpint((uintptr)runtime_mheap.arena_start);
dumpint((uintptr)runtime_mheap.arena_used);
dumpint(0);
@@ -476,7 +474,7 @@ dumpms(void)
{
M *mp;
- for(mp = runtime_allm; mp != nil; mp = mp->alllink) {
+ for(mp = runtime_getallm(); mp != nil; mp = mp->alllink) {
dumpint(TagOSThread);
dumpint((uintptr)mp);
dumpint(mp->id);
@@ -490,33 +488,33 @@ dumpmemstats(void)
int32 i;
dumpint(TagMemStats);
- dumpint(mstats.alloc);
- dumpint(mstats.total_alloc);
- dumpint(mstats.sys);
- dumpint(mstats.nlookup);
- dumpint(mstats.nmalloc);
- dumpint(mstats.nfree);
- dumpint(mstats.heap_alloc);
- dumpint(mstats.heap_sys);
- dumpint(mstats.heap_idle);
- dumpint(mstats.heap_inuse);
- dumpint(mstats.heap_released);
- dumpint(mstats.heap_objects);
- dumpint(mstats.stacks_inuse);
- dumpint(mstats.stacks_sys);
- dumpint(mstats.mspan_inuse);
- dumpint(mstats.mspan_sys);
- dumpint(mstats.mcache_inuse);
- dumpint(mstats.mcache_sys);
- dumpint(mstats.buckhash_sys);
- dumpint(mstats.gc_sys);
- dumpint(mstats.other_sys);
- dumpint(mstats.next_gc);
- dumpint(mstats.last_gc);
- dumpint(mstats.pause_total_ns);
+ dumpint(mstats()->alloc);
+ dumpint(mstats()->total_alloc);
+ dumpint(mstats()->sys);
+ dumpint(mstats()->nlookup);
+ dumpint(mstats()->nmalloc);
+ dumpint(mstats()->nfree);
+ dumpint(mstats()->heap_alloc);
+ dumpint(mstats()->heap_sys);
+ dumpint(mstats()->heap_idle);
+ dumpint(mstats()->heap_inuse);
+ dumpint(mstats()->heap_released);
+ dumpint(mstats()->heap_objects);
+ dumpint(mstats()->stacks_inuse);
+ dumpint(mstats()->stacks_sys);
+ dumpint(mstats()->mspan_inuse);
+ dumpint(mstats()->mspan_sys);
+ dumpint(mstats()->mcache_inuse);
+ dumpint(mstats()->mcache_sys);
+ dumpint(mstats()->buckhash_sys);
+ dumpint(mstats()->gc_sys);
+ dumpint(mstats()->other_sys);
+ dumpint(mstats()->next_gc);
+ dumpint(mstats()->last_gc);
+ dumpint(mstats()->pause_total_ns);
for(i = 0; i < 256; i++)
- dumpint(mstats.pause_ns[i]);
- dumpint(mstats.numgc);
+ dumpint(mstats()->pause_ns[i]);
+ dumpint(mstats()->numgc);
}
static void
@@ -546,6 +544,8 @@ dumpmemprof_callback(Bucket *b, uintptr nstk, Location *stk, uintptr size, uintp
dumpint(frees);
}
+static FuncVal dumpmemprof_callbackv = {(void(*)(void))dumpmemprof_callback};
+
static void
dumpmemprof(void)
{
@@ -555,7 +555,7 @@ dumpmemprof(void)
SpecialProfile *spp;
byte *p;
- runtime_iterate_memprof(dumpmemprof_callback);
+ runtime_iterate_memprof(&dumpmemprof_callbackv);
allspans = runtime_mheap.allspans;
for(spanidx=0; spanidx<runtime_mheap.nspan; spanidx++) {
@@ -602,7 +602,7 @@ mdump(G *gp)
flush();
gp->param = nil;
- gp->status = Grunning;
+ gp->atomicstatus = _Grunning;
runtime_gogo(gp);
}
@@ -616,11 +616,10 @@ runtime_debug_WriteHeapDump(uintptr fd)
G *g;
// Stop the world.
- runtime_semacquire(&runtime_worldsema, false);
+ runtime_acquireWorldsema();
m = runtime_m();
- m->gcing = 1;
- m->locks++;
- runtime_stoptheworld();
+ m->preemptoff = runtime_gostringnocopy((const byte*)"write heap dump");
+ runtime_stopTheWorldWithSema();
// Update stats so we can dump them.
// As a side effect, flushes all the MCaches so the MSpan.freelist
@@ -632,18 +631,17 @@ runtime_debug_WriteHeapDump(uintptr fd)
// Call dump routine on M stack.
g = runtime_g();
- g->status = Gwaiting;
- g->waitreason = "dumping heap";
+ g->atomicstatus = _Gwaiting;
+ g->waitreason = runtime_gostringnocopy((const byte*)"dumping heap");
runtime_mcall(mdump);
// Reset dump file.
dumpfd = 0;
// Start up the world again.
- m->gcing = 0;
- runtime_semrelease(&runtime_worldsema);
- runtime_starttheworld();
- m->locks--;
+ runtime_startTheWorldWithSema();
+ runtime_releaseWorldsema();
+ m->preemptoff = runtime_gostringnocopy(nil);
}
// Runs the specified gc program. Calls the callback for every
@@ -763,14 +761,16 @@ dumpefacetypes(void *obj __attribute__ ((unused)), uintptr size, const Type *typ
//playgcprog(0, (uintptr*)type->gc + 1, dumpeface_callback, obj);
break;
case TypeInfo_Array:
- for(i = 0; i <= size - type->__size; i += type->__size)
+ for(i = 0; i <= size - type->__size; i += type->__size) {
//playgcprog(i, (uintptr*)type->gc + 1, dumpeface_callback, obj);
+ }
break;
case TypeInfo_Chan:
if(type->__size == 0) // channels may have zero-sized objects in them
break;
- for(i = runtime_Hchansize; i <= size - type->__size; i += type->__size)
+ for(i = hchanSize; i <= size - type->__size; i += type->__size) {
//playgcprog(i, (uintptr*)type->gc + 1, dumpeface_callback, obj);
+ }
break;
}
}
diff --git a/libgo/runtime/interface.h b/libgo/runtime/interface.h
deleted file mode 100644
index f3068a656f..0000000000
--- a/libgo/runtime/interface.h
+++ /dev/null
@@ -1,57 +0,0 @@
-/* interface.h -- the interface type for Go.
-
- Copyright 2009 The Go Authors. All rights reserved.
- Use of this source code is governed by a BSD-style
- license that can be found in the LICENSE file. */
-
-#ifndef LIBGO_INTERFACE_H
-#define LIBGO_INTERFACE_H
-
-struct __go_type_descriptor;
-
-/* A variable of interface type is an instance of this struct, if the
- interface has any methods. */
-
-struct __go_interface
-{
- /* A pointer to the interface method table. The first pointer is
- the type descriptor of the object. Subsequent pointers are
- pointers to functions. This is effectively the vtable for this
- interface. The function pointers are in the same order as the
- list in the internal representation of the interface, which sorts
- them by name. */
- const void **__methods;
-
- /* The object. If the object is a pointer--if the type descriptor
- code is GO_PTR or GO_UNSAFE_POINTER--then this field is the value
- of the object itself. Otherwise this is a pointer to memory
- which holds the value. */
- void *__object;
-};
-
-/* A variable of an empty interface type is an instance of this
- struct. */
-
-struct __go_empty_interface
-{
- /* The type descriptor of the object. */
- const struct __go_type_descriptor *__type_descriptor;
-
- /* The object. This is the same as __go_interface above. */
- void *__object;
-};
-
-extern void *
-__go_convert_interface (const struct __go_type_descriptor *,
- const struct __go_type_descriptor *);
-
-extern void *
-__go_convert_interface_2 (const struct __go_type_descriptor *,
- const struct __go_type_descriptor *,
- _Bool may_fail);
-
-extern _Bool
-__go_can_convert_to_interface(const struct __go_type_descriptor *,
- const struct __go_type_descriptor *);
-
-#endif /* !defined(LIBGO_INTERFACE_H) */
diff --git a/libgo/runtime/lfstack.goc b/libgo/runtime/lfstack.goc
deleted file mode 100644
index 5ab1baa436..0000000000
--- a/libgo/runtime/lfstack.goc
+++ /dev/null
@@ -1,95 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Lock-free stack.
-
-package runtime
-#include "runtime.h"
-#include "arch.h"
-
-#if __SIZEOF_POINTER__ == 8
-// SPARC64 and Solaris on AMD64 uses all 64 bits of virtual addresses.
-// Use low-order three bits as ABA counter.
-// http://docs.oracle.com/cd/E19120-01/open.solaris/816-5138/6mba6ua5p/index.html
-# if defined(__sparc__) || (defined(__sun__) && defined(__amd64__))
-static inline uint64 lfPack(LFNode *node, uintptr cnt) {
- return ((uint64)(node)) | ((cnt)&7);
-}
-static inline LFNode* lfUnpack(uint64 val) {
- return (LFNode*)(val&~7);
-}
-# else
-# if defined(__aarch64__)
-// Depending on the kernel options, pointers on arm64 can have up to 48 significant
-// bits (see https://www.kernel.org/doc/Documentation/arm64/memory.txt).
-# define PTR_BITS 48
-# else
-// Amd64 uses 48-bit virtual addresses, 47-th bit is used as kernel/user flag.
-// So we use 17msb of pointers as ABA counter.
-# define PTR_BITS 47
-# endif
-# define CNT_BITS (64 - PTR_BITS + 3)
-static inline uint64 lfPack(LFNode *node, uintptr cnt) {
- return ((uint64)(node)<<(64-PTR_BITS)) | (cnt&(((1<<CNT_BITS)-1)));
-}
-static inline LFNode* lfUnpack(uint64 val) {
- return (LFNode*)((val >> CNT_BITS) << 3);
-}
-# endif
-#else
-static inline uint64 lfPack(LFNode *node, uintptr cnt) {
- return ((uint64)(uintptr)(node)<<32) | cnt;
-}
-static inline LFNode* lfUnpack(uint64 val) {
- return (LFNode*)(uintptr)(val >> 32);
-}
-#endif
-
-void
-runtime_lfstackpush(uint64 *head, LFNode *node)
-{
- uint64 old, new;
-
- if(node != lfUnpack(lfPack(node, 0))) {
- runtime_printf("p=%p\n", node);
- runtime_throw("runtime_lfstackpush: invalid pointer");
- }
-
- node->pushcnt++;
- new = lfPack(node, node->pushcnt);
- for(;;) {
- old = runtime_atomicload64(head);
- node->next = lfUnpack(old);
- if(runtime_cas64(head, old, new))
- break;
- }
-}
-
-LFNode*
-runtime_lfstackpop(uint64 *head)
-{
- LFNode *node, *node2;
- uint64 old, new;
-
- for(;;) {
- old = runtime_atomicload64(head);
- if(old == 0)
- return nil;
- node = lfUnpack(old);
- node2 = runtime_atomicloadp(&node->next);
- new = 0;
- if(node2 != nil)
- new = lfPack(node2, node2->pushcnt);
- if(runtime_cas64(head, old, new))
- return node;
- }
-}
-
-func lfstackpush_go(head *uint64, node *LFNode) {
- runtime_lfstackpush(head, node);
-}
-
-func lfstackpop_go(head *uint64) (node *LFNode) {
- node = runtime_lfstackpop(head);
-}
diff --git a/libgo/runtime/lock_futex.c b/libgo/runtime/lock_futex.c
deleted file mode 100644
index 33ef073c90..0000000000
--- a/libgo/runtime/lock_futex.c
+++ /dev/null
@@ -1,204 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build dragonfly freebsd linux
-
-#include "runtime.h"
-
-// This implementation depends on OS-specific implementations of
-//
-// runtime_futexsleep(uint32 *addr, uint32 val, int64 ns)
-// Atomically,
-// if(*addr == val) sleep
-// Might be woken up spuriously; that's allowed.
-// Don't sleep longer than ns; ns < 0 means forever.
-//
-// runtime_futexwakeup(uint32 *addr, uint32 cnt)
-// If any procs are sleeping on addr, wake up at most cnt.
-
-enum
-{
- MUTEX_UNLOCKED = 0,
- MUTEX_LOCKED = 1,
- MUTEX_SLEEPING = 2,
-
- ACTIVE_SPIN = 4,
- ACTIVE_SPIN_CNT = 30,
- PASSIVE_SPIN = 1,
-};
-
-// Possible lock states are MUTEX_UNLOCKED, MUTEX_LOCKED and MUTEX_SLEEPING.
-// MUTEX_SLEEPING means that there is presumably at least one sleeping thread.
-// Note that there can be spinning threads during all states - they do not
-// affect mutex's state.
-void
-runtime_lock(Lock *l)
-{
- uint32 i, v, wait, spin;
-
- if(runtime_m()->locks++ < 0)
- runtime_throw("runtime_lock: lock count");
-
- // Speculative grab for lock.
- v = runtime_xchg((uint32*)&l->key, MUTEX_LOCKED);
- if(v == MUTEX_UNLOCKED)
- return;
-
- // wait is either MUTEX_LOCKED or MUTEX_SLEEPING
- // depending on whether there is a thread sleeping
- // on this mutex. If we ever change l->key from
- // MUTEX_SLEEPING to some other value, we must be
- // careful to change it back to MUTEX_SLEEPING before
- // returning, to ensure that the sleeping thread gets
- // its wakeup call.
- wait = v;
-
- // On uniprocessor's, no point spinning.
- // On multiprocessors, spin for ACTIVE_SPIN attempts.
- spin = 0;
- if(runtime_ncpu > 1)
- spin = ACTIVE_SPIN;
-
- for(;;) {
- // Try for lock, spinning.
- for(i = 0; i < spin; i++) {
- while(l->key == MUTEX_UNLOCKED)
- if(runtime_cas((uint32*)&l->key, MUTEX_UNLOCKED, wait))
- return;
- runtime_procyield(ACTIVE_SPIN_CNT);
- }
-
- // Try for lock, rescheduling.
- for(i=0; i < PASSIVE_SPIN; i++) {
- while(l->key == MUTEX_UNLOCKED)
- if(runtime_cas((uint32*)&l->key, MUTEX_UNLOCKED, wait))
- return;
- runtime_osyield();
- }
-
- // Sleep.
- v = runtime_xchg((uint32*)&l->key, MUTEX_SLEEPING);
- if(v == MUTEX_UNLOCKED)
- return;
- wait = MUTEX_SLEEPING;
- runtime_futexsleep((uint32*)&l->key, MUTEX_SLEEPING, -1);
- }
-}
-
-void
-runtime_unlock(Lock *l)
-{
- uint32 v;
-
- v = runtime_xchg((uint32*)&l->key, MUTEX_UNLOCKED);
- if(v == MUTEX_UNLOCKED)
- runtime_throw("unlock of unlocked lock");
- if(v == MUTEX_SLEEPING)
- runtime_futexwakeup((uint32*)&l->key, 1);
-
- if(--runtime_m()->locks < 0)
- runtime_throw("runtime_unlock: lock count");
-}
-
-// One-time notifications.
-void
-runtime_noteclear(Note *n)
-{
- n->key = 0;
-}
-
-void
-runtime_notewakeup(Note *n)
-{
- uint32 old;
-
- old = runtime_xchg((uint32*)&n->key, 1);
- if(old != 0) {
- runtime_printf("notewakeup - double wakeup (%d)\n", old);
- runtime_throw("notewakeup - double wakeup");
- }
- runtime_futexwakeup((uint32*)&n->key, 1);
-}
-
-void
-runtime_notesleep(Note *n)
-{
- M *m = runtime_m();
-
- /* For gccgo it's OK to sleep in non-g0, and it happens in
- stoptheworld because we have not implemented preemption.
-
- if(runtime_g() != runtime_m()->g0)
- runtime_throw("notesleep not on g0");
- */
- while(runtime_atomicload((uint32*)&n->key) == 0) {
- m->blocked = true;
- runtime_futexsleep((uint32*)&n->key, 0, -1);
- m->blocked = false;
- }
-}
-
-static bool
-notetsleep(Note *n, int64 ns, int64 deadline, int64 now)
-{
- M *m = runtime_m();
-
- // Conceptually, deadline and now are local variables.
- // They are passed as arguments so that the space for them
- // does not count against our nosplit stack sequence.
-
- if(ns < 0) {
- while(runtime_atomicload((uint32*)&n->key) == 0) {
- m->blocked = true;
- runtime_futexsleep((uint32*)&n->key, 0, -1);
- m->blocked = false;
- }
- return true;
- }
-
- if(runtime_atomicload((uint32*)&n->key) != 0)
- return true;
-
- deadline = runtime_nanotime() + ns;
- for(;;) {
- m->blocked = true;
- runtime_futexsleep((uint32*)&n->key, 0, ns);
- m->blocked = false;
- if(runtime_atomicload((uint32*)&n->key) != 0)
- break;
- now = runtime_nanotime();
- if(now >= deadline)
- break;
- ns = deadline - now;
- }
- return runtime_atomicload((uint32*)&n->key) != 0;
-}
-
-bool
-runtime_notetsleep(Note *n, int64 ns)
-{
- bool res;
-
- if(runtime_g() != runtime_m()->g0 && !runtime_m()->gcing)
- runtime_throw("notetsleep not on g0");
-
- res = notetsleep(n, ns, 0, 0);
- return res;
-}
-
-// same as runtime_notetsleep, but called on user g (not g0)
-// calls only nosplit functions between entersyscallblock/exitsyscall
-bool
-runtime_notetsleepg(Note *n, int64 ns)
-{
- bool res;
-
- if(runtime_g() == runtime_m()->g0)
- runtime_throw("notetsleepg on g0");
-
- runtime_entersyscallblock();
- res = notetsleep(n, ns, 0, 0);
- runtime_exitsyscall();
- return res;
-}
diff --git a/libgo/runtime/lock_sema.c b/libgo/runtime/lock_sema.c
deleted file mode 100644
index ef611fb36a..0000000000
--- a/libgo/runtime/lock_sema.c
+++ /dev/null
@@ -1,281 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build darwin nacl netbsd openbsd plan9 solaris windows
-
-#include "runtime.h"
-
-// This implementation depends on OS-specific implementations of
-//
-// uintptr runtime_semacreate(void)
-// Create a semaphore, which will be assigned to m->waitsema.
-// The zero value is treated as absence of any semaphore,
-// so be sure to return a non-zero value.
-//
-// int32 runtime_semasleep(int64 ns)
-// If ns < 0, acquire m->waitsema and return 0.
-// If ns >= 0, try to acquire m->waitsema for at most ns nanoseconds.
-// Return 0 if the semaphore was acquired, -1 if interrupted or timed out.
-//
-// int32 runtime_semawakeup(M *mp)
-// Wake up mp, which is or will soon be sleeping on mp->waitsema.
-//
-
-enum
-{
- LOCKED = 1,
-
- ACTIVE_SPIN = 4,
- ACTIVE_SPIN_CNT = 30,
- PASSIVE_SPIN = 1,
-};
-
-void
-runtime_lock(Lock *l)
-{
- M *m;
- uintptr v;
- uint32 i, spin;
-
- m = runtime_m();
- if(m->locks++ < 0)
- runtime_throw("runtime_lock: lock count");
-
- // Speculative grab for lock.
- if(runtime_casp((void**)&l->key, nil, (void*)LOCKED))
- return;
-
- if(m->waitsema == 0)
- m->waitsema = runtime_semacreate();
-
- // On uniprocessor's, no point spinning.
- // On multiprocessors, spin for ACTIVE_SPIN attempts.
- spin = 0;
- if(runtime_ncpu > 1)
- spin = ACTIVE_SPIN;
-
- for(i=0;; i++) {
- v = (uintptr)runtime_atomicloadp((void**)&l->key);
- if((v&LOCKED) == 0) {
-unlocked:
- if(runtime_casp((void**)&l->key, (void*)v, (void*)(v|LOCKED)))
- return;
- i = 0;
- }
- if(i<spin)
- runtime_procyield(ACTIVE_SPIN_CNT);
- else if(i<spin+PASSIVE_SPIN)
- runtime_osyield();
- else {
- // Someone else has it.
- // l->waitm points to a linked list of M's waiting
- // for this lock, chained through m->nextwaitm.
- // Queue this M.
- for(;;) {
- m->nextwaitm = (void*)(v&~LOCKED);
- if(runtime_casp((void**)&l->key, (void*)v, (void*)((uintptr)m|LOCKED)))
- break;
- v = (uintptr)runtime_atomicloadp((void**)&l->key);
- if((v&LOCKED) == 0)
- goto unlocked;
- }
- if(v&LOCKED) {
- // Queued. Wait.
- runtime_semasleep(-1);
- i = 0;
- }
- }
- }
-}
-
-void
-runtime_unlock(Lock *l)
-{
- uintptr v;
- M *mp;
-
- for(;;) {
- v = (uintptr)runtime_atomicloadp((void**)&l->key);
- if(v == LOCKED) {
- if(runtime_casp((void**)&l->key, (void*)LOCKED, nil))
- break;
- } else {
- // Other M's are waiting for the lock.
- // Dequeue an M.
- mp = (void*)(v&~LOCKED);
- if(runtime_casp((void**)&l->key, (void*)v, mp->nextwaitm)) {
- // Dequeued an M. Wake it.
- runtime_semawakeup(mp);
- break;
- }
- }
- }
-
- if(--runtime_m()->locks < 0)
- runtime_throw("runtime_unlock: lock count");
-}
-
-// One-time notifications.
-void
-runtime_noteclear(Note *n)
-{
- n->key = 0;
-}
-
-void
-runtime_notewakeup(Note *n)
-{
- M *mp;
-
- do
- mp = runtime_atomicloadp((void**)&n->key);
- while(!runtime_casp((void**)&n->key, mp, (void*)LOCKED));
-
- // Successfully set waitm to LOCKED.
- // What was it before?
- if(mp == nil) {
- // Nothing was waiting. Done.
- } else if(mp == (M*)LOCKED) {
- // Two notewakeups! Not allowed.
- runtime_throw("notewakeup - double wakeup");
- } else {
- // Must be the waiting m. Wake it up.
- runtime_semawakeup(mp);
- }
-}
-
-void
-runtime_notesleep(Note *n)
-{
- M *m;
-
- m = runtime_m();
-
- /* For gccgo it's OK to sleep in non-g0, and it happens in
- stoptheworld because we have not implemented preemption.
-
- if(runtime_g() != m->g0)
- runtime_throw("notesleep not on g0");
- */
-
- if(m->waitsema == 0)
- m->waitsema = runtime_semacreate();
- if(!runtime_casp((void**)&n->key, nil, m)) { // must be LOCKED (got wakeup)
- if(n->key != LOCKED)
- runtime_throw("notesleep - waitm out of sync");
- return;
- }
- // Queued. Sleep.
- m->blocked = true;
- runtime_semasleep(-1);
- m->blocked = false;
-}
-
-static bool
-notetsleep(Note *n, int64 ns, int64 deadline, M *mp)
-{
- M *m;
-
- m = runtime_m();
-
- // Conceptually, deadline and mp are local variables.
- // They are passed as arguments so that the space for them
- // does not count against our nosplit stack sequence.
-
- // Register for wakeup on n->waitm.
- if(!runtime_casp((void**)&n->key, nil, m)) { // must be LOCKED (got wakeup already)
- if(n->key != LOCKED)
- runtime_throw("notetsleep - waitm out of sync");
- return true;
- }
-
- if(ns < 0) {
- // Queued. Sleep.
- m->blocked = true;
- runtime_semasleep(-1);
- m->blocked = false;
- return true;
- }
-
- deadline = runtime_nanotime() + ns;
- for(;;) {
- // Registered. Sleep.
- m->blocked = true;
- if(runtime_semasleep(ns) >= 0) {
- m->blocked = false;
- // Acquired semaphore, semawakeup unregistered us.
- // Done.
- return true;
- }
- m->blocked = false;
-
- // Interrupted or timed out. Still registered. Semaphore not acquired.
- ns = deadline - runtime_nanotime();
- if(ns <= 0)
- break;
- // Deadline hasn't arrived. Keep sleeping.
- }
-
- // Deadline arrived. Still registered. Semaphore not acquired.
- // Want to give up and return, but have to unregister first,
- // so that any notewakeup racing with the return does not
- // try to grant us the semaphore when we don't expect it.
- for(;;) {
- mp = runtime_atomicloadp((void**)&n->key);
- if(mp == m) {
- // No wakeup yet; unregister if possible.
- if(runtime_casp((void**)&n->key, mp, nil))
- return false;
- } else if(mp == (M*)LOCKED) {
- // Wakeup happened so semaphore is available.
- // Grab it to avoid getting out of sync.
- m->blocked = true;
- if(runtime_semasleep(-1) < 0)
- runtime_throw("runtime: unable to acquire - semaphore out of sync");
- m->blocked = false;
- return true;
- } else
- runtime_throw("runtime: unexpected waitm - semaphore out of sync");
- }
-}
-
-bool
-runtime_notetsleep(Note *n, int64 ns)
-{
- M *m;
- bool res;
-
- m = runtime_m();
-
- if(runtime_g() != m->g0 && !m->gcing)
- runtime_throw("notetsleep not on g0");
-
- if(m->waitsema == 0)
- m->waitsema = runtime_semacreate();
-
- res = notetsleep(n, ns, 0, nil);
- return res;
-}
-
-// same as runtime_notetsleep, but called on user g (not g0)
-// calls only nosplit functions between entersyscallblock/exitsyscall
-bool
-runtime_notetsleepg(Note *n, int64 ns)
-{
- M *m;
- bool res;
-
- m = runtime_m();
-
- if(runtime_g() == m->g0)
- runtime_throw("notetsleepg on g0");
-
- if(m->waitsema == 0)
- m->waitsema = runtime_semacreate();
-
- runtime_entersyscallblock();
- res = notetsleep(n, ns, 0, nil);
- runtime_exitsyscall();
- return res;
-}
diff --git a/libgo/runtime/malloc.goc b/libgo/runtime/malloc.goc
index 0d8629277f..232210fc4e 100644
--- a/libgo/runtime/malloc.goc
+++ b/libgo/runtime/malloc.goc
@@ -10,22 +10,15 @@ package runtime
#include <stddef.h>
#include <errno.h>
#include <stdlib.h>
-#include "go-alloc.h"
#include "runtime.h"
#include "arch.h"
#include "malloc.h"
-#include "interface.h"
#include "go-type.h"
// Map gccgo field names to gc field names.
-// Eface aka __go_empty_interface.
-#define type __type_descriptor
// Type aka __go_type_descriptor
#define kind __code
#define string __reflection
-#define KindPtr GO_PTR
-#define KindNoPointers GO_NO_POINTERS
-#define kindMask GO_CODE_MASK
// GCCGO SPECIFIC CHANGE
//
@@ -54,12 +47,9 @@ package runtime
// Mark mheap as 'no pointers', it does not contain interesting pointers but occupies ~45K.
MHeap runtime_mheap;
-MStats mstats;
int32 runtime_checking;
-extern MStats mstats; // defined in zruntime_def_$GOOS_$GOARCH.go
-
extern volatile intgo runtime_MemProfileRate
__asm__ (GOSYM_PREFIX "runtime.MemProfileRate");
@@ -84,31 +74,33 @@ runtime_mallocgc(uintptr size, uintptr typ, uint32 flag)
MLink *v, *next;
byte *tiny;
bool incallback;
+ MStats *pmstats;
if(size == 0) {
// All 0-length allocations use this pointer.
// The language does not require the allocations to
// have distinct values.
- return &runtime_zerobase;
+ return runtime_getZerobase();
}
- m = runtime_m();
g = runtime_g();
+ m = g->m;
incallback = false;
- if(m->mcache == nil && g->ncgo > 0) {
+ if(m->mcache == nil && m->ncgo > 0) {
// For gccgo this case can occur when a cgo or SWIG function
// has an interface return type and the function
// returns a non-pointer, so memory allocation occurs
// after syscall.Cgocall but before syscall.CgocallDone.
// We treat it as a callback.
- runtime_exitsyscall();
+ runtime_exitsyscall(0);
m = runtime_m();
incallback = true;
flag |= FlagNoInvokeGC;
}
- if(runtime_gcwaiting() && g != m->g0 && m->locks == 0 && !(flag & FlagNoInvokeGC)) {
+ if((g->preempt || runtime_gcwaiting()) && g != m->g0 && m->locks == 0 && !(flag & FlagNoInvokeGC) && m->preemptoff.len == 0) {
+ g->preempt = false;
runtime_gosched();
m = runtime_m();
}
@@ -165,16 +157,16 @@ runtime_mallocgc(uintptr size, uintptr typ, uint32 flag)
tiny = (byte*)ROUND((uintptr)tiny, 4);
else if((size&1) == 0)
tiny = (byte*)ROUND((uintptr)tiny, 2);
- size1 = size + (tiny - c->tiny);
+ size1 = size + (tiny - (byte*)c->tiny);
if(size1 <= tinysize) {
// The object fits into existing tiny block.
v = (MLink*)tiny;
- c->tiny += size1;
+ c->tiny = (byte*)c->tiny + size1;
c->tinysize -= size1;
m->mallocing = 0;
m->locks--;
if(incallback)
- runtime_entersyscall();
+ runtime_entersyscall(0);
return v;
}
}
@@ -255,11 +247,12 @@ runtime_mallocgc(uintptr size, uintptr typ, uint32 flag)
m->locks--;
- if(!(flag & FlagNoInvokeGC) && mstats.heap_alloc >= mstats.next_gc)
+ pmstats = mstats();
+ if(!(flag & FlagNoInvokeGC) && pmstats->heap_alloc >= pmstats->next_gc)
runtime_gc(0);
if(incallback)
- runtime_entersyscall();
+ runtime_entersyscall(0);
return v;
}
@@ -281,7 +274,7 @@ largealloc(uint32 flag, uintptr *sizep)
s = runtime_MHeap_Alloc(&runtime_mheap, npages, 0, 1, !(flag & FlagNoZero));
if(s == nil)
runtime_throw("out of memory");
- s->limit = (byte*)(s->start<<PageShift) + size;
+ s->limit = (uintptr)((byte*)(s->start<<PageShift) + size);
*sizep = npages<<PageShift;
v = (void*)(s->start << PageShift);
// setup for mark sweep
@@ -303,7 +296,7 @@ runtime_profilealloc(void *v, uintptr size)
// If you change this, also change allocmcache.
if(rate > 0x3fffffff) // make 2*rate not overflow
rate = 0x3fffffff;
- next = runtime_fastrand1() % (2*rate);
+ next = runtime_fastrand() % (2*rate);
// Subtract the "remainder" of the current allocation.
// Otherwise objects that are close in size to sampling rate
// will be under-sampled, because we consistently discard this remainder.
@@ -315,107 +308,6 @@ runtime_profilealloc(void *v, uintptr size)
runtime_MProf_Malloc(v, size);
}
-void*
-__go_alloc(uintptr size)
-{
- return runtime_mallocgc(size, 0, FlagNoInvokeGC);
-}
-
-// Free the object whose base pointer is v.
-void
-__go_free(void *v)
-{
- M *m;
- int32 sizeclass;
- MSpan *s;
- MCache *c;
- uintptr size;
-
- if(v == nil)
- return;
-
- // If you change this also change mgc0.c:/^sweep,
- // which has a copy of the guts of free.
-
- m = runtime_m();
- if(m->mallocing)
- runtime_throw("malloc/free - deadlock");
- m->mallocing = 1;
-
- if(!runtime_mlookup(v, nil, nil, &s)) {
- runtime_printf("free %p: not an allocated block\n", v);
- runtime_throw("free runtime_mlookup");
- }
- size = s->elemsize;
- sizeclass = s->sizeclass;
- // Objects that are smaller than TinySize can be allocated using tiny alloc,
- // if then such object is combined with an object with finalizer, we will crash.
- if(size < TinySize)
- runtime_throw("freeing too small block");
-
- if(runtime_debug.allocfreetrace)
- runtime_tracefree(v, size);
-
- // Ensure that the span is swept.
- // If we free into an unswept span, we will corrupt GC bitmaps.
- runtime_MSpan_EnsureSwept(s);
-
- if(s->specials != nil)
- runtime_freeallspecials(s, v, size);
-
- c = m->mcache;
- if(sizeclass == 0) {
- // Large object.
- s->needzero = 1;
- // Must mark v freed before calling unmarkspan and MHeap_Free:
- // they might coalesce v into other spans and change the bitmap further.
- runtime_markfreed(v);
- runtime_unmarkspan(v, 1<<PageShift);
- // NOTE(rsc,dvyukov): The original implementation of efence
- // in CL 22060046 used SysFree instead of SysFault, so that
- // the operating system would eventually give the memory
- // back to us again, so that an efence program could run
- // longer without running out of memory. Unfortunately,
- // calling SysFree here without any kind of adjustment of the
- // heap data structures means that when the memory does
- // come back to us, we have the wrong metadata for it, either in
- // the MSpan structures or in the garbage collection bitmap.
- // Using SysFault here means that the program will run out of
- // memory fairly quickly in efence mode, but at least it won't
- // have mysterious crashes due to confused memory reuse.
- // It should be possible to switch back to SysFree if we also
- // implement and then call some kind of MHeap_DeleteSpan.
- if(runtime_debug.efence)
- runtime_SysFault((void*)(s->start<<PageShift), size);
- else
- runtime_MHeap_Free(&runtime_mheap, s, 1);
- c->local_nlargefree++;
- c->local_largefree += size;
- } else {
- // Small object.
- if(size > 2*sizeof(uintptr))
- ((uintptr*)v)[1] = (uintptr)0xfeedfeedfeedfeedll; // mark as "needs to be zeroed"
- else if(size > sizeof(uintptr))
- ((uintptr*)v)[1] = 0;
- // Must mark v freed before calling MCache_Free:
- // it might coalesce v and other blocks into a bigger span
- // and change the bitmap further.
- c->local_nsmallfree[sizeclass]++;
- c->local_cachealloc -= size;
- if(c->alloc[sizeclass] == s) {
- // We own the span, so we can just add v to the freelist
- runtime_markfreed(v);
- ((MLink*)v)->next = s->freelist;
- s->freelist = v;
- s->ref--;
- } else {
- // Someone else owns this span. Add to free queue.
- runtime_MCache_Free(c, v, sizeclass, size);
- }
- }
- m->mallocing = 0;
-}
-
int32
runtime_mlookup(void *v, byte **base, uintptr *size, MSpan **sp)
{
@@ -475,9 +367,9 @@ runtime_purgecachedstats(MCache *c)
// Protected by either heap or GC lock.
h = &runtime_mheap;
- mstats.heap_alloc += c->local_cachealloc;
+ mstats()->heap_alloc += (intptr)c->local_cachealloc;
c->local_cachealloc = 0;
- mstats.nlookup += c->local_nlookup;
+ mstats()->nlookup += c->local_nlookup;
c->local_nlookup = 0;
h->largefree += c->local_largefree;
c->local_largefree = 0;
@@ -489,13 +381,6 @@ runtime_purgecachedstats(MCache *c)
}
}
-extern uintptr runtime_sizeof_C_MStats
- __asm__ (GOSYM_PREFIX "runtime.Sizeof_C_MStats");
-
-// Size of the trailing by_size array differs between Go and C,
-// NumSizeClasses was changed, but we can not change Go struct because of backward compatibility.
-// sizeof_C_MStats is what C thinks about size of Go struct.
-
// Initialized in mallocinit because it's defined in go/runtime/mem.go.
#define MaxArena32 (2U<<30)
@@ -511,8 +396,6 @@ runtime_mallocinit(void)
uint64 i;
bool reserved;
- runtime_sizeof_C_MStats = sizeof(MStats) - (NumSizeClasses - 61) * sizeof(mstats.by_size[0]);
-
p = nil;
p_size = 0;
arena_size = 0;
@@ -644,9 +527,6 @@ runtime_mallocinit(void)
// Initialize the rest of the allocator.
runtime_MHeap_Init(&runtime_mheap);
runtime_m()->mcache = runtime_allocmcache();
-
- // See if it works.
- runtime_free(runtime_malloc(TinySize));
}
void*
@@ -688,7 +568,7 @@ runtime_MHeap_SysAlloc(MHeap *h, uintptr n)
if(n <= (uintptr)(h->arena_end - h->arena_used)) {
// Keep taking from our reservation.
p = h->arena_used;
- runtime_SysMap(p, n, h->arena_reserved, &mstats.heap_sys);
+ runtime_SysMap(p, n, h->arena_reserved, &mstats()->heap_sys);
h->arena_used += n;
runtime_MHeap_MapBits(h);
runtime_MHeap_MapSpans(h);
@@ -706,14 +586,14 @@ runtime_MHeap_SysAlloc(MHeap *h, uintptr n)
// try to get memory at a location chosen by the OS
// and hope that it is in the range we allocated bitmap for.
p_size = ROUND(n, PageSize) + PageSize;
- p = runtime_SysAlloc(p_size, &mstats.heap_sys);
+ p = runtime_SysAlloc(p_size, &mstats()->heap_sys);
if(p == nil)
return nil;
if(p < h->arena_start || (uintptr)(p+p_size - h->arena_start) >= MaxArena32) {
runtime_printf("runtime: memory allocated by OS (%p) not in usable range [%p,%p)\n",
p, h->arena_start, h->arena_start+MaxArena32);
- runtime_SysFree(p, p_size, &mstats.heap_sys);
+ runtime_SysFree(p, p_size, &mstats()->heap_sys);
return nil;
}
@@ -766,7 +646,7 @@ runtime_persistentalloc(uintptr size, uintptr align, uint64 *stat)
runtime_lock(&persistent);
persistent.pos = (byte*)ROUND((uintptr)persistent.pos, align);
if(persistent.pos + size > persistent.end) {
- persistent.pos = runtime_SysAlloc(PersistentAllocChunk, &mstats.other_sys);
+ persistent.pos = runtime_SysAlloc(PersistentAllocChunk, &mstats()->other_sys);
if(persistent.pos == nil) {
runtime_unlock(&persistent);
runtime_throw("runtime: cannot allocate memory");
@@ -776,10 +656,10 @@ runtime_persistentalloc(uintptr size, uintptr align, uint64 *stat)
p = persistent.pos;
persistent.pos += size;
runtime_unlock(&persistent);
- if(stat != &mstats.other_sys) {
+ if(stat != &mstats()->other_sys) {
// reaccount the allocation against provided stat
runtime_xadd64(stat, size);
- runtime_xadd64(&mstats.other_sys, -(uint64)size);
+ runtime_xadd64(&mstats()->other_sys, -(uint64)size);
}
return p;
}
@@ -893,30 +773,30 @@ runtime_mal(uintptr n)
}
func new(typ *Type) (ret *uint8) {
- ret = runtime_mallocgc(typ->__size, (uintptr)typ | TypeInfo_SingleObject, typ->kind&KindNoPointers ? FlagNoScan : 0);
+ ret = runtime_mallocgc(typ->__size, (uintptr)typ | TypeInfo_SingleObject, typ->kind&kindNoPointers ? FlagNoScan : 0);
}
static void*
-cnew(const Type *typ, intgo n, int32 objtyp)
+runtime_docnew(const Type *typ, intgo n, int32 objtyp)
{
if((objtyp&(PtrSize-1)) != objtyp)
runtime_throw("runtime: invalid objtyp");
if(n < 0 || (typ->__size > 0 && (uintptr)n > (MaxMem/typ->__size)))
runtime_panicstring("runtime: allocation size out of range");
- return runtime_mallocgc(typ->__size*n, (uintptr)typ | objtyp, typ->kind&KindNoPointers ? FlagNoScan : 0);
+ return runtime_mallocgc(typ->__size*n, (uintptr)typ | objtyp, typ->kind&kindNoPointers ? FlagNoScan : 0);
}
// same as runtime_new, but callable from C
void*
runtime_cnew(const Type *typ)
{
- return cnew(typ, 1, TypeInfo_SingleObject);
+ return runtime_docnew(typ, 1, TypeInfo_SingleObject);
}
void*
runtime_cnewarray(const Type *typ, intgo n)
{
- return cnew(typ, n, TypeInfo_Array);
+ return runtime_docnew(typ, n, TypeInfo_Array);
}
func GC() {
@@ -930,15 +810,15 @@ func SetFinalizer(obj Eface, finalizer Eface) {
const Type *fint;
const PtrType *ot;
- if(obj.__type_descriptor == nil) {
+ if((Type*)obj._type == nil) {
runtime_printf("runtime.SetFinalizer: first argument is nil interface\n");
goto throw;
}
- if((obj.__type_descriptor->kind&kindMask) != GO_PTR) {
- runtime_printf("runtime.SetFinalizer: first argument is %S, not pointer\n", *obj.__type_descriptor->__reflection);
+ if((((Type*)obj._type)->kind&kindMask) != GO_PTR) {
+ runtime_printf("runtime.SetFinalizer: first argument is %S, not pointer\n", *((Type*)obj._type)->__reflection);
goto throw;
}
- ot = (const PtrType*)obj.type;
+ ot = (const PtrType*)obj._type;
// As an implementation detail we do not run finalizers for zero-sized objects,
// because we use &runtime_zerobase for all such allocations.
if(ot->__element_type != nil && ot->__element_type->__size == 0)
@@ -950,49 +830,53 @@ func SetFinalizer(obj Eface, finalizer Eface) {
// runtime.SetFinalizer(Foo, nil)
// }
// See issue 7656.
- if((byte*)obj.__object < runtime_mheap.arena_start || runtime_mheap.arena_used <= (byte*)obj.__object)
+ if((byte*)obj.data < runtime_mheap.arena_start || runtime_mheap.arena_used <= (byte*)obj.data)
return;
- if(!runtime_mlookup(obj.__object, &base, &size, nil) || obj.__object != base) {
+ if(!runtime_mlookup(obj.data, &base, &size, nil) || obj.data != base) {
// As an implementation detail we allow to set finalizers for an inner byte
// of an object if it could come from tiny alloc (see mallocgc for details).
- if(ot->__element_type == nil || (ot->__element_type->kind&KindNoPointers) == 0 || ot->__element_type->__size >= TinySize) {
- runtime_printf("runtime.SetFinalizer: pointer not at beginning of allocated block (%p)\n", obj.__object);
+ if(ot->__element_type == nil || (ot->__element_type->kind&kindNoPointers) == 0 || ot->__element_type->__size >= TinySize) {
+ runtime_printf("runtime.SetFinalizer: pointer not at beginning of allocated block (%p)\n", obj.data);
goto throw;
}
}
- if(finalizer.__type_descriptor != nil) {
+ if((Type*)finalizer._type != nil) {
runtime_createfing();
- if((finalizer.__type_descriptor->kind&kindMask) != GO_FUNC)
+ if((((Type*)finalizer._type)->kind&kindMask) != GO_FUNC)
goto badfunc;
- ft = (const FuncType*)finalizer.__type_descriptor;
+ ft = (const FuncType*)finalizer._type;
if(ft->__dotdotdot || ft->__in.__count != 1)
goto badfunc;
fint = *(Type**)ft->__in.__values;
- if(__go_type_descriptors_equal(fint, obj.__type_descriptor)) {
+ if(__go_type_descriptors_equal(fint, (Type*)obj._type)) {
// ok - same type
- } else if((fint->kind&kindMask) == GO_PTR && (fint->__uncommon == nil || fint->__uncommon->__name == nil || obj.type->__uncommon == nil || obj.type->__uncommon->__name == nil) && __go_type_descriptors_equal(((const PtrType*)fint)->__element_type, ((const PtrType*)obj.type)->__element_type)) {
+ } else if((fint->kind&kindMask) == GO_PTR && (fint->__uncommon == nil || fint->__uncommon->__name == nil || ((Type*)obj._type)->__uncommon == nil || ((Type*)obj._type)->__uncommon->__name == nil) && __go_type_descriptors_equal(((const PtrType*)fint)->__element_type, ((const PtrType*)obj._type)->__element_type)) {
// ok - not same type, but both pointers,
// one or the other is unnamed, and same element type, so assignable.
} else if((fint->kind&kindMask) == GO_INTERFACE && ((const InterfaceType*)fint)->__methods.__count == 0) {
// ok - satisfies empty interface
- } else if((fint->kind&kindMask) == GO_INTERFACE && __go_convert_interface_2(fint, obj.__type_descriptor, 1) != nil) {
+ } else if((fint->kind&kindMask) == GO_INTERFACE && getitab(fint, (Type*)obj._type, true) != nil) {
// ok - satisfies non-empty interface
} else
goto badfunc;
- ot = (const PtrType*)obj.__type_descriptor;
- if(!runtime_addfinalizer(obj.__object, *(FuncVal**)finalizer.__object, ft, ot)) {
+ ot = (const PtrType*)obj._type;
+ if(!runtime_addfinalizer(obj.data, *(FuncVal**)finalizer.data, ft, ot)) {
runtime_printf("runtime.SetFinalizer: finalizer already set\n");
goto throw;
}
} else {
// NOTE: asking to remove a finalizer when there currently isn't one set is OK.
- runtime_removefinalizer(obj.__object);
+ runtime_removefinalizer(obj.data);
}
return;
badfunc:
- runtime_printf("runtime.SetFinalizer: cannot pass %S to finalizer %S\n", *obj.__type_descriptor->__reflection, *finalizer.__type_descriptor->__reflection);
+ runtime_printf("runtime.SetFinalizer: cannot pass %S to finalizer %S\n", *((Type*)obj._type)->__reflection, *((Type*)finalizer._type)->__reflection);
throw:
runtime_throw("runtime.SetFinalizer");
}
+
+func KeepAlive(x Eface) {
+ USED(x);
+}
diff --git a/libgo/runtime/malloc.h b/libgo/runtime/malloc.h
index 065f74a9b5..00e4166d81 100644
--- a/libgo/runtime/malloc.h
+++ b/libgo/runtime/malloc.h
@@ -82,11 +82,11 @@
typedef struct MCentral MCentral;
typedef struct MHeap MHeap;
-typedef struct MSpan MSpan;
-typedef struct MStats MStats;
-typedef struct MLink MLink;
-typedef struct MTypes MTypes;
-typedef struct GCStats GCStats;
+typedef struct mspan MSpan;
+typedef struct mstats MStats;
+typedef struct mlink MLink;
+typedef struct mtypes MTypes;
+typedef struct gcstats GCStats;
enum
{
@@ -100,10 +100,10 @@ enum
{
// Computed constant. The definition of MaxSmallSize and the
// algorithm in msize.c produce some number of different allocation
- // size classes. NumSizeClasses is that number. It's needed here
+ // size classes. _NumSizeClasses is that number. It's needed here
// because there are static arrays of this length; when msize runs its
// size choosing algorithm it double-checks that NumSizeClasses agrees.
- NumSizeClasses = 67,
+ // _NumSizeClasses is defined in runtime2.go as 67.
// Tunable constants.
MaxSmallSize = 32<<10,
@@ -132,12 +132,6 @@ enum
#else
MHeapMap_Bits = 32 - PageShift,
#endif
-
- // Max number of threads to run garbage collection.
- // 2, 3, and 4 are all plausible maximums depending
- // on the hardware details of the machine. The garbage
- // collector scales well to 8 cpus.
- MaxGcproc = 8,
};
// Maximum memory allocation size, a hint for callers.
@@ -148,13 +142,6 @@ enum
#else
#define MaxMem ((uintptr)-1)
#endif
-
-// A generic linked list of blocks. (Typically the block is bigger than sizeof(MLink).)
-struct MLink
-{
- MLink *next;
-};
-
// SysAlloc obtains a large chunk of zeroed memory from the
// operating system, typically on the order of a hundred kilobytes
// or a megabyte.
@@ -191,8 +178,10 @@ struct MLink
// SysFault marks a (already SysAlloc'd) region to fault
// if accessed. Used only for debugging the runtime.
-void* runtime_SysAlloc(uintptr nbytes, uint64 *stat);
-void runtime_SysFree(void *v, uintptr nbytes, uint64 *stat);
+void* runtime_SysAlloc(uintptr nbytes, uint64 *stat)
+ __asm__ (GOSYM_PREFIX "runtime.sysAlloc");
+void runtime_SysFree(void *v, uintptr nbytes, uint64 *stat)
+ __asm__ (GOSYM_PREFIX "runtime.sysFree");
void runtime_SysUnused(void *v, uintptr nbytes);
void runtime_SysUsed(void *v, uintptr nbytes);
void runtime_SysMap(void *v, uintptr nbytes, bool reserved, uint64 *stat);
@@ -223,68 +212,15 @@ void runtime_FixAlloc_Init(FixAlloc *f, uintptr size, void (*first)(void*, byte*
void* runtime_FixAlloc_Alloc(FixAlloc *f);
void runtime_FixAlloc_Free(FixAlloc *f, void *p);
-
-// Statistics.
-// Shared with Go: if you edit this structure, also edit type MemStats in mem.go.
-struct MStats
-{
- // General statistics.
- uint64 alloc; // bytes allocated and still in use
- uint64 total_alloc; // bytes allocated (even if freed)
- uint64 sys; // bytes obtained from system (should be sum of xxx_sys below, no locking, approximate)
- uint64 nlookup; // number of pointer lookups
- uint64 nmalloc; // number of mallocs
- uint64 nfree; // number of frees
-
- // Statistics about malloc heap.
- // protected by mheap.Lock
- uint64 heap_alloc; // bytes allocated and still in use
- uint64 heap_sys; // bytes obtained from system
- uint64 heap_idle; // bytes in idle spans
- uint64 heap_inuse; // bytes in non-idle spans
- uint64 heap_released; // bytes released to the OS
- uint64 heap_objects; // total number of allocated objects
-
- // Statistics about allocation of low-level fixed-size structures.
- // Protected by FixAlloc locks.
- uint64 stacks_inuse; // bootstrap stacks
- uint64 stacks_sys;
- uint64 mspan_inuse; // MSpan structures
- uint64 mspan_sys;
- uint64 mcache_inuse; // MCache structures
- uint64 mcache_sys;
- uint64 buckhash_sys; // profiling bucket hash table
- uint64 gc_sys;
- uint64 other_sys;
-
- // Statistics about garbage collector.
- // Protected by mheap or stopping the world during GC.
- uint64 next_gc; // next GC (in heap_alloc time)
- uint64 last_gc; // last GC (in absolute time)
- uint64 pause_total_ns;
- uint64 pause_ns[256];
- uint64 pause_end[256];
- uint32 numgc;
- float64 gc_cpu_fraction;
- bool enablegc;
- bool debuggc;
-
- // Statistics about allocation size classes.
- struct {
- uint32 size;
- uint64 nmalloc;
- uint64 nfree;
- } by_size[NumSizeClasses];
-};
-
-extern MStats mstats
- __asm__ (GOSYM_PREFIX "runtime.memStats");
-void runtime_updatememstats(GCStats *stats);
+extern MStats *mstats(void)
+ __asm__ (GOSYM_PREFIX "runtime.getMstats");
+void runtime_updatememstats(GCStats *stats)
+ __asm__ (GOSYM_PREFIX "runtime.updatememstats");
// Size classes. Computed and initialized by InitSizes.
//
// SizeToClass(0 <= n <= MaxSmallSize) returns the size class,
-// 1 <= sizeclass < NumSizeClasses, for n.
+// 1 <= sizeclass < _NumSizeClasses, for n.
// Size class 0 is reserved to mean "not small".
//
// class_to_size[i] = largest size in class i
@@ -292,42 +228,16 @@ void runtime_updatememstats(GCStats *stats);
// making new objects in class i
int32 runtime_SizeToClass(int32);
-uintptr runtime_roundupsize(uintptr);
-extern int32 runtime_class_to_size[NumSizeClasses];
-extern int32 runtime_class_to_allocnpages[NumSizeClasses];
+uintptr runtime_roundupsize(uintptr)
+ __asm__(GOSYM_PREFIX "runtime.roundupsize");
+extern int32 runtime_class_to_size[_NumSizeClasses];
+extern int32 runtime_class_to_allocnpages[_NumSizeClasses];
extern int8 runtime_size_to_class8[1024/8 + 1];
extern int8 runtime_size_to_class128[(MaxSmallSize-1024)/128 + 1];
extern void runtime_InitSizes(void);
-typedef struct MCacheList MCacheList;
-struct MCacheList
-{
- MLink *list;
- uint32 nlist;
-};
-
-// Per-thread (in Go, per-P) cache for small objects.
-// No locking needed because it is per-thread (per-P).
-struct MCache
-{
- // The following members are accessed on every malloc,
- // so they are grouped here for better caching.
- int32 next_sample; // trigger heap sample after allocating this many bytes
- intptr local_cachealloc; // bytes allocated (or freed) from cache since last lock of heap
- // Allocator cache for tiny objects w/o pointers.
- // See "Tiny allocator" comment in malloc.goc.
- byte* tiny;
- uintptr tinysize;
- // The rest is not accessed on every malloc.
- MSpan* alloc[NumSizeClasses]; // spans to allocate from
- MCacheList free[NumSizeClasses];// lists of explicitly freed objects
- // Local allocator stats, flushed during GC.
- uintptr local_nlookup; // number of pointer lookups
- uintptr local_largefree; // bytes freed for large objects (>MaxSmallSize)
- uintptr local_nlargefree; // number of frees for large objects (>MaxSmallSize)
- uintptr local_nsmallfree[NumSizeClasses]; // number of frees for small objects (<=MaxSmallSize)
-};
+typedef struct mcachelist MCacheList;
MSpan* runtime_MCache_Refill(MCache *c, int32 sizeclass);
void runtime_MCache_Free(MCache *c, MLink *p, int32 sizeclass, uintptr size);
@@ -364,11 +274,6 @@ enum
MTypes_Words = 2,
MTypes_Bytes = 3,
};
-struct MTypes
-{
- byte compression; // one of MTypes_*
- uintptr data;
-};
enum
{
@@ -380,13 +285,7 @@ enum
// if that happens.
};
-typedef struct Special Special;
-struct Special
-{
- Special* next; // linked list in span
- uint16 offset; // span offset of object
- byte kind; // kind of Special
-};
+typedef struct special Special;
// The described object has a finalizer set for it.
typedef struct SpecialFinalizer SpecialFinalizer;
@@ -399,7 +298,7 @@ struct SpecialFinalizer
};
// The described object is being heap profiled.
-typedef struct Bucket Bucket; // from mprof.goc
+typedef struct bucket Bucket; // from mprof.go
typedef struct SpecialProfile SpecialProfile;
struct SpecialProfile
{
@@ -415,33 +314,6 @@ enum
MSpanListHead,
MSpanDead,
};
-struct MSpan
-{
- MSpan *next; // in a span linked list
- MSpan *prev; // in a span linked list
- PageID start; // starting page number
- uintptr npages; // number of pages in span
- MLink *freelist; // list of free objects
- // sweep generation:
- // if sweepgen == h->sweepgen - 2, the span needs sweeping
- // if sweepgen == h->sweepgen - 1, the span is currently being swept
- // if sweepgen == h->sweepgen, the span is swept and ready to use
- // h->sweepgen is incremented by 2 after every GC
- uint32 sweepgen;
- uint16 ref; // capacity - number of objects in freelist
- uint8 sizeclass; // size class
- bool incache; // being used by an MCache
- uint8 state; // MSpanInUse etc
- uint8 needzero; // needs to be zeroed before allocation
- uintptr elemsize; // computed from sizeclass or from npages
- int64 unusedsince; // First time spotted by GC in MSpanFree state
- uintptr npreleased; // number of pages released to the OS
- byte *limit; // end of data in span
- MTypes types; // types of allocated objects in this span
- Lock specialLock; // guards specials list
- Special *specials; // linked list of special records sorted by offset.
- MLink *freebuf; // objects freed explicitly, not incorporated into freelist yet
-};
void runtime_MSpan_Init(MSpan *span, PageID start, uintptr npages);
void runtime_MSpan_EnsureSwept(MSpan *span);
@@ -463,7 +335,7 @@ struct MCentral
Lock;
int32 sizeclass;
MSpan nonempty; // list of spans with a free object
- MSpan empty; // list of spans with no free objects (or cached in an MCache)
+ MSpan mempty; // list of spans with no free objects (or cached in an MCache)
int32 nfree; // # of objects available in nonempty spans
};
@@ -509,7 +381,7 @@ struct MHeap
struct {
MCentral;
byte pad[64];
- } central[NumSizeClasses];
+ } central[_NumSizeClasses];
FixAlloc spanalloc; // allocator for Span*
FixAlloc cachealloc; // allocator for MCache*
@@ -520,7 +392,7 @@ struct MHeap
// Malloc stats.
uint64 largefree; // bytes freed for large objects (>MaxSmallSize)
uint64 nlargefree; // number of frees for large objects (>MaxSmallSize)
- uint64 nsmallfree[NumSizeClasses]; // number of frees for small objects (<=MaxSmallSize)
+ uint64 nsmallfree[_NumSizeClasses]; // number of frees for small objects (<=MaxSmallSize)
};
extern MHeap runtime_mheap;
@@ -537,7 +409,8 @@ void runtime_MHeap_Scavenger(void*);
void runtime_MHeap_SplitSpan(MHeap *h, MSpan *s);
void* runtime_mallocgc(uintptr size, uintptr typ, uint32 flag);
-void* runtime_persistentalloc(uintptr size, uintptr align, uint64 *stat);
+void* runtime_persistentalloc(uintptr size, uintptr align, uint64 *stat)
+ __asm__(GOSYM_PREFIX "runtime.persistentalloc");
int32 runtime_mlookup(void *v, byte **base, uintptr *size, MSpan **s);
void runtime_gc(int32 force);
uintptr runtime_sweepone(void);
@@ -550,11 +423,16 @@ extern int32 runtime_checking;
void runtime_markspan(void *v, uintptr size, uintptr n, bool leftover);
void runtime_unmarkspan(void *v, uintptr size);
void runtime_purgecachedstats(MCache*);
-void* runtime_cnew(const Type*);
-void* runtime_cnewarray(const Type*, intgo);
-void runtime_tracealloc(void*, uintptr, uintptr);
-void runtime_tracefree(void*, uintptr);
-void runtime_tracegc(void);
+void* runtime_cnew(const Type*)
+ __asm__(GOSYM_PREFIX "runtime.newobject");
+void* runtime_cnewarray(const Type*, intgo)
+ __asm__(GOSYM_PREFIX "runtime.newarray");
+void runtime_tracealloc(void*, uintptr, uintptr)
+ __asm__ (GOSYM_PREFIX "runtime.tracealloc");
+void runtime_tracefree(void*, uintptr)
+ __asm__ (GOSYM_PREFIX "runtime.tracefree");
+void runtime_tracegc(void)
+ __asm__ (GOSYM_PREFIX "runtime.tracegc");
uintptr runtime_gettype(void*);
@@ -576,19 +454,28 @@ struct Obj
uintptr ti; // type info
};
-void runtime_MProf_Malloc(void*, uintptr);
-void runtime_MProf_Free(Bucket*, uintptr, bool);
-void runtime_MProf_GC(void);
-void runtime_iterate_memprof(void (*callback)(Bucket*, uintptr, Location*, uintptr, uintptr, uintptr));
-int32 runtime_gcprocs(void);
-void runtime_helpgc(int32 nproc);
-void runtime_gchelper(void);
+void runtime_MProf_Malloc(void*, uintptr)
+ __asm__ (GOSYM_PREFIX "runtime.mProf_Malloc");
+void runtime_MProf_Free(Bucket*, uintptr, bool)
+ __asm__ (GOSYM_PREFIX "runtime.mProf_Free");
+void runtime_MProf_GC(void)
+ __asm__ (GOSYM_PREFIX "runtime.mProf_GC");
+void runtime_iterate_memprof(FuncVal* callback)
+ __asm__ (GOSYM_PREFIX "runtime.iterate_memprof");
+int32 runtime_gcprocs(void)
+ __asm__ (GOSYM_PREFIX "runtime.gcprocs");
+void runtime_helpgc(int32 nproc)
+ __asm__ (GOSYM_PREFIX "runtime.helpgc");
+void runtime_gchelper(void)
+ __asm__ (GOSYM_PREFIX "runtime.gchelper");
void runtime_createfing(void);
-G* runtime_wakefing(void);
+G* runtime_wakefing(void)
+ __asm__ (GOSYM_PREFIX "runtime.wakefing");
extern bool runtime_fingwait;
extern bool runtime_fingwake;
-void runtime_setprofilebucket(void *p, Bucket *b);
+void runtime_setprofilebucket(void *p, Bucket *b)
+ __asm__ (GOSYM_PREFIX "runtime.setprofilebucket");
struct __go_func_type;
struct __go_ptr_type;
@@ -647,14 +534,11 @@ void runtime_gc_g_ptr(Eface*);
void runtime_gc_itab_ptr(Eface*);
void runtime_memorydump(void);
-int32 runtime_setgcpercent(int32);
+int32 runtime_setgcpercent(int32)
+ __asm__ (GOSYM_PREFIX "runtime.setgcpercent");
// Value we use to mark dead pointers when GODEBUG=gcdead=1.
#define PoisonGC ((uintptr)0xf969696969696969ULL)
#define PoisonStack ((uintptr)0x6868686868686868ULL)
struct Workbuf;
-void runtime_MProf_Mark(struct Workbuf**, void (*)(struct Workbuf**, Obj));
-void runtime_proc_scan(struct Workbuf**, void (*)(struct Workbuf**, Obj));
-void runtime_time_scan(struct Workbuf**, void (*)(struct Workbuf**, Obj));
-void runtime_netpoll_scan(struct Workbuf**, void (*)(struct Workbuf**, Obj));
diff --git a/libgo/runtime/map.goc b/libgo/runtime/map.goc
deleted file mode 100644
index e4b8456dc3..0000000000
--- a/libgo/runtime/map.goc
+++ /dev/null
@@ -1,72 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package runtime
-#include "runtime.h"
-#include "map.h"
-
-typedef struct __go_map Hmap;
-typedef struct __go_hash_iter hiter;
-
-/* Access a value in a map, returning a value and a presence indicator. */
-
-func mapaccess2(t *MapType, h *Hmap, key *byte, val *byte) (present bool) {
- byte *mapval;
- size_t valsize;
-
- mapval = __go_map_index(h, key, 0);
- valsize = t->__val_type->__size;
- if (mapval == nil) {
- __builtin_memset(val, 0, valsize);
- present = 0;
- } else {
- __builtin_memcpy(val, mapval, valsize);
- present = 1;
- }
-}
-
-/* Optionally assign a value to a map (m[k] = v, p). */
-
-func mapassign2(h *Hmap, key *byte, val *byte, p bool) {
- if (!p) {
- __go_map_delete(h, key);
- } else {
- byte *mapval;
- size_t valsize;
-
- mapval = __go_map_index(h, key, 1);
- valsize = h->__descriptor->__map_descriptor->__val_type->__size;
- __builtin_memcpy(mapval, val, valsize);
- }
-}
-
-/* Delete a key from a map. */
-
-func mapdelete(h *Hmap, key *byte) {
- __go_map_delete(h, key);
-}
-
-/* Initialize a range over a map. */
-
-func mapiterinit(h *Hmap, it *hiter) {
- __go_mapiterinit(h, it);
-}
-
-/* Move to the next iteration, updating *HITER. */
-
-func mapiternext(it *hiter) {
- __go_mapiternext(it);
-}
-
-/* Get the key of the current iteration. */
-
-func mapiter1(it *hiter, key *byte) {
- __go_mapiter1(it, key);
-}
-
-/* Get the key and value of the current iteration. */
-
-func mapiter2(it *hiter, key *byte, val *byte) {
- __go_mapiter2(it, key, val);
-}
diff --git a/libgo/runtime/map.h b/libgo/runtime/map.h
deleted file mode 100644
index 0c587bb2af..0000000000
--- a/libgo/runtime/map.h
+++ /dev/null
@@ -1,87 +0,0 @@
-/* map.h -- the map type for Go.
-
- Copyright 2009 The Go Authors. All rights reserved.
- Use of this source code is governed by a BSD-style
- license that can be found in the LICENSE file. */
-
-#include <stddef.h>
-#include <stdint.h>
-
-#include "go-type.h"
-
-/* A map descriptor is what we need to manipulate the map. This is
- constant for a given map type. */
-
-struct __go_map_descriptor
-{
- /* A pointer to the type descriptor for the type of the map itself. */
- const struct __go_map_type *__map_descriptor;
-
- /* A map entry is a struct with three fields:
- map_entry_type *next_entry;
- key_type key;
- value_type value;
- This is the size of that struct. */
- uintptr_t __entry_size;
-
- /* The offset of the key field in a map entry struct. */
- uintptr_t __key_offset;
-
- /* The offset of the value field in a map entry struct (the value
- field immediately follows the key field, but there may be some
- bytes inserted for alignment). */
- uintptr_t __val_offset;
-};
-
-struct __go_map
-{
- /* The constant descriptor for this map. */
- const struct __go_map_descriptor *__descriptor;
-
- /* The number of elements in the hash table. */
- uintptr_t __element_count;
-
- /* The number of entries in the __buckets array. */
- uintptr_t __bucket_count;
-
- /* Each bucket is a pointer to a linked list of map entries. */
- void **__buckets;
-};
-
-/* For a map iteration the compiled code will use a pointer to an
- iteration structure. The iteration structure will be allocated on
- the stack. The Go code must allocate at least enough space. */
-
-struct __go_hash_iter
-{
- /* A pointer to the current entry. This will be set to NULL when
- the range has completed. The Go will test this field, so it must
- be the first one in the structure. */
- const void *entry;
- /* The map we are iterating over. */
- const struct __go_map *map;
- /* A pointer to the next entry in the current bucket. This permits
- deleting the current entry. This will be NULL when we have seen
- all the entries in the current bucket. */
- const void *next_entry;
- /* The bucket index of the current and next entry. */
- uintptr_t bucket;
-};
-
-extern struct __go_map *__go_new_map (const struct __go_map_descriptor *,
- uintptr_t);
-
-extern uintptr_t __go_map_next_prime (uintptr_t);
-
-extern void *__go_map_index (struct __go_map *, const void *, _Bool);
-
-extern void __go_map_delete (struct __go_map *, const void *);
-
-extern void __go_mapiterinit (const struct __go_map *, struct __go_hash_iter *);
-
-extern void __go_mapiternext (struct __go_hash_iter *);
-
-extern void __go_mapiter1 (struct __go_hash_iter *it, unsigned char *key);
-
-extern void __go_mapiter2 (struct __go_hash_iter *it, unsigned char *key,
- unsigned char *val);
diff --git a/libgo/runtime/mcache.c b/libgo/runtime/mcache.c
index 746711a0d3..46684bc824 100644
--- a/libgo/runtime/mcache.c
+++ b/libgo/runtime/mcache.c
@@ -27,7 +27,7 @@ runtime_allocmcache(void)
c = runtime_FixAlloc_Alloc(&runtime_mheap.cachealloc);
runtime_unlock(&runtime_mheap);
runtime_memclr((byte*)c, sizeof(*c));
- for(i = 0; i < NumSizeClasses; i++)
+ for(i = 0; i < _NumSizeClasses; i++)
c->alloc[i] = &emptymspan;
// Set first allocation sample size.
@@ -35,7 +35,7 @@ runtime_allocmcache(void)
if(rate > 0x3fffffff) // make 2*rate not overflow
rate = 0x3fffffff;
if(rate != 0)
- c->next_sample = runtime_fastrand1() % (2*rate);
+ c->next_sample = runtime_fastrand() % (2*rate);
return c;
}
@@ -115,7 +115,7 @@ runtime_MCache_ReleaseAll(MCache *c)
MSpan *s;
MCacheList *l;
- for(i=0; i<NumSizeClasses; i++) {
+ for(i=0; i<_NumSizeClasses; i++) {
s = c->alloc[i];
if(s != &emptymspan) {
runtime_MCentral_UncacheSpan(&runtime_mheap.central[i], s);
diff --git a/libgo/runtime/mcentral.c b/libgo/runtime/mcentral.c
index e41a83fbf0..491cac5330 100644
--- a/libgo/runtime/mcentral.c
+++ b/libgo/runtime/mcentral.c
@@ -8,7 +8,7 @@
//
// The MCentral doesn't actually contain the list of free objects; the MSpan does.
// Each MCentral is two lists of MSpans: those with free objects (c->nonempty)
-// and those that are completely allocated (c->empty).
+// and those that are completely allocated (c->mempty).
//
// TODO(rsc): tcmalloc uses a "transfer cache" to split the list
// into sections of class_to_transfercount[sizeclass] objects
@@ -28,7 +28,7 @@ runtime_MCentral_Init(MCentral *c, int32 sizeclass)
{
c->sizeclass = sizeclass;
runtime_MSpanList_Init(&c->nonempty);
- runtime_MSpanList_Init(&c->empty);
+ runtime_MSpanList_Init(&c->mempty);
}
// Allocate a span to use in an MCache.
@@ -58,13 +58,13 @@ retry:
goto havespan;
}
- for(s = c->empty.next; s != &c->empty; s = s->next) {
+ for(s = c->mempty.next; s != &c->mempty; s = s->next) {
if(s->sweepgen == sg-2 && runtime_cas(&s->sweepgen, sg-2, sg-1)) {
// we have an empty span that requires sweeping,
// sweep it and see if we can free some space in it
runtime_MSpanList_Remove(s);
// swept spans are at the end of the list
- runtime_MSpanList_InsertBack(&c->empty, s);
+ runtime_MSpanList_InsertBack(&c->mempty, s);
runtime_unlock(c);
runtime_MSpan_Sweep(s);
runtime_lock(c);
@@ -96,7 +96,7 @@ havespan:
runtime_throw("freelist empty");
c->nfree -= n;
runtime_MSpanList_Remove(s);
- runtime_MSpanList_InsertBack(&c->empty, s);
+ runtime_MSpanList_InsertBack(&c->mempty, s);
s->incache = true;
runtime_unlock(c);
return s;
@@ -272,7 +272,7 @@ MCentral_Grow(MCentral *c)
// Carve span into sequence of blocks.
tailp = &s->freelist;
p = (byte*)(s->start << PageShift);
- s->limit = p + size*n;
+ s->limit = (uintptr)(p + size*n);
for(i=0; i<n; i++) {
v = (MLink*)p;
*tailp = v;
diff --git a/libgo/runtime/mem_posix_memalign.c b/libgo/runtime/mem_posix_memalign.c
index 8acdf07057..853b5c7ae8 100644
--- a/libgo/runtime/mem_posix_memalign.c
+++ b/libgo/runtime/mem_posix_memalign.c
@@ -9,7 +9,7 @@ runtime_SysAlloc(uintptr n)
{
void *p;
- mstats.sys += n;
+ mstats()->sys += n;
errno = posix_memalign(&p, PageSize, n);
if (errno > 0) {
perror("posix_memalign");
@@ -29,7 +29,7 @@ runtime_SysUnused(void *v, uintptr n)
void
runtime_SysFree(void *v, uintptr n)
{
- mstats.sys -= n;
+ mstats()->sys -= n;
free(v);
}
diff --git a/libgo/runtime/mgc0.c b/libgo/runtime/mgc0.c
index d7d0b27ba9..fc5424149e 100644
--- a/libgo/runtime/mgc0.c
+++ b/libgo/runtime/mgc0.c
@@ -7,7 +7,7 @@
// GC is:
// - mark&sweep
// - mostly precise (with the exception of some C-allocated objects, assembly frames/arguments, etc)
-// - parallel (up to MaxGcproc threads)
+// - parallel (up to _MaxGcproc threads)
// - partially concurrent (mark is stop-the-world, while sweep is concurrent)
// - non-moving/non-compacting
// - full (non-partial)
@@ -56,22 +56,16 @@
#include "arch.h"
#include "malloc.h"
#include "mgc0.h"
-#include "chan.h"
#include "go-type.h"
// Map gccgo field names to gc field names.
// Slice aka __go_open_array.
#define array __values
#define cap __capacity
-// Iface aka __go_interface
-#define tab __methods
// Hmap aka __go_map
typedef struct __go_map Hmap;
// Type aka __go_type_descriptor
#define string __reflection
-#define KindPtr GO_PTR
-#define KindNoPointers GO_NO_POINTERS
-#define kindMask GO_CODE_MASK
// PtrType aka __go_ptr_type
#define elem __element_type
@@ -130,6 +124,7 @@ clearpools(void)
{
P *p, **pp;
MCache *c;
+ Defer *d, *dlink;
// clear sync.Pool's
if(poolcleanup != nil) {
@@ -144,25 +139,18 @@ clearpools(void)
c->tiny = nil;
c->tinysize = 0;
}
- // clear defer pools
- p->deferpool = nil;
}
-}
-// Holding worldsema grants an M the right to try to stop the world.
-// The procedure is:
-//
-// runtime_semacquire(&runtime_worldsema);
-// m->gcing = 1;
-// runtime_stoptheworld();
-//
-// ... do stuff ...
-//
-// m->gcing = 0;
-// runtime_semrelease(&runtime_worldsema);
-// runtime_starttheworld();
-//
-uint32 runtime_worldsema = 1;
+ // Clear central defer pools.
+ // Leave per-P pools alone, they have strictly bounded size.
+ runtime_lock(&runtime_sched->deferlock);
+ for(d = runtime_sched->deferpool; d != nil; d = dlink) {
+ dlink = d->link;
+ d->link = nil;
+ }
+ runtime_sched->deferpool = nil;
+ runtime_unlock(&runtime_sched->deferlock);
+}
typedef struct Workbuf Workbuf;
struct Workbuf
@@ -216,7 +204,7 @@ static void addstackroots(G *gp, Workbuf **wbufp);
static struct {
uint64 full; // lock-free list of full blocks
- uint64 empty; // lock-free list of empty blocks
+ uint64 wempty; // lock-free list of empty blocks
byte pad0[CacheLineSize]; // prevents false-sharing between full/empty and nproc/nwait
uint32 nproc;
int64 tstart;
@@ -321,7 +309,7 @@ markonly(const void *obj)
x = k;
x -= (uintptr)runtime_mheap.arena_start>>PageShift;
s = runtime_mheap.spans[x];
- if(s == nil || k < s->start || (const byte*)obj >= s->limit || s->state != MSpanInUse)
+ if(s == nil || k < s->start || (uintptr)obj >= s->limit || s->state != MSpanInUse)
return false;
p = (byte*)((uintptr)s->start<<PageShift);
if(s->sizeclass == 0) {
@@ -401,7 +389,7 @@ struct BufferList
uint32 busy;
byte pad[CacheLineSize];
};
-static BufferList bufferList[MaxGcproc];
+static BufferList bufferList[_MaxGcproc];
static void enqueue(Obj obj, Workbuf **_wbuf, Obj **_wp, uintptr *_nobj);
@@ -517,7 +505,7 @@ flushptrbuf(Scanbuf *sbuf)
x = k;
x -= (uintptr)arena_start>>PageShift;
s = runtime_mheap.spans[x];
- if(s == nil || k < s->start || obj >= s->limit || s->state != MSpanInUse)
+ if(s == nil || k < s->start || (uintptr)obj >= s->limit || s->state != MSpanInUse)
continue;
p = (byte*)((uintptr)s->start<<PageShift);
if(s->sizeclass == 0) {
@@ -651,8 +639,8 @@ static uintptr defaultProg[2] = {PtrSize, GC_DEFAULT_PTR};
static uintptr chanProg[2] = {0, GC_CHAN};
// Local variables of a program fragment or loop
-typedef struct Frame Frame;
-struct Frame {
+typedef struct GCFrame GCFrame;
+struct GCFrame {
uintptr count, elemsize, b;
const uintptr *loop_or_ret;
};
@@ -731,7 +719,7 @@ scanblock(Workbuf *wbuf, bool keepworking)
const Type *t, *et;
Slice *sliceptr;
String *stringptr;
- Frame *stack_ptr, stack_top, stack[GC_STACK_CAPACITY+4];
+ GCFrame *stack_ptr, stack_top, stack[GC_STACK_CAPACITY+4];
BufferList *scanbuffers;
Scanbuf sbuf;
Eface *eface;
@@ -926,12 +914,12 @@ scanblock(Workbuf *wbuf, bool keepworking)
eface = (Eface*)(stack_top.b + pc[1]);
pc += 2;
if(Debug > 2)
- runtime_printf("gc_eface @%p: %p %p\n", stack_top.b+pc[1], eface->__type_descriptor, eface->__object);
- if(eface->__type_descriptor == nil)
+ runtime_printf("gc_eface @%p: %p %p\n", stack_top.b+pc[1], eface->_type, eface->data);
+ if(eface->_type == nil)
continue;
// eface->type
- t = eface->__type_descriptor;
+ t = eface->_type;
if((const byte*)t >= arena_start && (const byte*)t < arena_used) {
union { const Type *tc; Type *tr; } u;
u.tc = t;
@@ -940,23 +928,23 @@ scanblock(Workbuf *wbuf, bool keepworking)
flushptrbuf(&sbuf);
}
- // eface->__object
- if((byte*)eface->__object >= arena_start && (byte*)eface->__object < arena_used) {
+ // eface->data
+ if((byte*)eface->data >= arena_start && (byte*)eface->data < arena_used) {
if(__go_is_pointer_type(t)) {
- if((t->__code & KindNoPointers))
+ if((t->__code & kindNoPointers))
continue;
- obj = eface->__object;
- if((t->__code & kindMask) == KindPtr) {
+ obj = eface->data;
+ if((t->__code & kindMask) == kindPtr) {
// Only use type information if it is a pointer-containing type.
// This matches the GC programs written by cmd/gc/reflect.c's
// dgcsym1 in case TPTR32/case TPTR64. See rationale there.
et = ((const PtrType*)t)->elem;
- if(!(et->__code & KindNoPointers))
+ if(!(et->__code & kindNoPointers))
objti = (uintptr)((const PtrType*)t)->elem->__gc;
}
} else {
- obj = eface->__object;
+ obj = eface->data;
objti = (uintptr)t->__gc;
}
}
@@ -966,7 +954,7 @@ scanblock(Workbuf *wbuf, bool keepworking)
iface = (Iface*)(stack_top.b + pc[1]);
pc += 2;
if(Debug > 2)
- runtime_printf("gc_iface @%p: %p/%p %p\n", stack_top.b+pc[1], iface->__methods[0], nil, iface->__object);
+ runtime_printf("gc_iface @%p: %p/%p %p\n", stack_top.b+pc[1], *(Type**)iface->tab, nil, iface->data);
if(iface->tab == nil)
continue;
@@ -978,23 +966,23 @@ scanblock(Workbuf *wbuf, bool keepworking)
}
// iface->data
- if((byte*)iface->__object >= arena_start && (byte*)iface->__object < arena_used) {
- t = (const Type*)iface->tab[0];
+ if((byte*)iface->data >= arena_start && (byte*)iface->data < arena_used) {
+ t = *(Type**)iface->tab;
if(__go_is_pointer_type(t)) {
- if((t->__code & KindNoPointers))
+ if((t->__code & kindNoPointers))
continue;
- obj = iface->__object;
- if((t->__code & kindMask) == KindPtr) {
+ obj = iface->data;
+ if((t->__code & kindMask) == kindPtr) {
// Only use type information if it is a pointer-containing type.
// This matches the GC programs written by cmd/gc/reflect.c's
// dgcsym1 in case TPTR32/case TPTR64. See rationale there.
et = ((const PtrType*)t)->elem;
- if(!(et->__code & KindNoPointers))
+ if(!(et->__code & kindNoPointers))
objti = (uintptr)((const PtrType*)t)->elem->__gc;
}
} else {
- obj = iface->__object;
+ obj = iface->data;
objti = (uintptr)t->__gc;
}
}
@@ -1057,7 +1045,7 @@ scanblock(Workbuf *wbuf, bool keepworking)
// Stack push.
*stack_ptr-- = stack_top;
- stack_top = (Frame){count, elemsize, i, pc};
+ stack_top = (GCFrame){count, elemsize, i, pc};
continue;
case GC_ARRAY_NEXT:
@@ -1074,7 +1062,7 @@ scanblock(Workbuf *wbuf, bool keepworking)
case GC_CALL:
// Stack push.
*stack_ptr-- = stack_top;
- stack_top = (Frame){1, 0, stack_top.b + pc[1], pc+3 /*return address*/};
+ stack_top = (GCFrame){1, 0, stack_top.b + pc[1], pc+3 /*return address*/};
pc = (const uintptr*)((const byte*)pc + *(const int32*)(pc+2)); // target of the CALL instruction
continue;
@@ -1101,7 +1089,7 @@ scanblock(Workbuf *wbuf, bool keepworking)
}
if(markonly(chan)) {
chantype = (ChanType*)pc[2];
- if(!(chantype->elem->__code & KindNoPointers)) {
+ if(!(chantype->elem->__code & kindNoPointers)) {
// Start chanProg.
chan_ret = pc+3;
pc = chanProg+1;
@@ -1114,16 +1102,14 @@ scanblock(Workbuf *wbuf, bool keepworking)
case GC_CHAN:
// There are no heap pointers in struct Hchan,
// so we can ignore the leading sizeof(Hchan) bytes.
- if(!(chantype->elem->__code & KindNoPointers)) {
- // Channel's buffer follows Hchan immediately in memory.
- // Size of buffer (cap(c)) is second int in the chan struct.
- chancap = ((uintgo*)chan)[1];
- if(chancap > 0) {
+ if(!(chantype->elem->__code & kindNoPointers)) {
+ chancap = chan->dataqsiz;
+ if(chancap > 0 && markonly(chan->buf)) {
// TODO(atom): split into two chunks so that only the
// in-use part of the circular buffer is scanned.
// (Channel routines zero the unused part, so the current
// code does not lead to leaks, it's just a little inefficient.)
- *sbuf.obj.pos++ = (Obj){(byte*)chan+runtime_Hchansize, chancap*chantype->elem->__size,
+ *sbuf.obj.pos++ = (Obj){chan->buf, chancap*chantype->elem->__size,
(uintptr)chantype->elem->__gc | PRECISE | LOOP};
if(sbuf.obj.pos == sbuf.obj.end)
flushobjbuf(&sbuf);
@@ -1293,14 +1279,8 @@ markroot(ParFor *desc, uint32 i)
// For gccgo we use this for all the other global roots.
enqueue1(&wbuf, (Obj){(byte*)&runtime_m0, sizeof runtime_m0, 0});
enqueue1(&wbuf, (Obj){(byte*)&runtime_g0, sizeof runtime_g0, 0});
- enqueue1(&wbuf, (Obj){(byte*)&runtime_allg, sizeof runtime_allg, 0});
- enqueue1(&wbuf, (Obj){(byte*)&runtime_allm, sizeof runtime_allm, 0});
enqueue1(&wbuf, (Obj){(byte*)&runtime_allp, sizeof runtime_allp, 0});
enqueue1(&wbuf, (Obj){(byte*)&work, sizeof work, 0});
- runtime_proc_scan(&wbuf, enqueue1);
- runtime_MProf_Mark(&wbuf, enqueue1);
- runtime_time_scan(&wbuf, enqueue1);
- runtime_netpoll_scan(&wbuf, enqueue1);
break;
case RootFinalizers:
@@ -1352,12 +1332,12 @@ markroot(ParFor *desc, uint32 i)
default:
// the rest is scanning goroutine stacks
- if(i - RootCount >= runtime_allglen)
+ if(i - RootCount >= runtime_getallglen())
runtime_throw("markroot: bad index");
- gp = runtime_allg[i - RootCount];
+ gp = runtime_getallg(i - RootCount);
// remember when we've first observed the G blocked
// needed only to output in traceback
- if((gp->status == Gwaiting || gp->status == Gsyscall) && gp->waitsince == 0)
+ if((gp->atomicstatus == _Gwaiting || gp->atomicstatus == _Gsyscall) && gp->waitsince == 0)
gp->waitsince = work.tstart;
addstackroots(gp, &wbuf);
break;
@@ -1377,13 +1357,13 @@ getempty(Workbuf *b)
{
if(b != nil)
runtime_lfstackpush(&work.full, &b->node);
- b = (Workbuf*)runtime_lfstackpop(&work.empty);
+ b = (Workbuf*)runtime_lfstackpop(&work.wempty);
if(b == nil) {
// Need to allocate.
runtime_lock(&work);
if(work.nchunk < sizeof *b) {
work.nchunk = 1<<20;
- work.chunk = runtime_SysAlloc(work.nchunk, &mstats.gc_sys);
+ work.chunk = runtime_SysAlloc(work.nchunk, &mstats()->gc_sys);
if(work.chunk == nil)
runtime_throw("runtime: cannot allocate memory");
}
@@ -1402,7 +1382,7 @@ putempty(Workbuf *b)
if(CollectStats)
runtime_xadd64(&gcstats.putempty, 1);
- runtime_lfstackpush(&work.empty, &b->node);
+ runtime_lfstackpush(&work.wempty, &b->node);
}
// Get a full work buffer off the work.full list, or return nil.
@@ -1416,7 +1396,7 @@ getfull(Workbuf *b)
runtime_xadd64(&gcstats.getfull, 1);
if(b != nil)
- runtime_lfstackpush(&work.empty, &b->node);
+ runtime_lfstackpush(&work.wempty, &b->node);
b = (Workbuf*)runtime_lfstackpop(&work.full);
if(b != nil || work.nproc == 1)
return b;
@@ -1472,17 +1452,17 @@ handoff(Workbuf *b)
static void
addstackroots(G *gp, Workbuf **wbufp)
{
- switch(gp->status){
+ switch(gp->atomicstatus){
default:
- runtime_printf("unexpected G.status %d (goroutine %p %D)\n", gp->status, gp, gp->goid);
+ runtime_printf("unexpected G.status %d (goroutine %p %D)\n", gp->atomicstatus, gp, gp->goid);
runtime_throw("mark - bad status");
- case Gdead:
+ case _Gdead:
return;
- case Grunning:
+ case _Grunning:
runtime_throw("mark - world not stopped");
- case Grunnable:
- case Gsyscall:
- case Gwaiting:
+ case _Grunnable:
+ case _Gsyscall:
+ case _Gwaiting:
break;
}
@@ -1512,12 +1492,12 @@ addstackroots(G *gp, Workbuf **wbufp)
// the system call instead, since that won't change underfoot.
if(gp->gcstack != nil) {
sp = gp->gcstack;
- spsize = gp->gcstack_size;
- next_segment = gp->gcnext_segment;
- next_sp = gp->gcnext_sp;
- initial_sp = gp->gcinitial_sp;
+ spsize = gp->gcstacksize;
+ next_segment = gp->gcnextsegment;
+ next_sp = gp->gcnextsp;
+ initial_sp = gp->gcinitialsp;
} else {
- sp = __splitstack_find_context(&gp->stack_context[0],
+ sp = __splitstack_find_context(&gp->stackcontext[0],
&spsize, &next_segment,
&next_sp, &initial_sp);
}
@@ -1543,11 +1523,11 @@ addstackroots(G *gp, Workbuf **wbufp)
} else {
// Scanning another goroutine's stack.
// The goroutine is usually asleep (the world is stopped).
- bottom = (byte*)gp->gcnext_sp;
+ bottom = (byte*)gp->gcnextsp;
if(bottom == nil)
return;
}
- top = (byte*)gp->gcinitial_sp + gp->gcstack_size;
+ top = (byte*)gp->gcinitialsp + gp->gcstacksize;
if(top > bottom)
enqueue1(wbufp, (Obj){bottom, top - bottom, 0});
else
@@ -1564,7 +1544,7 @@ runtime_queuefinalizer(void *p, FuncVal *fn, const FuncType *ft, const PtrType *
runtime_lock(&finlock);
if(finq == nil || finq->cnt == finq->cap) {
if(finc == nil) {
- finc = runtime_persistentalloc(FinBlockSize, 0, &mstats.gc_sys);
+ finc = runtime_persistentalloc(FinBlockSize, 0, &mstats()->gc_sys);
finc->cap = (FinBlockSize - sizeof(FinBlock)) / sizeof(Finalizer) + 1;
finc->alllink = allfin;
allfin = finc;
@@ -1761,7 +1741,7 @@ runtime_MSpan_Sweep(MSpan *s)
runtime_MHeap_Free(&runtime_mheap, s, 1);
c->local_nlargefree++;
c->local_largefree += size;
- runtime_xadd64(&mstats.next_gc, -(uint64)(size * (gcpercent + 100)/100));
+ runtime_xadd64(&mstats()->next_gc, -(uint64)(size * (gcpercent + 100)/100));
res = true;
} else {
// Free small object.
@@ -1803,7 +1783,7 @@ runtime_MSpan_Sweep(MSpan *s)
if(nfree > 0) {
c->local_nsmallfree[cl] += nfree;
c->local_cachealloc -= nfree * size;
- runtime_xadd64(&mstats.next_gc, -(uint64)(nfree * size * (gcpercent + 100)/100));
+ runtime_xadd64(&mstats()->next_gc, -(uint64)(nfree * size * (gcpercent + 100)/100));
res = runtime_MCentral_FreeSpan(&runtime_mheap.central[cl], s, nfree, head.next, end);
//MCentral_FreeSpan updates sweepgen
}
@@ -2016,11 +1996,12 @@ runtime_updatememstats(GCStats *stats)
uint32 i;
uint64 stacks_inuse, smallfree;
uint64 *src, *dst;
+ MStats *pmstats;
if(stats)
runtime_memclr((byte*)stats, sizeof(*stats));
stacks_inuse = 0;
- for(mp=runtime_allm; mp; mp=mp->alllink) {
+ for(mp=runtime_getallm(); mp; mp=mp->alllink) {
//stacks_inuse += mp->stackinuse*FixedStack;
if(stats) {
src = (uint64*)&mp->gcstats;
@@ -2030,11 +2011,12 @@ runtime_updatememstats(GCStats *stats)
runtime_memclr((byte*)&mp->gcstats, sizeof(mp->gcstats));
}
}
- mstats.stacks_inuse = stacks_inuse;
- mstats.mcache_inuse = runtime_mheap.cachealloc.inuse;
- mstats.mspan_inuse = runtime_mheap.spanalloc.inuse;
- mstats.sys = mstats.heap_sys + mstats.stacks_sys + mstats.mspan_sys +
- mstats.mcache_sys + mstats.buckhash_sys + mstats.gc_sys + mstats.other_sys;
+ pmstats = mstats();
+ pmstats->stacks_inuse = stacks_inuse;
+ pmstats->mcache_inuse = runtime_mheap.cachealloc.inuse;
+ pmstats->mspan_inuse = runtime_mheap.spanalloc.inuse;
+ pmstats->sys = pmstats->heap_sys + pmstats->stacks_sys + pmstats->mspan_sys +
+ pmstats->mcache_sys + pmstats->buckhash_sys + pmstats->gc_sys + pmstats->other_sys;
// Calculate memory allocator stats.
// During program execution we only count number of frees and amount of freed memory.
@@ -2043,13 +2025,13 @@ runtime_updatememstats(GCStats *stats)
// Total number of mallocs is calculated as number of frees plus number of alive objects.
// Similarly, total amount of allocated memory is calculated as amount of freed memory
// plus amount of alive heap memory.
- mstats.alloc = 0;
- mstats.total_alloc = 0;
- mstats.nmalloc = 0;
- mstats.nfree = 0;
- for(i = 0; i < nelem(mstats.by_size); i++) {
- mstats.by_size[i].nmalloc = 0;
- mstats.by_size[i].nfree = 0;
+ pmstats->alloc = 0;
+ pmstats->total_alloc = 0;
+ pmstats->nmalloc = 0;
+ pmstats->nfree = 0;
+ for(i = 0; i < nelem(pmstats->by_size); i++) {
+ pmstats->by_size[i].nmalloc = 0;
+ pmstats->by_size[i].nfree = 0;
}
// Flush MCache's to MCentral.
@@ -2064,30 +2046,30 @@ runtime_updatememstats(GCStats *stats)
if(s->state != MSpanInUse)
continue;
if(s->sizeclass == 0) {
- mstats.nmalloc++;
- mstats.alloc += s->elemsize;
+ pmstats->nmalloc++;
+ pmstats->alloc += s->elemsize;
} else {
- mstats.nmalloc += s->ref;
- mstats.by_size[s->sizeclass].nmalloc += s->ref;
- mstats.alloc += s->ref*s->elemsize;
+ pmstats->nmalloc += s->ref;
+ pmstats->by_size[s->sizeclass].nmalloc += s->ref;
+ pmstats->alloc += s->ref*s->elemsize;
}
}
// Aggregate by size class.
smallfree = 0;
- mstats.nfree = runtime_mheap.nlargefree;
- for(i = 0; i < nelem(mstats.by_size); i++) {
- mstats.nfree += runtime_mheap.nsmallfree[i];
- mstats.by_size[i].nfree = runtime_mheap.nsmallfree[i];
- mstats.by_size[i].nmalloc += runtime_mheap.nsmallfree[i];
+ pmstats->nfree = runtime_mheap.nlargefree;
+ for(i = 0; i < nelem(pmstats->by_size); i++) {
+ pmstats->nfree += runtime_mheap.nsmallfree[i];
+ pmstats->by_size[i].nfree = runtime_mheap.nsmallfree[i];
+ pmstats->by_size[i].nmalloc += runtime_mheap.nsmallfree[i];
smallfree += runtime_mheap.nsmallfree[i] * runtime_class_to_size[i];
}
- mstats.nmalloc += mstats.nfree;
+ pmstats->nmalloc += pmstats->nfree;
// Calculate derived stats.
- mstats.total_alloc = mstats.alloc + runtime_mheap.largefree + smallfree;
- mstats.heap_alloc = mstats.alloc;
- mstats.heap_objects = mstats.nmalloc - mstats.nfree;
+ pmstats->total_alloc = pmstats->alloc + runtime_mheap.largefree + smallfree;
+ pmstats->heap_alloc = pmstats->alloc;
+ pmstats->heap_objects = pmstats->nmalloc - pmstats->nfree;
}
// Structure of arguments passed to function gc().
@@ -2125,11 +2107,12 @@ runtime_gc(int32 force)
G *g;
struct gc_args a;
int32 i;
+ MStats *pmstats;
// The atomic operations are not atomic if the uint64s
// are not aligned on uint64 boundaries. This has been
// a problem in the past.
- if((((uintptr)&work.empty) & 7) != 0)
+ if((((uintptr)&work.wempty) & 7) != 0)
runtime_throw("runtime: gc work buffer is misaligned");
if((((uintptr)&work.full) & 7) != 0)
runtime_throw("runtime: gc work buffer is misaligned");
@@ -2147,7 +2130,8 @@ runtime_gc(int32 force)
// while holding a lock. The next mallocgc
// without a lock will do the gc instead.
m = runtime_m();
- if(!mstats.enablegc || runtime_g() == m->g0 || m->locks > 0 || runtime_panicking)
+ pmstats = mstats();
+ if(!pmstats->enablegc || runtime_g() == m->g0 || m->locks > 0 || runtime_panicking() || m->preemptoff.len > 0)
return;
if(gcpercent == GcpercentUnknown) { // first time through
@@ -2159,11 +2143,11 @@ runtime_gc(int32 force)
if(gcpercent < 0)
return;
- runtime_semacquire(&runtime_worldsema, false);
- if(force==0 && mstats.heap_alloc < mstats.next_gc) {
+ runtime_acquireWorldsema();
+ if(force==0 && pmstats->heap_alloc < pmstats->next_gc) {
// typically threads which lost the race to grab
// worldsema exit here when gc is done.
- runtime_semrelease(&runtime_worldsema);
+ runtime_releaseWorldsema();
return;
}
@@ -2171,7 +2155,7 @@ runtime_gc(int32 force)
a.start_time = runtime_nanotime();
a.eagersweep = force >= 2;
m->gcing = 1;
- runtime_stoptheworld();
+ runtime_stopTheWorldWithSema();
clearpools();
@@ -2186,8 +2170,8 @@ runtime_gc(int32 force)
// switch to g0, call gc(&a), then switch back
g = runtime_g();
g->param = &a;
- g->status = Gwaiting;
- g->waitreason = "garbage collection";
+ g->atomicstatus = _Gwaiting;
+ g->waitreason = runtime_gostringnocopy((const byte*)"garbage collection");
runtime_mcall(mgc);
m = runtime_m();
}
@@ -2195,8 +2179,8 @@ runtime_gc(int32 force)
// all done
m->gcing = 0;
m->locks++;
- runtime_semrelease(&runtime_worldsema);
- runtime_starttheworld();
+ runtime_releaseWorldsema();
+ runtime_startTheWorldWithSema();
m->locks--;
// now that gc is done, kick off finalizer thread if needed
@@ -2214,7 +2198,7 @@ mgc(G *gp)
{
gc(gp->param);
gp->param = nil;
- gp->status = Grunning;
+ gp->atomicstatus = _Grunning;
runtime_gogo(gp);
}
@@ -2222,10 +2206,11 @@ static void
gc(struct gc_args *args)
{
M *m;
- int64 t0, t1, t2, t3, t4;
+ int64 tm0, tm1, tm2, tm3, tm4;
uint64 heap0, heap1, obj, ninstr;
GCStats stats;
uint32 i;
+ MStats *pmstats;
// Eface eface;
m = runtime_m();
@@ -2234,7 +2219,7 @@ gc(struct gc_args *args)
runtime_tracegc();
m->traceback = 2;
- t0 = args->start_time;
+ tm0 = args->start_time;
work.tstart = args->start_time;
if(CollectStats)
@@ -2242,12 +2227,12 @@ gc(struct gc_args *args)
m->locks++; // disable gc during mallocs in parforalloc
if(work.markfor == nil)
- work.markfor = runtime_parforalloc(MaxGcproc);
+ work.markfor = runtime_parforalloc(_MaxGcproc);
m->locks--;
- t1 = 0;
+ tm1 = 0;
if(runtime_debug.gctrace)
- t1 = runtime_nanotime();
+ tm1 = runtime_nanotime();
// Sweep what is not sweeped by bgsweep.
while(runtime_sweepone() != (uintptr)-1)
@@ -2256,23 +2241,23 @@ gc(struct gc_args *args)
work.nwait = 0;
work.ndone = 0;
work.nproc = runtime_gcprocs();
- runtime_parforsetup(work.markfor, work.nproc, RootCount + runtime_allglen, false, &markroot_funcval);
+ runtime_parforsetup(work.markfor, work.nproc, RootCount + runtime_getallglen(), false, &markroot_funcval);
if(work.nproc > 1) {
runtime_noteclear(&work.alldone);
runtime_helpgc(work.nproc);
}
- t2 = 0;
+ tm2 = 0;
if(runtime_debug.gctrace)
- t2 = runtime_nanotime();
+ tm2 = runtime_nanotime();
gchelperstart();
runtime_parfordo(work.markfor);
scanblock(nil, true);
- t3 = 0;
+ tm3 = 0;
if(runtime_debug.gctrace)
- t3 = runtime_nanotime();
+ tm3 = runtime_nanotime();
bufferList[m->helpgc].busy = 0;
if(work.nproc > 1)
@@ -2281,28 +2266,29 @@ gc(struct gc_args *args)
cachestats();
// next_gc calculation is tricky with concurrent sweep since we don't know size of live heap
// estimate what was live heap size after previous GC (for tracing only)
- heap0 = mstats.next_gc*100/(gcpercent+100);
+ pmstats = mstats();
+ heap0 = pmstats->next_gc*100/(gcpercent+100);
// conservatively set next_gc to high value assuming that everything is live
// concurrent/lazy sweep will reduce this number while discovering new garbage
- mstats.next_gc = mstats.heap_alloc+(mstats.heap_alloc-runtime_stacks_sys)*gcpercent/100;
+ pmstats->next_gc = pmstats->heap_alloc+(pmstats->heap_alloc-runtime_stacks_sys)*gcpercent/100;
- t4 = runtime_nanotime();
- mstats.last_gc = runtime_unixnanotime(); // must be Unix time to make sense to user
- mstats.pause_ns[mstats.numgc%nelem(mstats.pause_ns)] = t4 - t0;
- mstats.pause_end[mstats.numgc%nelem(mstats.pause_end)] = mstats.last_gc;
- mstats.pause_total_ns += t4 - t0;
- mstats.numgc++;
- if(mstats.debuggc)
- runtime_printf("pause %D\n", t4-t0);
+ tm4 = runtime_nanotime();
+ pmstats->last_gc = runtime_unixnanotime(); // must be Unix time to make sense to user
+ pmstats->pause_ns[pmstats->numgc%nelem(pmstats->pause_ns)] = tm4 - tm0;
+ pmstats->pause_end[pmstats->numgc%nelem(pmstats->pause_end)] = pmstats->last_gc;
+ pmstats->pause_total_ns += tm4 - tm0;
+ pmstats->numgc++;
+ if(pmstats->debuggc)
+ runtime_printf("pause %D\n", tm4-tm0);
if(runtime_debug.gctrace) {
- heap1 = mstats.heap_alloc;
+ heap1 = pmstats->heap_alloc;
runtime_updatememstats(&stats);
- if(heap1 != mstats.heap_alloc) {
- runtime_printf("runtime: mstats skew: heap=%D/%D\n", heap1, mstats.heap_alloc);
+ if(heap1 != pmstats->heap_alloc) {
+ runtime_printf("runtime: mstats skew: heap=%D/%D\n", heap1, pmstats->heap_alloc);
runtime_throw("mstats skew");
}
- obj = mstats.nmalloc - mstats.nfree;
+ obj = pmstats->nmalloc - pmstats->nfree;
stats.nprocyield += work.markfor->nprocyield;
stats.nosyield += work.markfor->nosyield;
@@ -2311,9 +2297,9 @@ gc(struct gc_args *args)
runtime_printf("gc%d(%d): %D+%D+%D+%D us, %D -> %D MB, %D (%D-%D) objects,"
" %d/%d/%d sweeps,"
" %D(%D) handoff, %D(%D) steal, %D/%D/%D yields\n",
- mstats.numgc, work.nproc, (t1-t0)/1000, (t2-t1)/1000, (t3-t2)/1000, (t4-t3)/1000,
+ pmstats->numgc, work.nproc, (tm1-tm0)/1000, (tm2-tm1)/1000, (tm3-tm2)/1000, (tm4-tm3)/1000,
heap0>>20, heap1>>20, obj,
- mstats.nmalloc, mstats.nfree,
+ pmstats->nmalloc, pmstats->nfree,
sweep.nspan, gcstats.nbgsweep, gcstats.npausesweep,
stats.nhandoff, stats.nhandoffcnt,
work.markfor->nsteal, work.markfor->nstealcnt,
@@ -2352,7 +2338,7 @@ gc(struct gc_args *args)
// Free the old cached array if necessary.
if(sweep.spans && sweep.spans != runtime_mheap.allspans)
- runtime_SysFree(sweep.spans, sweep.nspan*sizeof(sweep.spans[0]), &mstats.other_sys);
+ runtime_SysFree(sweep.spans, sweep.nspan*sizeof(sweep.spans[0]), &pmstats->other_sys);
// Cache the current array.
runtime_mheap.sweepspans = runtime_mheap.allspans;
runtime_mheap.sweepgen += 2;
@@ -2368,7 +2354,7 @@ gc(struct gc_args *args)
sweep.g = __go_go(bgsweep, nil);
else if(sweep.parked) {
sweep.parked = false;
- runtime_ready(sweep.g);
+ runtime_ready(sweep.g, 0, true);
}
runtime_unlock(&gclock);
} else {
@@ -2383,36 +2369,6 @@ gc(struct gc_args *args)
m->traceback = 0;
}
-extern uintptr runtime_sizeof_C_MStats
- __asm__ (GOSYM_PREFIX "runtime.Sizeof_C_MStats");
-
-void runtime_ReadMemStats(MStats *)
- __asm__ (GOSYM_PREFIX "runtime.ReadMemStats");
-
-void
-runtime_ReadMemStats(MStats *stats)
-{
- M *m;
-
- // Have to acquire worldsema to stop the world,
- // because stoptheworld can only be used by
- // one goroutine at a time, and there might be
- // a pending garbage collection already calling it.
- runtime_semacquire(&runtime_worldsema, false);
- m = runtime_m();
- m->gcing = 1;
- runtime_stoptheworld();
- runtime_updatememstats(nil);
- // Size of the trailing by_size array differs between Go and C,
- // NumSizeClasses was changed, but we can not change Go struct because of backward compatibility.
- runtime_memmove(stats, &mstats, runtime_sizeof_C_MStats);
- m->gcing = 0;
- m->locks++;
- runtime_semrelease(&runtime_worldsema);
- runtime_starttheworld();
- m->locks--;
-}
-
void runtime_debug_readGCStats(Slice*)
__asm__("runtime_debug.readGCStats");
@@ -2421,30 +2377,34 @@ runtime_debug_readGCStats(Slice *pauses)
{
uint64 *p;
uint32 i, n;
+ MStats *pmstats;
// Calling code in runtime/debug should make the slice large enough.
- if((size_t)pauses->cap < nelem(mstats.pause_ns)+3)
+ pmstats = mstats();
+ if((size_t)pauses->cap < nelem(pmstats->pause_ns)+3)
runtime_throw("runtime: short slice passed to readGCStats");
// Pass back: pauses, last gc (absolute time), number of gc, total pause ns.
p = (uint64*)pauses->array;
runtime_lock(&runtime_mheap);
- n = mstats.numgc;
- if(n > nelem(mstats.pause_ns))
- n = nelem(mstats.pause_ns);
+ n = pmstats->numgc;
+ if(n > nelem(pmstats->pause_ns))
+ n = nelem(pmstats->pause_ns);
// The pause buffer is circular. The most recent pause is at
// pause_ns[(numgc-1)%nelem(pause_ns)], and then backward
// from there to go back farther in time. We deliver the times
// most recent first (in p[0]).
- for(i=0; i<n; i++)
- p[i] = mstats.pause_ns[(mstats.numgc-1-i)%nelem(mstats.pause_ns)];
+ for(i=0; i<n; i++) {
+ p[i] = pmstats->pause_ns[(pmstats->numgc-1-i)%nelem(pmstats->pause_ns)];
+ p[n+i] = pmstats->pause_end[(pmstats->numgc-1-i)%nelem(pmstats->pause_ns)];
+ }
- p[n] = mstats.last_gc;
- p[n+1] = mstats.numgc;
- p[n+2] = mstats.pause_total_ns;
+ p[n+n] = pmstats->last_gc;
+ p[n+n+1] = pmstats->numgc;
+ p[n+n+2] = pmstats->pause_total_ns;
runtime_unlock(&runtime_mheap);
- pauses->__count = n+3;
+ pauses->__count = n+n+3;
}
int32
@@ -2468,7 +2428,7 @@ gchelperstart(void)
M *m;
m = runtime_m();
- if(m->helpgc < 0 || m->helpgc >= MaxGcproc)
+ if(m->helpgc < 0 || m->helpgc >= _MaxGcproc)
runtime_throw("gchelperstart: bad m->helpgc");
if(runtime_xchg(&bufferList[m->helpgc].busy, 1))
runtime_throw("gchelperstart: already busy");
@@ -2492,8 +2452,8 @@ runfinq(void* dummy __attribute__ ((unused)))
fb = nil;
next = nil;
i = 0;
- ef.__type_descriptor = nil;
- ef.__object = nil;
+ ef._type = nil;
+ ef.data = nil;
// force flush to memory
USED(&f);
@@ -2522,21 +2482,23 @@ runfinq(void* dummy __attribute__ ((unused)))
f = &fb->fin[i];
fint = ((const Type**)f->ft->__in.array)[0];
- if((fint->__code & kindMask) == KindPtr) {
+ if((fint->__code & kindMask) == kindPtr) {
// direct use of pointer
param = &f->arg;
} else if(((const InterfaceType*)fint)->__methods.__count == 0) {
// convert to empty interface
- ef.__type_descriptor = (const Type*)f->ot;
- ef.__object = f->arg;
+ // using memcpy as const_cast.
+ memcpy(&ef._type, &f->ot,
+ sizeof ef._type);
+ ef.data = f->arg;
param = &ef;
} else {
// convert to interface with methods
- iface.__methods = __go_convert_interface_2((const Type*)fint,
- (const Type*)f->ot,
- 1);
- iface.__object = f->arg;
- if(iface.__methods == nil)
+ iface.tab = getitab(fint,
+ (const Type*)f->ot,
+ true);
+ iface.data = f->arg;
+ if(iface.data == nil)
runtime_throw("invalid type conversion in runfinq");
param = &iface;
}
@@ -2558,8 +2520,8 @@ runfinq(void* dummy __attribute__ ((unused)))
fb = nil;
next = nil;
i = 0;
- ef.__type_descriptor = nil;
- ef.__object = nil;
+ ef._type = nil;
+ ef.data = nil;
runtime_gc(1); // trigger another gc to clean up the finalized objects, if possible
}
}
@@ -2578,6 +2540,20 @@ runtime_createfing(void)
runtime_unlock(&gclock);
}
+bool getfingwait() __asm__(GOSYM_PREFIX "runtime.getfingwait");
+bool
+getfingwait()
+{
+ return runtime_fingwait;
+}
+
+bool getfingwake() __asm__(GOSYM_PREFIX "runtime.getfingwake");
+bool
+getfingwake()
+{
+ return runtime_fingwake;
+}
+
G*
runtime_wakefing(void)
{
@@ -2751,41 +2727,6 @@ runtime_MHeap_MapBits(MHeap *h)
if(h->bitmap_mapped >= n)
return;
- runtime_SysMap(h->arena_start - n, n - h->bitmap_mapped, h->arena_reserved, &mstats.gc_sys);
+ runtime_SysMap(h->arena_start - n, n - h->bitmap_mapped, h->arena_reserved, &mstats()->gc_sys);
h->bitmap_mapped = n;
}
-
-// typedmemmove copies a value of type t to dst from src.
-
-extern void typedmemmove(const Type* td, void *dst, const void *src)
- __asm__ (GOSYM_PREFIX "reflect.typedmemmove");
-
-void
-typedmemmove(const Type* td, void *dst, const void *src)
-{
- runtime_memmove(dst, src, td->__size);
-}
-
-// typedslicecopy copies a slice of elemType values from src to dst,
-// returning the number of elements copied.
-
-extern intgo typedslicecopy(const Type* elem, Slice dst, Slice src)
- __asm__ (GOSYM_PREFIX "reflect.typedslicecopy");
-
-intgo
-typedslicecopy(const Type* elem, Slice dst, Slice src)
-{
- intgo n;
- void *dstp;
- void *srcp;
-
- n = dst.__count;
- if (n > src.__count)
- n = src.__count;
- if (n == 0)
- return 0;
- dstp = dst.__values;
- srcp = src.__values;
- memmove(dstp, srcp, (uintptr_t)n * elem->__size);
- return n;
-}
diff --git a/libgo/runtime/mheap.c b/libgo/runtime/mheap.c
index 793915ef44..c167bdc819 100644
--- a/libgo/runtime/mheap.c
+++ b/libgo/runtime/mheap.c
@@ -36,7 +36,7 @@ RecordSpan(void *vh, byte *p)
cap = 64*1024/sizeof(all[0]);
if(cap < h->nspancap*3/2)
cap = h->nspancap*3/2;
- all = (MSpan**)runtime_SysAlloc(cap*sizeof(all[0]), &mstats.other_sys);
+ all = (MSpan**)runtime_SysAlloc(cap*sizeof(all[0]), &mstats()->other_sys);
if(all == nil)
runtime_throw("runtime: cannot allocate memory");
if(h->allspans) {
@@ -44,7 +44,7 @@ RecordSpan(void *vh, byte *p)
// Don't free the old array if it's referenced by sweep.
// See the comment in mgc0.c.
if(h->allspans != runtime_mheap.sweepspans)
- runtime_SysFree(h->allspans, h->nspancap*sizeof(all[0]), &mstats.other_sys);
+ runtime_SysFree(h->allspans, h->nspancap*sizeof(all[0]), &mstats()->other_sys);
}
h->allspans = all;
h->nspancap = cap;
@@ -56,12 +56,14 @@ RecordSpan(void *vh, byte *p)
void
runtime_MHeap_Init(MHeap *h)
{
+ MStats *pmstats;
uint32 i;
- runtime_FixAlloc_Init(&h->spanalloc, sizeof(MSpan), RecordSpan, h, &mstats.mspan_sys);
- runtime_FixAlloc_Init(&h->cachealloc, sizeof(MCache), nil, nil, &mstats.mcache_sys);
- runtime_FixAlloc_Init(&h->specialfinalizeralloc, sizeof(SpecialFinalizer), nil, nil, &mstats.other_sys);
- runtime_FixAlloc_Init(&h->specialprofilealloc, sizeof(SpecialProfile), nil, nil, &mstats.other_sys);
+ pmstats = mstats();
+ runtime_FixAlloc_Init(&h->spanalloc, sizeof(MSpan), RecordSpan, h, &pmstats->mspan_sys);
+ runtime_FixAlloc_Init(&h->cachealloc, sizeof(MCache), nil, nil, &pmstats->mcache_sys);
+ runtime_FixAlloc_Init(&h->specialfinalizeralloc, sizeof(SpecialFinalizer), nil, nil, &pmstats->other_sys);
+ runtime_FixAlloc_Init(&h->specialprofilealloc, sizeof(SpecialProfile), nil, nil, &pmstats->other_sys);
// h->mapcache needs no init
for(i=0; i<nelem(h->free); i++) {
runtime_MSpanList_Init(&h->free[i]);
@@ -88,7 +90,7 @@ runtime_MHeap_MapSpans(MHeap *h)
n = ROUND(n, pagesize);
if(h->spans_mapped >= n)
return;
- runtime_SysMap((byte*)h->spans + h->spans_mapped, n - h->spans_mapped, h->arena_reserved, &mstats.other_sys);
+ runtime_SysMap((byte*)h->spans + h->spans_mapped, n - h->spans_mapped, h->arena_reserved, &mstats()->other_sys);
h->spans_mapped = n;
}
@@ -173,17 +175,19 @@ MHeap_Reclaim(MHeap *h, uintptr npage)
MSpan*
runtime_MHeap_Alloc(MHeap *h, uintptr npage, int32 sizeclass, bool large, bool needzero)
{
+ MStats *pmstats;
MSpan *s;
runtime_lock(h);
- mstats.heap_alloc += runtime_m()->mcache->local_cachealloc;
+ pmstats = mstats();
+ pmstats->heap_alloc += (intptr)runtime_m()->mcache->local_cachealloc;
runtime_m()->mcache->local_cachealloc = 0;
s = MHeap_AllocLocked(h, npage, sizeclass);
if(s != nil) {
- mstats.heap_inuse += npage<<PageShift;
+ pmstats->heap_inuse += npage<<PageShift;
if(large) {
- mstats.heap_objects++;
- mstats.heap_alloc += npage<<PageShift;
+ pmstats->heap_objects++;
+ pmstats->heap_alloc += npage<<PageShift;
// Swept spans are at the end of lists.
if(s->npages < nelem(h->free))
runtime_MSpanList_InsertBack(&h->busy[s->npages], s);
@@ -237,8 +241,8 @@ HaveSpan:
runtime_MSpanList_Remove(s);
runtime_atomicstore(&s->sweepgen, h->sweepgen);
s->state = MSpanInUse;
- mstats.heap_idle -= s->npages<<PageShift;
- mstats.heap_released -= s->npreleased<<PageShift;
+ mstats()->heap_idle -= s->npages<<PageShift;
+ mstats()->heap_released -= s->npreleased<<PageShift;
if(s->npreleased > 0)
runtime_SysUsed((void*)(s->start<<PageShift), s->npages<<PageShift);
s->npreleased = 0;
@@ -326,7 +330,7 @@ MHeap_Grow(MHeap *h, uintptr npage)
v = runtime_MHeap_SysAlloc(h, ask);
}
if(v == nil) {
- runtime_printf("runtime: out of memory: cannot allocate %D-byte block (%D in use)\n", (uint64)ask, mstats.heap_sys);
+ runtime_printf("runtime: out of memory: cannot allocate %D-byte block (%D in use)\n", (uint64)ask, mstats()->heap_sys);
return false;
}
}
@@ -377,7 +381,7 @@ runtime_MHeap_LookupMaybe(MHeap *h, void *v)
q = p;
q -= (uintptr)h->arena_start >> PageShift;
s = h->spans[q];
- if(s == nil || p < s->start || (byte*)v >= s->limit || s->state != MSpanInUse)
+ if(s == nil || p < s->start || (uintptr)v >= s->limit || s->state != MSpanInUse)
return nil;
return s;
}
@@ -386,13 +390,16 @@ runtime_MHeap_LookupMaybe(MHeap *h, void *v)
void
runtime_MHeap_Free(MHeap *h, MSpan *s, int32 acct)
{
+ MStats *pmstats;
+
runtime_lock(h);
- mstats.heap_alloc += runtime_m()->mcache->local_cachealloc;
+ pmstats = mstats();
+ pmstats->heap_alloc += (intptr)runtime_m()->mcache->local_cachealloc;
runtime_m()->mcache->local_cachealloc = 0;
- mstats.heap_inuse -= s->npages<<PageShift;
+ pmstats->heap_inuse -= s->npages<<PageShift;
if(acct) {
- mstats.heap_alloc -= s->npages<<PageShift;
- mstats.heap_objects--;
+ pmstats->heap_alloc -= s->npages<<PageShift;
+ pmstats->heap_objects--;
}
MHeap_FreeLocked(h, s);
runtime_unlock(h);
@@ -411,7 +418,7 @@ MHeap_FreeLocked(MHeap *h, MSpan *s)
s, s->start<<PageShift, s->state, s->ref, s->sweepgen, h->sweepgen);
runtime_throw("MHeap_FreeLocked - invalid free");
}
- mstats.heap_idle += s->npages<<PageShift;
+ mstats()->heap_idle += s->npages<<PageShift;
s->state = MSpanFree;
runtime_MSpanList_Remove(s);
// Stamp newly unused spans. The scavenger will use that
@@ -472,7 +479,7 @@ scavengelist(MSpan *list, uint64 now, uint64 limit)
for(s=list->next; s != list; s=s->next) {
if((now - s->unusedsince) > limit && s->npreleased != s->npages) {
released = (s->npages - s->npreleased) << PageShift;
- mstats.heap_released += released;
+ mstats()->heap_released += released;
sumreleased += released;
s->npreleased = s->npages;
@@ -508,8 +515,8 @@ scavenge(int32 k, uint64 now, uint64 limit)
if(sumreleased > 0)
runtime_printf("scvg%d: %D MB released\n", k, (uint64)sumreleased>>20);
runtime_printf("scvg%d: inuse: %D, idle: %D, sys: %D, released: %D, consumed: %D (MB)\n",
- k, mstats.heap_inuse>>20, mstats.heap_idle>>20, mstats.heap_sys>>20,
- mstats.heap_released>>20, (mstats.heap_sys - mstats.heap_released)>>20);
+ k, mstats()->heap_inuse>>20, mstats()->heap_idle>>20, mstats()->heap_sys>>20,
+ mstats()->heap_released>>20, (mstats()->heap_sys - mstats()->heap_released)>>20);
}
}
@@ -550,7 +557,7 @@ runtime_MHeap_Scavenger(void* dummy)
runtime_lock(h);
unixnow = runtime_unixnanotime();
- if(unixnow - mstats.last_gc > forcegc) {
+ if(unixnow - mstats()->last_gc > forcegc) {
runtime_unlock(h);
// The scavenger can not block other goroutines,
// otherwise deadlock detector can fire spuriously.
@@ -597,7 +604,7 @@ runtime_MSpan_Init(MSpan *span, PageID start, uintptr npages)
span->unusedsince = 0;
span->npreleased = 0;
span->types.compression = MTypes_Empty;
- span->specialLock.key = 0;
+ span->speciallock.key = 0;
span->specials = nil;
span->needzero = 0;
span->freebuf = nil;
@@ -681,13 +688,13 @@ addspecial(void *p, Special *s)
offset = (uintptr)p - (span->start << PageShift);
kind = s->kind;
- runtime_lock(&span->specialLock);
+ runtime_lock(&span->speciallock);
// Find splice point, check for existing record.
t = &span->specials;
while((x = *t) != nil) {
if(offset == x->offset && kind == x->kind) {
- runtime_unlock(&span->specialLock);
+ runtime_unlock(&span->speciallock);
runtime_m()->locks--;
return false; // already exists
}
@@ -699,7 +706,7 @@ addspecial(void *p, Special *s)
s->offset = offset;
s->next = x;
*t = s;
- runtime_unlock(&span->specialLock);
+ runtime_unlock(&span->speciallock);
runtime_m()->locks--;
return true;
}
@@ -725,20 +732,20 @@ removespecial(void *p, byte kind)
offset = (uintptr)p - (span->start << PageShift);
- runtime_lock(&span->specialLock);
+ runtime_lock(&span->speciallock);
t = &span->specials;
while((s = *t) != nil) {
// This function is used for finalizers only, so we don't check for
// "interior" specials (p must be exactly equal to s->offset).
if(offset == s->offset && kind == s->kind) {
*t = s->next;
- runtime_unlock(&span->specialLock);
+ runtime_unlock(&span->speciallock);
runtime_m()->locks--;
return s;
}
t = &s->next;
}
- runtime_unlock(&span->specialLock);
+ runtime_unlock(&span->speciallock);
runtime_m()->locks--;
return nil;
}
@@ -838,7 +845,7 @@ runtime_freeallspecials(MSpan *span, void *p, uintptr size)
// this is required to not cause deadlock between span->specialLock and proflock
list = nil;
offset = (uintptr)p - (span->start << PageShift);
- runtime_lock(&span->specialLock);
+ runtime_lock(&span->speciallock);
t = &span->specials;
while((s = *t) != nil) {
if(offset + size <= s->offset)
@@ -850,7 +857,7 @@ runtime_freeallspecials(MSpan *span, void *p, uintptr size)
} else
t = &s->next;
}
- runtime_unlock(&span->specialLock);
+ runtime_unlock(&span->speciallock);
while(list != nil) {
s = list;
@@ -878,7 +885,7 @@ runtime_MHeap_SplitSpan(MHeap *h, MSpan *s)
// remove the span from whatever list it is in now
if(s->sizeclass > 0) {
- // must be in h->central[x].empty
+ // must be in h->central[x].mempty
c = &h->central[s->sizeclass];
runtime_lock(c);
runtime_MSpanList_Remove(s);
@@ -908,7 +915,7 @@ runtime_MHeap_SplitSpan(MHeap *h, MSpan *s)
// Allocate a new span for the first half.
t = runtime_FixAlloc_Alloc(&h->spanalloc);
runtime_MSpan_Init(t, s->start, npages/2);
- t->limit = (byte*)((t->start + npages/2) << PageShift);
+ t->limit = (uintptr)((t->start + npages/2) << PageShift);
t->state = MSpanInUse;
t->elemsize = npages << (PageShift - 1);
t->sweepgen = s->sweepgen;
@@ -937,7 +944,7 @@ runtime_MHeap_SplitSpan(MHeap *h, MSpan *s)
c = &h->central[s->sizeclass];
runtime_lock(c);
// swept spans are at the end of the list
- runtime_MSpanList_InsertBack(&c->empty, s);
+ runtime_MSpanList_InsertBack(&c->mempty, s);
runtime_unlock(c);
} else {
// Swept spans are at the end of lists.
diff --git a/libgo/runtime/mprof.goc b/libgo/runtime/mprof.goc
deleted file mode 100644
index 4e8cfc9cac..0000000000
--- a/libgo/runtime/mprof.goc
+++ /dev/null
@@ -1,562 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Malloc profiling.
-// Patterned after tcmalloc's algorithms; shorter code.
-
-package runtime
-#include "runtime.h"
-#include "arch.h"
-#include "malloc.h"
-#include "defs.h"
-#include "go-type.h"
-#include "go-string.h"
-
-// NOTE(rsc): Everything here could use cas if contention became an issue.
-static Lock proflock;
-
-// All memory allocations are local and do not escape outside of the profiler.
-// The profiler is forbidden from referring to garbage-collected memory.
-
-enum { MProf, BProf }; // profile types
-
-// Per-call-stack profiling information.
-// Lookup by hashing call stack into a linked-list hash table.
-struct Bucket
-{
- Bucket *next; // next in hash list
- Bucket *allnext; // next in list of all mbuckets/bbuckets
- int32 typ;
- // Generally unions can break precise GC,
- // this one is fine because it does not contain pointers.
- union
- {
- struct // typ == MProf
- {
- // The following complex 3-stage scheme of stats accumulation
- // is required to obtain a consistent picture of mallocs and frees
- // for some point in time.
- // The problem is that mallocs come in real time, while frees
- // come only after a GC during concurrent sweeping. So if we would
- // naively count them, we would get a skew toward mallocs.
- //
- // Mallocs are accounted in recent stats.
- // Explicit frees are accounted in recent stats.
- // GC frees are accounted in prev stats.
- // After GC prev stats are added to final stats and
- // recent stats are moved into prev stats.
- uintptr allocs;
- uintptr frees;
- uintptr alloc_bytes;
- uintptr free_bytes;
-
- uintptr prev_allocs; // since last but one till last gc
- uintptr prev_frees;
- uintptr prev_alloc_bytes;
- uintptr prev_free_bytes;
-
- uintptr recent_allocs; // since last gc till now
- uintptr recent_frees;
- uintptr recent_alloc_bytes;
- uintptr recent_free_bytes;
-
- };
- struct // typ == BProf
- {
- int64 count;
- int64 cycles;
- };
- };
- uintptr hash; // hash of size + stk
- uintptr size;
- uintptr nstk;
- Location stk[1];
-};
-enum {
- BuckHashSize = 179999,
-};
-static Bucket **buckhash;
-static Bucket *mbuckets; // memory profile buckets
-static Bucket *bbuckets; // blocking profile buckets
-static uintptr bucketmem;
-
-// Return the bucket for stk[0:nstk], allocating new bucket if needed.
-static Bucket*
-stkbucket(int32 typ, uintptr size, Location *stk, int32 nstk, bool alloc)
-{
- int32 i, j;
- uintptr h;
- Bucket *b;
-
- if(buckhash == nil) {
- buckhash = runtime_SysAlloc(BuckHashSize*sizeof buckhash[0], &mstats.buckhash_sys);
- if(buckhash == nil)
- runtime_throw("runtime: cannot allocate memory");
- }
-
- // Hash stack.
- h = 0;
- for(i=0; i<nstk; i++) {
- h += stk[i].pc;
- h += h<<10;
- h ^= h>>6;
- }
- // hash in size
- h += size;
- h += h<<10;
- h ^= h>>6;
- // finalize
- h += h<<3;
- h ^= h>>11;
-
- i = h%BuckHashSize;
- for(b = buckhash[i]; b; b=b->next) {
- if(b->typ == typ && b->hash == h && b->size == size && b->nstk == (uintptr)nstk) {
- for(j = 0; j < nstk; j++) {
- if(b->stk[j].pc != stk[j].pc ||
- b->stk[j].lineno != stk[j].lineno ||
- !__go_strings_equal(b->stk[j].filename, stk[j].filename))
- break;
- }
- if (j == nstk)
- return b;
- }
- }
-
- if(!alloc)
- return nil;
-
- b = runtime_persistentalloc(sizeof *b + nstk*sizeof stk[0], 0, &mstats.buckhash_sys);
- bucketmem += sizeof *b + nstk*sizeof stk[0];
- runtime_memmove(b->stk, stk, nstk*sizeof stk[0]);
- b->typ = typ;
- b->hash = h;
- b->size = size;
- b->nstk = nstk;
- b->next = buckhash[i];
- buckhash[i] = b;
- if(typ == MProf) {
- b->allnext = mbuckets;
- mbuckets = b;
- } else {
- b->allnext = bbuckets;
- bbuckets = b;
- }
- return b;
-}
-
-static void
-MProf_GC(void)
-{
- Bucket *b;
-
- for(b=mbuckets; b; b=b->allnext) {
- b->allocs += b->prev_allocs;
- b->frees += b->prev_frees;
- b->alloc_bytes += b->prev_alloc_bytes;
- b->free_bytes += b->prev_free_bytes;
-
- b->prev_allocs = b->recent_allocs;
- b->prev_frees = b->recent_frees;
- b->prev_alloc_bytes = b->recent_alloc_bytes;
- b->prev_free_bytes = b->recent_free_bytes;
-
- b->recent_allocs = 0;
- b->recent_frees = 0;
- b->recent_alloc_bytes = 0;
- b->recent_free_bytes = 0;
- }
-}
-
-// Record that a gc just happened: all the 'recent' statistics are now real.
-void
-runtime_MProf_GC(void)
-{
- runtime_lock(&proflock);
- MProf_GC();
- runtime_unlock(&proflock);
-}
-
-// Called by malloc to record a profiled block.
-void
-runtime_MProf_Malloc(void *p, uintptr size)
-{
- Location stk[32];
- Bucket *b;
- int32 nstk;
-
- nstk = runtime_callers(1, stk, nelem(stk), false);
- runtime_lock(&proflock);
- b = stkbucket(MProf, size, stk, nstk, true);
- b->recent_allocs++;
- b->recent_alloc_bytes += size;
- runtime_unlock(&proflock);
-
- // Setprofilebucket locks a bunch of other mutexes, so we call it outside of proflock.
- // This reduces potential contention and chances of deadlocks.
- // Since the object must be alive during call to MProf_Malloc,
- // it's fine to do this non-atomically.
- runtime_setprofilebucket(p, b);
-}
-
-// Called when freeing a profiled block.
-void
-runtime_MProf_Free(Bucket *b, uintptr size, bool freed)
-{
- runtime_lock(&proflock);
- if(freed) {
- b->recent_frees++;
- b->recent_free_bytes += size;
- } else {
- b->prev_frees++;
- b->prev_free_bytes += size;
- }
- runtime_unlock(&proflock);
-}
-
-int64 runtime_blockprofilerate; // in CPU ticks
-
-void runtime_SetBlockProfileRate(intgo) __asm__ (GOSYM_PREFIX "runtime.SetBlockProfileRate");
-
-void
-runtime_SetBlockProfileRate(intgo rate)
-{
- int64 r;
-
- if(rate <= 0)
- r = 0; // disable profiling
- else {
- // convert ns to cycles, use float64 to prevent overflow during multiplication
- r = (float64)rate*runtime_tickspersecond()/(1000*1000*1000);
- if(r == 0)
- r = 1;
- }
- runtime_atomicstore64((uint64*)&runtime_blockprofilerate, r);
-}
-
-void
-runtime_blockevent(int64 cycles, int32 skip)
-{
- int32 nstk;
- int64 rate;
- Location stk[32];
- Bucket *b;
-
- if(cycles <= 0)
- return;
- rate = runtime_atomicload64((uint64*)&runtime_blockprofilerate);
- if(rate <= 0 || (rate > cycles && runtime_fastrand1()%rate > cycles))
- return;
-
- nstk = runtime_callers(skip, stk, nelem(stk), false);
- runtime_lock(&proflock);
- b = stkbucket(BProf, 0, stk, nstk, true);
- b->count++;
- b->cycles += cycles;
- runtime_unlock(&proflock);
-}
-
-// Go interface to profile data. (Declared in debug.go)
-
-// Must match MemProfileRecord in debug.go.
-typedef struct Record Record;
-struct Record {
- int64 alloc_bytes, free_bytes;
- int64 alloc_objects, free_objects;
- uintptr stk[32];
-};
-
-// Write b's data to r.
-static void
-record(Record *r, Bucket *b)
-{
- uint32 i;
-
- r->alloc_bytes = b->alloc_bytes;
- r->free_bytes = b->free_bytes;
- r->alloc_objects = b->allocs;
- r->free_objects = b->frees;
- for(i=0; i<b->nstk && i<nelem(r->stk); i++)
- r->stk[i] = b->stk[i].pc;
- for(; i<nelem(r->stk); i++)
- r->stk[i] = 0;
-}
-
-func MemProfile(p Slice, include_inuse_zero bool) (n int, ok bool) {
- Bucket *b;
- Record *r;
- bool clear;
-
- runtime_lock(&proflock);
- n = 0;
- clear = true;
- for(b=mbuckets; b; b=b->allnext) {
- if(include_inuse_zero || b->alloc_bytes != b->free_bytes)
- n++;
- if(b->allocs != 0 || b->frees != 0)
- clear = false;
- }
- if(clear) {
- // Absolutely no data, suggesting that a garbage collection
- // has not yet happened. In order to allow profiling when
- // garbage collection is disabled from the beginning of execution,
- // accumulate stats as if a GC just happened, and recount buckets.
- MProf_GC();
- MProf_GC();
- n = 0;
- for(b=mbuckets; b; b=b->allnext)
- if(include_inuse_zero || b->alloc_bytes != b->free_bytes)
- n++;
- }
- ok = false;
- if(n <= p.__count) {
- ok = true;
- r = (Record*)p.__values;
- for(b=mbuckets; b; b=b->allnext)
- if(include_inuse_zero || b->alloc_bytes != b->free_bytes)
- record(r++, b);
- }
- runtime_unlock(&proflock);
-}
-
-void
-runtime_MProf_Mark(struct Workbuf **wbufp, void (*enqueue1)(struct Workbuf**, Obj))
-{
- // buckhash is not allocated via mallocgc.
- enqueue1(wbufp, (Obj){(byte*)&mbuckets, sizeof mbuckets, 0});
- enqueue1(wbufp, (Obj){(byte*)&bbuckets, sizeof bbuckets, 0});
-}
-
-void
-runtime_iterate_memprof(void (*callback)(Bucket*, uintptr, Location*, uintptr, uintptr, uintptr))
-{
- Bucket *b;
-
- runtime_lock(&proflock);
- for(b=mbuckets; b; b=b->allnext) {
- callback(b, b->nstk, b->stk, b->size, b->allocs, b->frees);
- }
- runtime_unlock(&proflock);
-}
-
-// Must match BlockProfileRecord in debug.go.
-typedef struct BRecord BRecord;
-struct BRecord {
- int64 count;
- int64 cycles;
- uintptr stk[32];
-};
-
-func BlockProfile(p Slice) (n int, ok bool) {
- Bucket *b;
- BRecord *r;
- int32 i;
-
- runtime_lock(&proflock);
- n = 0;
- for(b=bbuckets; b; b=b->allnext)
- n++;
- ok = false;
- if(n <= p.__count) {
- ok = true;
- r = (BRecord*)p.__values;
- for(b=bbuckets; b; b=b->allnext, r++) {
- r->count = b->count;
- r->cycles = b->cycles;
- for(i=0; (uintptr)i<b->nstk && (uintptr)i<nelem(r->stk); i++)
- r->stk[i] = b->stk[i].pc;
- for(; (uintptr)i<nelem(r->stk); i++)
- r->stk[i] = 0;
- }
- }
- runtime_unlock(&proflock);
-}
-
-// Must match StackRecord in debug.go.
-typedef struct TRecord TRecord;
-struct TRecord {
- uintptr stk[32];
-};
-
-func ThreadCreateProfile(p Slice) (n int, ok bool) {
- TRecord *r;
- M *first, *mp;
- int32 i;
-
- first = runtime_atomicloadp(&runtime_allm);
- n = 0;
- for(mp=first; mp; mp=mp->alllink)
- n++;
- ok = false;
- if(n <= p.__count) {
- ok = true;
- r = (TRecord*)p.__values;
- for(mp=first; mp; mp=mp->alllink) {
- for(i = 0; (uintptr)i < nelem(r->stk); i++) {
- r->stk[i] = mp->createstack[i].pc;
- }
- r++;
- }
- }
-}
-
-func Stack(b Slice, all bool) (n int) {
- byte *pc;
- bool enablegc = false;
-
- pc = (byte*)(uintptr)runtime_getcallerpc(&b);
-
- if(all) {
- runtime_semacquire(&runtime_worldsema, false);
- runtime_m()->gcing = 1;
- runtime_stoptheworld();
- enablegc = mstats.enablegc;
- mstats.enablegc = false;
- }
-
- if(b.__count == 0)
- n = 0;
- else{
- G* g = runtime_g();
- g->writebuf = (byte*)b.__values;
- g->writenbuf = b.__count;
- USED(pc);
- runtime_goroutineheader(g);
- runtime_traceback();
- runtime_printcreatedby(g);
- if(all)
- runtime_tracebackothers(g);
- n = b.__count - g->writenbuf;
- g->writebuf = nil;
- g->writenbuf = 0;
- }
-
- if(all) {
- runtime_m()->gcing = 0;
- mstats.enablegc = enablegc;
- runtime_semrelease(&runtime_worldsema);
- runtime_starttheworld();
- }
-}
-
-static void
-saveg(G *gp, TRecord *r)
-{
- int32 n, i;
- Location locstk[nelem(r->stk)];
-
- if(gp == runtime_g()) {
- n = runtime_callers(0, locstk, nelem(r->stk), false);
- for(i = 0; i < n; i++)
- r->stk[i] = locstk[i].pc;
- }
- else {
- // FIXME: Not implemented.
- n = 0;
- }
- if((size_t)n < nelem(r->stk))
- r->stk[n] = 0;
-}
-
-func GoroutineProfile(b Slice) (n int, ok bool) {
- uintptr i;
- TRecord *r;
- G *gp;
-
- ok = false;
- n = runtime_gcount();
- if(n <= b.__count) {
- runtime_semacquire(&runtime_worldsema, false);
- runtime_m()->gcing = 1;
- runtime_stoptheworld();
-
- n = runtime_gcount();
- if(n <= b.__count) {
- G* g = runtime_g();
- ok = true;
- r = (TRecord*)b.__values;
- saveg(g, r++);
- for(i = 0; i < runtime_allglen; i++) {
- gp = runtime_allg[i];
- if(gp == g || gp->status == Gdead)
- continue;
- saveg(gp, r++);
- }
- }
-
- runtime_m()->gcing = 0;
- runtime_semrelease(&runtime_worldsema);
- runtime_starttheworld();
- }
-}
-
-// Tracing of alloc/free/gc.
-
-static Lock tracelock;
-
-static const char*
-typeinfoname(int32 typeinfo)
-{
- if(typeinfo == TypeInfo_SingleObject)
- return "single object";
- else if(typeinfo == TypeInfo_Array)
- return "array";
- else if(typeinfo == TypeInfo_Chan)
- return "channel";
- runtime_throw("typinfoname: unknown type info");
- return nil;
-}
-
-void
-runtime_tracealloc(void *p, uintptr size, uintptr typ)
-{
- const char *name;
- Type *type;
-
- runtime_lock(&tracelock);
- runtime_m()->traceback = 2;
- type = (Type*)(typ & ~3);
- name = typeinfoname(typ & 3);
- if(type == nil)
- runtime_printf("tracealloc(%p, %p, %s)\n", p, size, name);
- else
- runtime_printf("tracealloc(%p, %p, %s of %S)\n", p, size, name, *type->__reflection);
- if(runtime_m()->curg == nil || runtime_g() == runtime_m()->curg) {
- runtime_goroutineheader(runtime_g());
- runtime_traceback();
- } else {
- runtime_goroutineheader(runtime_m()->curg);
- runtime_traceback();
- }
- runtime_printf("\n");
- runtime_m()->traceback = 0;
- runtime_unlock(&tracelock);
-}
-
-void
-runtime_tracefree(void *p, uintptr size)
-{
- runtime_lock(&tracelock);
- runtime_m()->traceback = 2;
- runtime_printf("tracefree(%p, %p)\n", p, size);
- runtime_goroutineheader(runtime_g());
- runtime_traceback();
- runtime_printf("\n");
- runtime_m()->traceback = 0;
- runtime_unlock(&tracelock);
-}
-
-void
-runtime_tracegc(void)
-{
- runtime_lock(&tracelock);
- runtime_m()->traceback = 2;
- runtime_printf("tracegc()\n");
- // running on m->g0 stack; show all non-g0 goroutines
- runtime_tracebackothers(runtime_g());
- runtime_printf("end tracegc\n");
- runtime_printf("\n");
- runtime_m()->traceback = 0;
- runtime_unlock(&tracelock);
-}
diff --git a/libgo/runtime/msize.c b/libgo/runtime/msize.c
index 34509d0456..b82c709297 100644
--- a/libgo/runtime/msize.c
+++ b/libgo/runtime/msize.c
@@ -29,8 +29,8 @@
#include "arch.h"
#include "malloc.h"
-int32 runtime_class_to_size[NumSizeClasses];
-int32 runtime_class_to_allocnpages[NumSizeClasses];
+int32 runtime_class_to_size[_NumSizeClasses];
+int32 runtime_class_to_allocnpages[_NumSizeClasses];
// The SizeToClass lookup is implemented using two arrays,
// one mapping sizes <= 1024 to their class and one mapping
@@ -60,6 +60,7 @@ runtime_InitSizes(void)
int32 align, sizeclass, size, nextsize, n;
uint32 i;
uintptr allocsize, npages;
+ MStats *pmstats;
// Initialize the runtime_class_to_size table (and choose class sizes in the process).
runtime_class_to_size[0] = 0;
@@ -101,14 +102,14 @@ runtime_InitSizes(void)
runtime_class_to_size[sizeclass] = size;
sizeclass++;
}
- if(sizeclass != NumSizeClasses) {
- runtime_printf("sizeclass=%d NumSizeClasses=%d\n", sizeclass, NumSizeClasses);
- runtime_throw("InitSizes - bad NumSizeClasses");
+ if(sizeclass != _NumSizeClasses) {
+ runtime_printf("sizeclass=%d _NumSizeClasses=%d\n", sizeclass, _NumSizeClasses);
+ runtime_throw("InitSizes - bad _NumSizeClasses");
}
// Initialize the size_to_class tables.
nextsize = 0;
- for (sizeclass = 1; sizeclass < NumSizeClasses; sizeclass++) {
+ for (sizeclass = 1; sizeclass < _NumSizeClasses; sizeclass++) {
for(; nextsize < 1024 && nextsize <= runtime_class_to_size[sizeclass]; nextsize+=8)
runtime_size_to_class8[nextsize/8] = sizeclass;
if(nextsize >= 1024)
@@ -120,7 +121,7 @@ runtime_InitSizes(void)
if(0) {
for(n=0; n < MaxSmallSize; n++) {
sizeclass = runtime_SizeToClass(n);
- if(sizeclass < 1 || sizeclass >= NumSizeClasses || runtime_class_to_size[sizeclass] < n) {
+ if(sizeclass < 1 || sizeclass >= _NumSizeClasses || runtime_class_to_size[sizeclass] < n) {
runtime_printf("size=%d sizeclass=%d runtime_class_to_size=%d\n", n, sizeclass, runtime_class_to_size[sizeclass]);
runtime_printf("incorrect SizeToClass");
goto dump;
@@ -134,15 +135,16 @@ runtime_InitSizes(void)
}
// Copy out for statistics table.
+ pmstats = mstats();
for(i=0; i<nelem(runtime_class_to_size); i++)
- mstats.by_size[i].size = runtime_class_to_size[i];
+ pmstats->by_size[i].size = runtime_class_to_size[i];
return;
dump:
if(1){
- runtime_printf("NumSizeClasses=%d\n", NumSizeClasses);
+ runtime_printf("NumSizeClasses=%d\n", _NumSizeClasses);
runtime_printf("runtime_class_to_size:");
- for(sizeclass=0; sizeclass<NumSizeClasses; sizeclass++)
+ for(sizeclass=0; sizeclass<_NumSizeClasses; sizeclass++)
runtime_printf(" %d", runtime_class_to_size[sizeclass]);
runtime_printf("\n\n");
runtime_printf("size_to_class8:");
diff --git a/libgo/runtime/netpoll.goc b/libgo/runtime/netpoll.goc
deleted file mode 100644
index 2f3fa455f3..0000000000
--- a/libgo/runtime/netpoll.goc
+++ /dev/null
@@ -1,472 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build darwin dragonfly freebsd linux nacl netbsd openbsd solaris windows
-
-package net
-
-#include "runtime.h"
-#include "defs.h"
-#include "arch.h"
-#include "malloc.h"
-
-// Map gccgo field names to gc field names.
-// Eface aka __go_empty_interface.
-#define type __type_descriptor
-#define data __object
-
-// Integrated network poller (platform-independent part).
-// A particular implementation (epoll/kqueue) must define the following functions:
-// void runtime_netpollinit(void); // to initialize the poller
-// int32 runtime_netpollopen(uintptr fd, PollDesc *pd); // to arm edge-triggered notifications
- // and associate fd with pd.
-// An implementation must call the following function to denote that the pd is ready.
-// void runtime_netpollready(G **gpp, PollDesc *pd, int32 mode);
-
-// PollDesc contains 2 binary semaphores, rg and wg, to park reader and writer
-// goroutines respectively. The semaphore can be in the following states:
-// READY - io readiness notification is pending;
-// a goroutine consumes the notification by changing the state to nil.
-// WAIT - a goroutine prepares to park on the semaphore, but not yet parked;
-// the goroutine commits to park by changing the state to G pointer,
-// or, alternatively, concurrent io notification changes the state to READY,
-// or, alternatively, concurrent timeout/close changes the state to nil.
-// G pointer - the goroutine is blocked on the semaphore;
-// io notification or timeout/close changes the state to READY or nil respectively
-// and unparks the goroutine.
-// nil - nothing of the above.
-#define READY ((G*)1)
-#define WAIT ((G*)2)
-
-enum
-{
- PollBlockSize = 4*1024,
-};
-
-struct PollDesc
-{
- PollDesc* link; // in pollcache, protected by pollcache.Lock
-
- // The lock protects pollOpen, pollSetDeadline, pollUnblock and deadlineimpl operations.
- // This fully covers seq, rt and wt variables. fd is constant throughout the PollDesc lifetime.
- // pollReset, pollWait, pollWaitCanceled and runtime_netpollready (IO rediness notification)
- // proceed w/o taking the lock. So closing, rg, rd, wg and wd are manipulated
- // in a lock-free way by all operations.
- Lock; // protectes the following fields
- uintptr fd;
- bool closing;
- uintptr seq; // protects from stale timers and ready notifications
- G* rg; // READY, WAIT, G waiting for read or nil
- Timer rt; // read deadline timer (set if rt.fv != nil)
- int64 rd; // read deadline
- G* wg; // READY, WAIT, G waiting for write or nil
- Timer wt; // write deadline timer
- int64 wd; // write deadline
- void* user; // user settable cookie
-};
-
-static struct
-{
- Lock;
- PollDesc* first;
- // PollDesc objects must be type-stable,
- // because we can get ready notification from epoll/kqueue
- // after the descriptor is closed/reused.
- // Stale notifications are detected using seq variable,
- // seq is incremented when deadlines are changed or descriptor is reused.
-} pollcache;
-
-static bool netpollblock(PollDesc*, int32, bool);
-static G* netpollunblock(PollDesc*, int32, bool);
-static void deadline(Eface, uintptr);
-static void readDeadline(Eface, uintptr);
-static void writeDeadline(Eface, uintptr);
-static PollDesc* allocPollDesc(void);
-static intgo checkerr(PollDesc *pd, int32 mode);
-
-static FuncVal deadlineFn = {(void(*)(void))deadline};
-static FuncVal readDeadlineFn = {(void(*)(void))readDeadline};
-static FuncVal writeDeadlineFn = {(void(*)(void))writeDeadline};
-
-// runtimeNano returns the current value of the runtime clock in nanoseconds.
-func runtimeNano() (ns int64) {
- ns = runtime_nanotime();
-}
-
-func runtime_pollServerInit() {
- runtime_netpollinit();
-}
-
-func runtime_pollOpen(fd uintptr) (pd *PollDesc, errno int) {
- pd = allocPollDesc();
- runtime_lock(pd);
- if(pd->wg != nil && pd->wg != READY)
- runtime_throw("runtime_pollOpen: blocked write on free descriptor");
- if(pd->rg != nil && pd->rg != READY)
- runtime_throw("runtime_pollOpen: blocked read on free descriptor");
- pd->fd = fd;
- pd->closing = false;
- pd->seq++;
- pd->rg = nil;
- pd->rd = 0;
- pd->wg = nil;
- pd->wd = 0;
- runtime_unlock(pd);
-
- errno = runtime_netpollopen(fd, pd);
-}
-
-func runtime_pollClose(pd *PollDesc) {
- if(!pd->closing)
- runtime_throw("runtime_pollClose: close w/o unblock");
- if(pd->wg != nil && pd->wg != READY)
- runtime_throw("runtime_pollClose: blocked write on closing descriptor");
- if(pd->rg != nil && pd->rg != READY)
- runtime_throw("runtime_pollClose: blocked read on closing descriptor");
- runtime_netpollclose(pd->fd);
- runtime_lock(&pollcache);
- pd->link = pollcache.first;
- pollcache.first = pd;
- runtime_unlock(&pollcache);
-}
-
-func runtime_pollReset(pd *PollDesc, mode int) (err int) {
- err = checkerr(pd, mode);
- if(err)
- goto ret;
- if(mode == 'r')
- pd->rg = nil;
- else if(mode == 'w')
- pd->wg = nil;
-ret:
-}
-
-func runtime_pollWait(pd *PollDesc, mode int) (err int) {
- err = checkerr(pd, mode);
- if(err == 0) {
- // As for now only Solaris uses level-triggered IO.
- if(Solaris)
- runtime_netpollarm(pd, mode);
- while(!netpollblock(pd, mode, false)) {
- err = checkerr(pd, mode);
- if(err != 0)
- break;
- // Can happen if timeout has fired and unblocked us,
- // but before we had a chance to run, timeout has been reset.
- // Pretend it has not happened and retry.
- }
- }
-}
-
-func runtime_pollWaitCanceled(pd *PollDesc, mode int) {
- // This function is used only on windows after a failed attempt to cancel
- // a pending async IO operation. Wait for ioready, ignore closing or timeouts.
- while(!netpollblock(pd, mode, true))
- ;
-}
-
-func runtime_pollSetDeadline(pd *PollDesc, d int64, mode int) {
- G *rg, *wg;
-
- runtime_lock(pd);
- if(pd->closing) {
- runtime_unlock(pd);
- return;
- }
- pd->seq++; // invalidate current timers
- // Reset current timers.
- if(pd->rt.fv) {
- runtime_deltimer(&pd->rt);
- pd->rt.fv = nil;
- }
- if(pd->wt.fv) {
- runtime_deltimer(&pd->wt);
- pd->wt.fv = nil;
- }
- // Setup new timers.
- if(d != 0 && d <= runtime_nanotime())
- d = -1;
- if(mode == 'r' || mode == 'r'+'w')
- pd->rd = d;
- if(mode == 'w' || mode == 'r'+'w')
- pd->wd = d;
- if(pd->rd > 0 && pd->rd == pd->wd) {
- pd->rt.fv = &deadlineFn;
- pd->rt.when = pd->rd;
- // Copy current seq into the timer arg.
- // Timer func will check the seq against current descriptor seq,
- // if they differ the descriptor was reused or timers were reset.
- pd->rt.arg.type = nil; // should be *pollDesc type descriptor.
- pd->rt.arg.data = pd;
- pd->rt.seq = pd->seq;
- runtime_addtimer(&pd->rt);
- } else {
- if(pd->rd > 0) {
- pd->rt.fv = &readDeadlineFn;
- pd->rt.when = pd->rd;
- pd->rt.arg.type = nil; // should be *pollDesc type descriptor.
- pd->rt.arg.data = pd;
- pd->rt.seq = pd->seq;
- runtime_addtimer(&pd->rt);
- }
- if(pd->wd > 0) {
- pd->wt.fv = &writeDeadlineFn;
- pd->wt.when = pd->wd;
- pd->wt.arg.type = nil; // should be *pollDesc type descriptor.
- pd->wt.arg.data = pd;
- pd->wt.seq = pd->seq;
- runtime_addtimer(&pd->wt);
- }
- }
- // If we set the new deadline in the past, unblock currently pending IO if any.
- rg = nil;
- runtime_atomicstorep(&wg, nil); // full memory barrier between stores to rd/wd and load of rg/wg in netpollunblock
- if(pd->rd < 0)
- rg = netpollunblock(pd, 'r', false);
- if(pd->wd < 0)
- wg = netpollunblock(pd, 'w', false);
- runtime_unlock(pd);
- if(rg)
- runtime_ready(rg);
- if(wg)
- runtime_ready(wg);
-}
-
-func runtime_pollUnblock(pd *PollDesc) {
- G *rg, *wg;
-
- runtime_lock(pd);
- if(pd->closing)
- runtime_throw("runtime_pollUnblock: already closing");
- pd->closing = true;
- pd->seq++;
- runtime_atomicstorep(&rg, nil); // full memory barrier between store to closing and read of rg/wg in netpollunblock
- rg = netpollunblock(pd, 'r', false);
- wg = netpollunblock(pd, 'w', false);
- if(pd->rt.fv) {
- runtime_deltimer(&pd->rt);
- pd->rt.fv = nil;
- }
- if(pd->wt.fv) {
- runtime_deltimer(&pd->wt);
- pd->wt.fv = nil;
- }
- runtime_unlock(pd);
- if(rg)
- runtime_ready(rg);
- if(wg)
- runtime_ready(wg);
-}
-
-uintptr
-runtime_netpollfd(PollDesc *pd)
-{
- return pd->fd;
-}
-
-void**
-runtime_netpolluser(PollDesc *pd)
-{
- return &pd->user;
-}
-
-bool
-runtime_netpollclosing(PollDesc *pd)
-{
- return pd->closing;
-}
-
-void
-runtime_netpolllock(PollDesc *pd)
-{
- runtime_lock(pd);
-}
-
-void
-runtime_netpollunlock(PollDesc *pd)
-{
- runtime_unlock(pd);
-}
-
-// make pd ready, newly runnable goroutines (if any) are enqueued info gpp list
-void
-runtime_netpollready(G **gpp, PollDesc *pd, int32 mode)
-{
- G *rg, *wg;
-
- rg = wg = nil;
- if(mode == 'r' || mode == 'r'+'w')
- rg = netpollunblock(pd, 'r', true);
- if(mode == 'w' || mode == 'r'+'w')
- wg = netpollunblock(pd, 'w', true);
- if(rg) {
- rg->schedlink = *gpp;
- *gpp = rg;
- }
- if(wg) {
- wg->schedlink = *gpp;
- *gpp = wg;
- }
-}
-
-static intgo
-checkerr(PollDesc *pd, int32 mode)
-{
- if(pd->closing)
- return 1; // errClosing
- if((mode == 'r' && pd->rd < 0) || (mode == 'w' && pd->wd < 0))
- return 2; // errTimeout
- return 0;
-}
-
-static bool
-blockcommit(G *gp, G **gpp)
-{
- return runtime_casp(gpp, WAIT, gp);
-}
-
-// returns true if IO is ready, or false if timedout or closed
-// waitio - wait only for completed IO, ignore errors
-static bool
-netpollblock(PollDesc *pd, int32 mode, bool waitio)
-{
- G **gpp, *old;
-
- gpp = &pd->rg;
- if(mode == 'w')
- gpp = &pd->wg;
-
- // set the gpp semaphore to WAIT
- for(;;) {
- old = *gpp;
- if(old == READY) {
- *gpp = nil;
- return true;
- }
- if(old != nil)
- runtime_throw("netpollblock: double wait");
- if(runtime_casp(gpp, nil, WAIT))
- break;
- }
-
- // need to recheck error states after setting gpp to WAIT
- // this is necessary because runtime_pollUnblock/runtime_pollSetDeadline/deadlineimpl
- // do the opposite: store to closing/rd/wd, membarrier, load of rg/wg
- if(waitio || checkerr(pd, mode) == 0)
- runtime_park((bool(*)(G*, void*))blockcommit, gpp, "IO wait");
- // be careful to not lose concurrent READY notification
- old = runtime_xchgp(gpp, nil);
- if(old > WAIT)
- runtime_throw("netpollblock: corrupted state");
- return old == READY;
-}
-
-static G*
-netpollunblock(PollDesc *pd, int32 mode, bool ioready)
-{
- G **gpp, *old, *new;
-
- gpp = &pd->rg;
- if(mode == 'w')
- gpp = &pd->wg;
-
- for(;;) {
- old = *gpp;
- if(old == READY)
- return nil;
- if(old == nil && !ioready) {
- // Only set READY for ioready. runtime_pollWait
- // will check for timeout/cancel before waiting.
- return nil;
- }
- new = nil;
- if(ioready)
- new = READY;
- if(runtime_casp(gpp, old, new))
- break;
- }
- if(old > WAIT)
- return old; // must be G*
- return nil;
-}
-
-static void
-deadlineimpl(Eface arg, uintptr seq, bool read, bool write)
-{
- PollDesc *pd;
- G *rg, *wg;
-
- pd = (PollDesc*)arg.data;
- rg = wg = nil;
- runtime_lock(pd);
- // Seq arg is seq when the timer was set.
- // If it's stale, ignore the timer event.
- if(seq != pd->seq) {
- // The descriptor was reused or timers were reset.
- runtime_unlock(pd);
- return;
- }
- if(read) {
- if(pd->rd <= 0 || pd->rt.fv == nil)
- runtime_throw("deadlineimpl: inconsistent read deadline");
- pd->rd = -1;
- runtime_atomicstorep(&pd->rt.fv, nil); // full memory barrier between store to rd and load of rg in netpollunblock
- rg = netpollunblock(pd, 'r', false);
- }
- if(write) {
- if(pd->wd <= 0 || (pd->wt.fv == nil && !read))
- runtime_throw("deadlineimpl: inconsistent write deadline");
- pd->wd = -1;
- runtime_atomicstorep(&pd->wt.fv, nil); // full memory barrier between store to wd and load of wg in netpollunblock
- wg = netpollunblock(pd, 'w', false);
- }
- runtime_unlock(pd);
- if(rg)
- runtime_ready(rg);
- if(wg)
- runtime_ready(wg);
-}
-
-static void
-deadline(Eface arg, uintptr seq)
-{
- deadlineimpl(arg, seq, true, true);
-}
-
-static void
-readDeadline(Eface arg, uintptr seq)
-{
- deadlineimpl(arg, seq, true, false);
-}
-
-static void
-writeDeadline(Eface arg, uintptr seq)
-{
- deadlineimpl(arg, seq, false, true);
-}
-
-static PollDesc*
-allocPollDesc(void)
-{
- PollDesc *pd;
- uint32 i, n;
-
- runtime_lock(&pollcache);
- if(pollcache.first == nil) {
- n = PollBlockSize/sizeof(*pd);
- if(n == 0)
- n = 1;
- // Must be in non-GC memory because can be referenced
- // only from epoll/kqueue internals.
- pd = runtime_persistentalloc(n*sizeof(*pd), 0, &mstats.other_sys);
- for(i = 0; i < n; i++) {
- pd[i].link = pollcache.first;
- pollcache.first = &pd[i];
- }
- }
- pd = pollcache.first;
- pollcache.first = pd->link;
- runtime_unlock(&pollcache);
- return pd;
-}
diff --git a/libgo/runtime/netpoll_epoll.c b/libgo/runtime/netpoll_epoll.c
deleted file mode 100644
index 1281f45b08..0000000000
--- a/libgo/runtime/netpoll_epoll.c
+++ /dev/null
@@ -1,174 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build linux
-
-#include <errno.h>
-#include <unistd.h>
-#include <fcntl.h>
-#include <sys/epoll.h>
-
-#include "runtime.h"
-#include "defs.h"
-#include "malloc.h"
-
-#ifndef EPOLLRDHUP
-#define EPOLLRDHUP 0x2000
-#endif
-
-#ifndef EPOLL_CLOEXEC
-#define EPOLL_CLOEXEC 02000000
-#endif
-
-#ifndef HAVE_EPOLL_CREATE1
-extern int epoll_create1(int __flags);
-#endif
-
-typedef struct epoll_event EpollEvent;
-
-static int32
-runtime_epollcreate(int32 size)
-{
- int r;
-
- r = epoll_create(size);
- if(r >= 0)
- return r;
- return - errno;
-}
-
-static int32
-runtime_epollcreate1(int32 flags)
-{
- int r;
-
- r = epoll_create1(flags);
- if(r >= 0)
- return r;
- return - errno;
-}
-
-static int32
-runtime_epollctl(int32 epfd, int32 op, int32 fd, EpollEvent *ev)
-{
- int r;
-
- r = epoll_ctl(epfd, op, fd, ev);
- if(r >= 0)
- return r;
- return - errno;
-}
-
-static int32
-runtime_epollwait(int32 epfd, EpollEvent *ev, int32 nev, int32 timeout)
-{
- int r;
-
- r = epoll_wait(epfd, ev, nev, timeout);
- if(r >= 0)
- return r;
- return - errno;
-}
-
-static void
-runtime_closeonexec(int32 fd)
-{
- fcntl(fd, F_SETFD, FD_CLOEXEC);
-}
-
-static int32 epfd = -1; // epoll descriptor
-
-void
-runtime_netpollinit(void)
-{
- epfd = runtime_epollcreate1(EPOLL_CLOEXEC);
- if(epfd >= 0)
- return;
- epfd = runtime_epollcreate(1024);
- if(epfd >= 0) {
- runtime_closeonexec(epfd);
- return;
- }
- runtime_printf("netpollinit: failed to create descriptor (%d)\n", -epfd);
- runtime_throw("netpollinit: failed to create descriptor");
-}
-
-int32
-runtime_netpollopen(uintptr fd, PollDesc *pd)
-{
- EpollEvent ev;
- int32 res;
-
- ev.events = EPOLLIN|EPOLLOUT|EPOLLRDHUP|EPOLLET;
- ev.data.ptr = (void*)pd;
- res = runtime_epollctl(epfd, EPOLL_CTL_ADD, (int32)fd, &ev);
- return -res;
-}
-
-int32
-runtime_netpollclose(uintptr fd)
-{
- EpollEvent ev;
- int32 res;
-
- res = runtime_epollctl(epfd, EPOLL_CTL_DEL, (int32)fd, &ev);
- return -res;
-}
-
-void
-runtime_netpollarm(PollDesc* pd, int32 mode)
-{
- USED(pd);
- USED(mode);
- runtime_throw("unused");
-}
-
-// polls for ready network connections
-// returns list of goroutines that become runnable
-G*
-runtime_netpoll(bool block)
-{
- static int32 lasterr;
- EpollEvent events[128], *ev;
- int32 n, i, waitms, mode;
- G *gp;
-
- if(epfd == -1)
- return nil;
- waitms = -1;
- if(!block)
- waitms = 0;
-retry:
- n = runtime_epollwait(epfd, events, nelem(events), waitms);
- if(n < 0) {
- if(n != -EINTR && n != lasterr) {
- lasterr = n;
- runtime_printf("runtime: epollwait on fd %d failed with %d\n", epfd, -n);
- }
- goto retry;
- }
- gp = nil;
- for(i = 0; i < n; i++) {
- ev = &events[i];
- if(ev->events == 0)
- continue;
- mode = 0;
- if(ev->events & (EPOLLIN|EPOLLRDHUP|EPOLLHUP|EPOLLERR))
- mode += 'r';
- if(ev->events & (EPOLLOUT|EPOLLHUP|EPOLLERR))
- mode += 'w';
- if(mode)
- runtime_netpollready(&gp, (void*)ev->data.ptr, mode);
- }
- if(block && gp == nil)
- goto retry;
- return gp;
-}
-
-void
-runtime_netpoll_scan(struct Workbuf** wbufp, void (*enqueue1)(struct Workbuf**, Obj))
-{
- USED(wbufp);
- USED(enqueue1);
-}
diff --git a/libgo/runtime/netpoll_kqueue.c b/libgo/runtime/netpoll_kqueue.c
deleted file mode 100644
index 5144a870fb..0000000000
--- a/libgo/runtime/netpoll_kqueue.c
+++ /dev/null
@@ -1,118 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build darwin dragonfly freebsd netbsd openbsd
-
-#include "runtime.h"
-#include "defs.h"
-#include "malloc.h"
-
-// Integrated network poller (kqueue-based implementation).
-
-int32 runtime_kqueue(void);
-int32 runtime_kevent(int32, Kevent*, int32, Kevent*, int32, Timespec*);
-void runtime_closeonexec(int32);
-
-static int32 kq = -1;
-
-void
-runtime_netpollinit(void)
-{
- kq = runtime_kqueue();
- if(kq < 0) {
- runtime_printf("netpollinit: kqueue failed with %d\n", -kq);
- runtime_throw("netpollinit: kqueue failed");
- }
- runtime_closeonexec(kq);
-}
-
-int32
-runtime_netpollopen(uintptr fd, PollDesc *pd)
-{
- Kevent ev[2];
- int32 n;
-
- // Arm both EVFILT_READ and EVFILT_WRITE in edge-triggered mode (EV_CLEAR)
- // for the whole fd lifetime. The notifications are automatically unregistered
- // when fd is closed.
- ev[0].ident = (uint32)fd;
- ev[0].filter = EVFILT_READ;
- ev[0].flags = EV_ADD|EV_CLEAR;
- ev[0].fflags = 0;
- ev[0].data = 0;
- ev[0].udata = (kevent_udata)pd;
- ev[1] = ev[0];
- ev[1].filter = EVFILT_WRITE;
- n = runtime_kevent(kq, ev, 2, nil, 0, nil);
- if(n < 0)
- return -n;
- return 0;
-}
-
-int32
-runtime_netpollclose(uintptr fd)
-{
- // Don't need to unregister because calling close()
- // on fd will remove any kevents that reference the descriptor.
- USED(fd);
- return 0;
-}
-
-void
-runtime_netpollarm(PollDesc* pd, int32 mode)
-{
- USED(pd, mode);
- runtime_throw("unused");
-}
-
-// Polls for ready network connections.
-// Returns list of goroutines that become runnable.
-G*
-runtime_netpoll(bool block)
-{
- static int32 lasterr;
- Kevent events[64], *ev;
- Timespec ts, *tp;
- int32 n, i, mode;
- G *gp;
-
- if(kq == -1)
- return nil;
- tp = nil;
- if(!block) {
- ts.tv_sec = 0;
- ts.tv_nsec = 0;
- tp = &ts;
- }
- gp = nil;
-retry:
- n = runtime_kevent(kq, nil, 0, events, nelem(events), tp);
- if(n < 0) {
- if(n != -EINTR && n != lasterr) {
- lasterr = n;
- runtime_printf("runtime: kevent on fd %d failed with %d\n", kq, -n);
- }
- goto retry;
- }
- for(i = 0; i < n; i++) {
- ev = &events[i];
- mode = 0;
- if(ev->filter == EVFILT_READ)
- mode += 'r';
- if(ev->filter == EVFILT_WRITE)
- mode += 'w';
- if(mode)
- runtime_netpollready(&gp, (PollDesc*)ev->udata, mode);
- }
- if(block && gp == nil)
- goto retry;
- return gp;
-}
-
-void
-runtime_netpoll_scan(struct Workbuf** wbufp, void (*enqueue1)(struct Workbuf**, Obj))
-{
- USED(wbufp);
- USED(enqueue1);
-}
diff --git a/libgo/runtime/netpoll_select.c b/libgo/runtime/netpoll_select.c
deleted file mode 100644
index 033661d17f..0000000000
--- a/libgo/runtime/netpoll_select.c
+++ /dev/null
@@ -1,256 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build solaris
-
-#include "config.h"
-
-#include <errno.h>
-#include <sys/times.h>
-#include <sys/types.h>
-#include <unistd.h>
-#include <fcntl.h>
-
-#ifdef HAVE_SYS_SELECT_H
-#include <sys/select.h>
-#endif
-
-#include "runtime.h"
-#include "malloc.h"
-
-static Lock selectlock;
-static int rdwake;
-static int wrwake;
-static fd_set fds;
-static PollDesc **data;
-static int allocated;
-
-void
-runtime_netpollinit(void)
-{
- int p[2];
- int fl;
-
- FD_ZERO(&fds);
- allocated = 128;
- data = runtime_mallocgc(allocated * sizeof(PollDesc *), 0,
- FlagNoScan|FlagNoProfiling|FlagNoInvokeGC);
-
- if(pipe(p) < 0)
- runtime_throw("netpollinit: failed to create pipe");
- rdwake = p[0];
- wrwake = p[1];
-
- fl = fcntl(rdwake, F_GETFL);
- if(fl < 0)
- runtime_throw("netpollinit: fcntl failed");
- fl |= O_NONBLOCK;
- if(fcntl(rdwake, F_SETFL, fl))
- runtime_throw("netpollinit: fcntl failed");
- fcntl(rdwake, F_SETFD, FD_CLOEXEC);
-
- fl = fcntl(wrwake, F_GETFL);
- if(fl < 0)
- runtime_throw("netpollinit: fcntl failed");
- fl |= O_NONBLOCK;
- if(fcntl(wrwake, F_SETFL, fl))
- runtime_throw("netpollinit: fcntl failed");
- fcntl(wrwake, F_SETFD, FD_CLOEXEC);
-
- FD_SET(rdwake, &fds);
-}
-
-int32
-runtime_netpollopen(uintptr fd, PollDesc *pd)
-{
- byte b;
-
- runtime_lock(&selectlock);
-
- if((int)fd >= allocated) {
- int c;
- PollDesc **n;
-
- c = allocated;
-
- runtime_unlock(&selectlock);
-
- while((int)fd >= c)
- c *= 2;
- n = runtime_mallocgc(c * sizeof(PollDesc *), 0,
- FlagNoScan|FlagNoProfiling|FlagNoInvokeGC);
-
- runtime_lock(&selectlock);
-
- if(c > allocated) {
- __builtin_memcpy(n, data, allocated * sizeof(PollDesc *));
- allocated = c;
- data = n;
- }
- }
- FD_SET(fd, &fds);
- data[fd] = pd;
-
- runtime_unlock(&selectlock);
-
- b = 0;
- write(wrwake, &b, sizeof b);
-
- return 0;
-}
-
-int32
-runtime_netpollclose(uintptr fd)
-{
- byte b;
-
- runtime_lock(&selectlock);
-
- FD_CLR(fd, &fds);
- data[fd] = nil;
-
- runtime_unlock(&selectlock);
-
- b = 0;
- write(wrwake, &b, sizeof b);
-
- return 0;
-}
-
-/* Used to avoid using too much stack memory. */
-static bool inuse;
-static fd_set grfds, gwfds, gefds, gtfds;
-
-G*
-runtime_netpoll(bool block)
-{
- fd_set *prfds, *pwfds, *pefds, *ptfds;
- bool allocatedfds;
- struct timeval timeout;
- struct timeval *pt;
- int max, c, i;
- G *gp;
- int32 mode;
- byte b;
- struct stat st;
-
- allocatedfds = false;
-
- retry:
- runtime_lock(&selectlock);
-
- max = allocated;
-
- if(max == 0) {
- runtime_unlock(&selectlock);
- return nil;
- }
-
- if(inuse) {
- if(!allocatedfds) {
- prfds = runtime_SysAlloc(4 * sizeof fds, &mstats.other_sys);
- pwfds = prfds + 1;
- pefds = pwfds + 1;
- ptfds = pefds + 1;
- allocatedfds = true;
- }
- } else {
- prfds = &grfds;
- pwfds = &gwfds;
- pefds = &gefds;
- ptfds = &gtfds;
- inuse = true;
- allocatedfds = false;
- }
-
- __builtin_memcpy(prfds, &fds, sizeof fds);
-
- runtime_unlock(&selectlock);
-
- __builtin_memcpy(pwfds, prfds, sizeof fds);
- FD_CLR(rdwake, pwfds);
- __builtin_memcpy(pefds, pwfds, sizeof fds);
-
- __builtin_memcpy(ptfds, pwfds, sizeof fds);
-
- __builtin_memset(&timeout, 0, sizeof timeout);
- pt = &timeout;
- if(block)
- pt = nil;
-
- c = select(max, prfds, pwfds, pefds, pt);
- if(c < 0) {
- if(errno == EBADF) {
- // Some file descriptor has been closed.
- // Check each one, and treat each closed
- // descriptor as ready for read/write.
- c = 0;
- FD_ZERO(prfds);
- FD_ZERO(pwfds);
- FD_ZERO(pefds);
- for(i = 0; i < max; i++) {
- if(FD_ISSET(i, ptfds)
- && fstat(i, &st) < 0
- && errno == EBADF) {
- FD_SET(i, prfds);
- FD_SET(i, pwfds);
- c += 2;
- }
- }
- }
- else {
- if(errno != EINTR)
- runtime_printf("runtime: select failed with %d\n", errno);
- goto retry;
- }
- }
- gp = nil;
- for(i = 0; i < max && c > 0; i++) {
- mode = 0;
- if(FD_ISSET(i, prfds)) {
- mode += 'r';
- --c;
- }
- if(FD_ISSET(i, pwfds)) {
- mode += 'w';
- --c;
- }
- if(FD_ISSET(i, pefds)) {
- mode = 'r' + 'w';
- --c;
- }
- if(i == rdwake && mode != 0) {
- while(read(rdwake, &b, sizeof b) > 0)
- ;
- continue;
- }
- if(mode) {
- PollDesc *pd;
-
- runtime_lock(&selectlock);
- pd = data[i];
- runtime_unlock(&selectlock);
- if(pd != nil)
- runtime_netpollready(&gp, pd, mode);
- }
- }
- if(block && gp == nil)
- goto retry;
-
- if(allocatedfds) {
- runtime_SysFree(prfds, 4 * sizeof fds, &mstats.other_sys);
- } else {
- runtime_lock(&selectlock);
- inuse = false;
- runtime_unlock(&selectlock);
- }
-
- return gp;
-}
-
-void
-runtime_netpoll_scan(struct Workbuf** wbufp, void (*enqueue1)(struct Workbuf**, Obj))
-{
- enqueue1(wbufp, (Obj){(byte*)&data, sizeof data, 0});
-}
diff --git a/libgo/runtime/netpoll_stub.c b/libgo/runtime/netpoll_stub.c
deleted file mode 100644
index 468a610f6f..0000000000
--- a/libgo/runtime/netpoll_stub.c
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build plan9
-
-#include "runtime.h"
-#include "malloc.h"
-
-// Polls for ready network connections.
-// Returns list of goroutines that become runnable.
-G*
-runtime_netpoll(bool block)
-{
- // Implementation for platforms that do not support
- // integrated network poller.
- USED(block);
- return nil;
-}
-
-void
-runtime_netpoll_scan(struct Workbuf** wbufp, void (*enqueue1)(struct Workbuf**, Obj))
-{
- USED(wbufp);
- USED(addroot);
-}
diff --git a/libgo/runtime/panic.c b/libgo/runtime/panic.c
index de000db988..493fde8932 100644
--- a/libgo/runtime/panic.c
+++ b/libgo/runtime/panic.c
@@ -3,196 +3,14 @@
// license that can be found in the LICENSE file.
#include "runtime.h"
-#include "malloc.h"
-#include "go-defer.h"
-#include "go-panic.h"
-// Code related to defer, panic and recover.
-
-uint32 runtime_panicking;
-static Lock paniclk;
-
-// Allocate a Defer, usually using per-P pool.
-// Each defer must be released with freedefer.
-Defer*
-runtime_newdefer()
-{
- Defer *d;
- P *p;
-
- d = nil;
- p = runtime_m()->p;
- d = p->deferpool;
- if(d)
- p->deferpool = d->__next;
- if(d == nil) {
- // deferpool is empty
- d = runtime_malloc(sizeof(Defer));
- }
- return d;
-}
-
-// Free the given defer.
-// The defer cannot be used after this call.
-void
-runtime_freedefer(Defer *d)
-{
- P *p;
-
- if(d->__special)
- return;
- p = runtime_m()->p;
- d->__next = p->deferpool;
- p->deferpool = d;
- // No need to wipe out pointers in argp/pc/fn/args,
- // because we empty the pool before GC.
-}
-
-// Run all deferred functions for the current goroutine.
-// This is noinline for go_can_recover.
-static void __go_rundefer (void) __attribute__ ((noinline));
-static void
-__go_rundefer(void)
-{
- G *g;
- Defer *d;
-
- g = runtime_g();
- while((d = g->defer) != nil) {
- void (*pfn)(void*);
-
- g->defer = d->__next;
- pfn = d->__pfn;
- d->__pfn = nil;
- if (pfn != nil)
- (*pfn)(d->__arg);
- runtime_freedefer(d);
- }
-}
-
-void
-runtime_startpanic(void)
-{
- M *m;
-
- m = runtime_m();
- if(runtime_mheap.cachealloc.size == 0) { // very early
- runtime_printf("runtime: panic before malloc heap initialized\n");
- m->mallocing = 1; // tell rest of panic not to try to malloc
- } else if(m->mcache == nil) // can happen if called from signal handler or throw
- m->mcache = runtime_allocmcache();
- switch(m->dying) {
- case 0:
- m->dying = 1;
- if(runtime_g() != nil)
- runtime_g()->writebuf = nil;
- runtime_xadd(&runtime_panicking, 1);
- runtime_lock(&paniclk);
- if(runtime_debug.schedtrace > 0 || runtime_debug.scheddetail > 0)
- runtime_schedtrace(true);
- runtime_freezetheworld();
- return;
- case 1:
- // Something failed while panicing, probably the print of the
- // argument to panic(). Just print a stack trace and exit.
- m->dying = 2;
- runtime_printf("panic during panic\n");
- runtime_dopanic(0);
- runtime_exit(3);
- case 2:
- // This is a genuine bug in the runtime, we couldn't even
- // print the stack trace successfully.
- m->dying = 3;
- runtime_printf("stack trace unavailable\n");
- runtime_exit(4);
- default:
- // Can't even print! Just exit.
- runtime_exit(5);
- }
-}
-
-void
-runtime_dopanic(int32 unused __attribute__ ((unused)))
-{
- G *g;
- static bool didothers;
- bool crash;
- int32 t;
-
- g = runtime_g();
- if(g->sig != 0)
- runtime_printf("[signal %x code=%p addr=%p]\n",
- g->sig, (void*)g->sigcode0, (void*)g->sigcode1);
-
- if((t = runtime_gotraceback(&crash)) > 0){
- if(g != runtime_m()->g0) {
- runtime_printf("\n");
- runtime_goroutineheader(g);
- runtime_traceback();
- runtime_printcreatedby(g);
- } else if(t >= 2 || runtime_m()->throwing > 0) {
- runtime_printf("\nruntime stack:\n");
- runtime_traceback();
- }
- if(!didothers) {
- didothers = true;
- runtime_tracebackothers(g);
- }
- }
- runtime_unlock(&paniclk);
- if(runtime_xadd(&runtime_panicking, -1) != 0) {
- // Some other m is panicking too.
- // Let it print what it needs to print.
- // Wait forever without chewing up cpu.
- // It will exit when it's done.
- static Lock deadlock;
- runtime_lock(&deadlock);
- runtime_lock(&deadlock);
- }
-
- if(crash)
- runtime_crash();
-
- runtime_exit(2);
-}
-
-bool
-runtime_canpanic(G *gp)
-{
- M *m = runtime_m();
- byte g;
-
- USED(&g); // don't use global g, it points to gsignal
-
- // Is it okay for gp to panic instead of crashing the program?
- // Yes, as long as it is running Go code, not runtime code,
- // and not stuck in a system call.
- if(gp == nil || gp != m->curg)
- return false;
- if(m->locks-m->softfloat != 0 || m->mallocing != 0 || m->throwing != 0 || m->gcing != 0 || m->dying != 0)
- return false;
- if(gp->status != Grunning)
- return false;
-#ifdef GOOS_windows
- if(m->libcallsp != 0)
- return false;
-#endif
- return true;
-}
+extern void gothrow(String) __attribute__((noreturn));
+extern void gothrow(String) __asm__(GOSYM_PREFIX "runtime.throw");
void
runtime_throw(const char *s)
{
- M *mp;
-
- mp = runtime_m();
- if(mp->throwing == 0)
- mp->throwing = 1;
- runtime_startpanic();
- runtime_printf("fatal error: %s\n", s);
- runtime_dopanic(0);
- *(int32*)0 = 0; // not reached
- runtime_exit(1); // even more not reached
+ gothrow(runtime_gostringnocopy((const byte *)s));
}
void
@@ -215,18 +33,3 @@ runtime_panicstring(const char *s)
runtime_newErrorCString(s, &err);
runtime_panic(err);
}
-
-void runtime_Goexit (void) __asm__ (GOSYM_PREFIX "runtime.Goexit");
-
-void
-runtime_Goexit(void)
-{
- __go_rundefer();
- runtime_goexit();
-}
-
-void
-runtime_panicdivide(void)
-{
- runtime_panicstring("integer divide by zero");
-}
diff --git a/libgo/runtime/parfor.c b/libgo/runtime/parfor.c
index ede921b9aa..d64d74ccd3 100644
--- a/libgo/runtime/parfor.c
+++ b/libgo/runtime/parfor.c
@@ -5,12 +5,13 @@
// Parallel for algorithm.
#include "runtime.h"
+#include "malloc.h"
#include "arch.h"
struct ParForThread
{
// the thread's iteration space [32lsb, 32msb)
- uint64 pos;
+ uint64 pos __attribute__((aligned(8)));
// stats
uint64 nsteal;
uint64 nstealcnt;
@@ -27,7 +28,7 @@ runtime_parforalloc(uint32 nthrmax)
// The ParFor object is followed by CacheLineSize padding
// and then nthrmax ParForThread.
- desc = (ParFor*)runtime_malloc(sizeof(ParFor) + CacheLineSize + nthrmax * sizeof(ParForThread));
+ desc = (ParFor*)runtime_mallocgc(sizeof(ParFor) + CacheLineSize + nthrmax * sizeof(ParForThread), 0, FlagNoInvokeGC);
desc->thr = (ParForThread*)((byte*)(desc+1) + CacheLineSize);
desc->nthrmax = nthrmax;
return desc;
@@ -125,7 +126,7 @@ runtime_parfordo(ParFor *desc)
goto exit;
}
// Choose a random victim for stealing.
- victim = runtime_fastrand1() % (desc->nthr-1);
+ victim = runtime_fastrand() % (desc->nthr-1);
if(victim >= tid)
victim++;
victimpos = &desc->thr[victim].pos;
diff --git a/libgo/runtime/print.c b/libgo/runtime/print.c
index 69b1f81fb4..4da879620c 100644
--- a/libgo/runtime/print.c
+++ b/libgo/runtime/print.c
@@ -9,58 +9,60 @@
#include "array.h"
#include "go-type.h"
-//static Lock debuglock;
+extern void runtime_printlock(void)
+ __asm__(GOSYM_PREFIX "runtime.printlock");
+extern void runtime_printunlock(void)
+ __asm__(GOSYM_PREFIX "runtime.printunlock");
+extern void gwrite(Slice)
+ __asm__(GOSYM_PREFIX "runtime.gwrite");
+extern void runtime_printint(int64)
+ __asm__(GOSYM_PREFIX "runtime.printint");
+extern void runtime_printuint(uint64)
+ __asm__(GOSYM_PREFIX "runtime.printuint");
+extern void runtime_printhex(uint64)
+ __asm__(GOSYM_PREFIX "runtime.printhex");
+extern void runtime_printfloat(float64)
+ __asm__(GOSYM_PREFIX "runtime.printfloat");
+extern void runtime_printcomplex(complex double)
+ __asm__(GOSYM_PREFIX "runtime.printcomplex");
+extern void runtime_printbool(_Bool)
+ __asm__(GOSYM_PREFIX "runtime.printbool");
+extern void runtime_printstring(String)
+ __asm__(GOSYM_PREFIX "runtime.printstring");
+extern void runtime_printpointer(void *)
+ __asm__(GOSYM_PREFIX "runtime.printpointer");
+extern void runtime_printslice(Slice)
+ __asm__(GOSYM_PREFIX "runtime.printslice");
+extern void runtime_printeface(Eface)
+ __asm__(GOSYM_PREFIX "runtime.printeface");
+extern void runtime_printiface(Iface)
+ __asm__(GOSYM_PREFIX "runtime.printiface");
// Clang requires this function to not be inlined (see below).
static void go_vprintf(const char*, va_list)
__attribute__((noinline));
-// write to goroutine-local buffer if diverting output,
-// or else standard error.
static void
-gwrite(const void *v, intgo n)
+runtime_prints(const char *s)
{
- G* g = runtime_g();
-
- if(g == nil || g->writebuf == nil) {
- // Avoid -D_FORTIFY_SOURCE problems.
- int rv __attribute__((unused));
-
- rv = runtime_write(2, v, n);
- return;
- }
-
- if(g->writenbuf == 0)
- return;
+ Slice sl;
- if(n > g->writenbuf)
- n = g->writenbuf;
- runtime_memmove(g->writebuf, v, n);
- g->writebuf += n;
- g->writenbuf -= n;
+ // Use memcpy to avoid const-cast warning.
+ memcpy(&sl.__values, &s, sizeof(char*));
+ sl.__count = runtime_findnull((const byte*)s);
+ sl.__capacity = sl.__count;
+ gwrite(sl);
}
-void
-runtime_dump(byte *p, int32 n)
+static void
+runtime_printbyte(int8 c)
{
- int32 i;
-
- for(i=0; i<n; i++) {
- runtime_printpointer((byte*)(uintptr)(p[i]>>4));
- runtime_printpointer((byte*)(uintptr)(p[i]&0xf));
- if((i&15) == 15)
- runtime_prints("\n");
- else
- runtime_prints(" ");
- }
- if(n & 15)
- runtime_prints("\n");
-}
+ Slice sl;
-void
-runtime_prints(const char *s)
-{
- gwrite(s, runtime_findnull((const byte*)s));
+ sl.__values = &c;
+ sl.__count = 1;
+ sl.__capacity = 1;
+ gwrite(sl);
}
#if defined (__clang__) && (defined (__i386__) || defined (__x86_64__))
@@ -104,15 +106,17 @@ runtime_snprintf(byte *buf, int32 n, const char *s, ...)
va_list va;
int32 m;
- g->writebuf = buf;
- g->writenbuf = n-1;
+ g->writebuf.__values = buf;
+ g->writebuf.__count = 0;
+ g->writebuf.__capacity = n-1;
va_start(va, s);
go_vprintf(s, va);
va_end(va);
- *g->writebuf = '\0';
- m = g->writebuf - buf;
- g->writenbuf = 0;
- g->writebuf = nil;
+ m = g->writebuf.__count;
+ ((byte*)g->writebuf.__values)[m] = '\0';
+ g->writebuf.__values = nil;
+ g->writebuf.__count = 0;
+ g->writebuf.__capacity = 0;
return m;
}
@@ -122,15 +126,21 @@ static void
go_vprintf(const char *s, va_list va)
{
const char *p, *lp;
+ Slice sl;
- //runtime_lock(&debuglock);
+ runtime_printlock();
lp = p = s;
for(; *p; p++) {
if(*p != '%')
continue;
- if(p > lp)
- gwrite(lp, p-lp);
+ if(p > lp) {
+ // Use memcpy to avoid const-cast warning.
+ memcpy(&sl.__values, &lp, sizeof(char*));
+ sl.__count = p - lp;
+ sl.__capacity = p - lp;
+ gwrite(sl);
+ }
p++;
switch(*p) {
case 'a':
@@ -181,192 +191,13 @@ go_vprintf(const char *s, va_list va)
}
lp = p+1;
}
- if(p > lp)
- gwrite(lp, p-lp);
-
- //runtime_unlock(&debuglock);
-}
-
-void
-runtime_printpc(void *p __attribute__ ((unused)))
-{
- runtime_prints("PC=");
- runtime_printhex((uint64)(uintptr)runtime_getcallerpc(p));
-}
-
-void
-runtime_printbool(_Bool v)
-{
- if(v) {
- gwrite("true", 4);
- return;
- }
- gwrite("false", 5);
-}
-
-void
-runtime_printbyte(int8 c)
-{
- gwrite(&c, 1);
-}
-
-void
-runtime_printfloat(double v)
-{
- byte buf[20];
- int32 e, s, i, n;
- float64 h;
-
- if(ISNAN(v)) {
- gwrite("NaN", 3);
- return;
- }
- if(isinf(v)) {
- if(signbit(v)) {
- gwrite("-Inf", 4);
- } else {
- gwrite("+Inf", 4);
- }
- return;
+ if(p > lp) {
+ // Use memcpy to avoid const-cast warning.
+ memcpy(&sl.__values, &lp, sizeof(char*));
+ sl.__count = p - lp;
+ sl.__capacity = p - lp;
+ gwrite(sl);
}
- n = 7; // digits printed
- e = 0; // exp
- s = 0; // sign
- if(v == 0) {
- if(isinf(1/v) && 1/v < 0)
- s = 1;
- } else {
- // sign
- if(v < 0) {
- v = -v;
- s = 1;
- }
-
- // normalize
- while(v >= 10) {
- e++;
- v /= 10;
- }
- while(v < 1) {
- e--;
- v *= 10;
- }
-
- // round
- h = 5;
- for(i=0; i<n; i++)
- h /= 10;
-
- v += h;
- if(v >= 10) {
- e++;
- v /= 10;
- }
- }
-
- // format +d.dddd+edd
- buf[0] = '+';
- if(s)
- buf[0] = '-';
- for(i=0; i<n; i++) {
- s = v;
- buf[i+2] = s+'0';
- v -= s;
- v *= 10.;
- }
- buf[1] = buf[2];
- buf[2] = '.';
-
- buf[n+2] = 'e';
- buf[n+3] = '+';
- if(e < 0) {
- e = -e;
- buf[n+3] = '-';
- }
-
- buf[n+4] = (e/100) + '0';
- buf[n+5] = (e/10)%10 + '0';
- buf[n+6] = (e%10) + '0';
- gwrite(buf, n+7);
-}
-
-void
-runtime_printcomplex(complex double v)
-{
- gwrite("(", 1);
- runtime_printfloat(creal(v));
- runtime_printfloat(cimag(v));
- gwrite("i)", 2);
-}
-
-void
-runtime_printuint(uint64 v)
-{
- byte buf[100];
- int32 i;
-
- for(i=nelem(buf)-1; i>0; i--) {
- buf[i] = v%10 + '0';
- if(v < 10)
- break;
- v = v/10;
- }
- gwrite(buf+i, nelem(buf)-i);
-}
-
-void
-runtime_printint(int64 v)
-{
- if(v < 0) {
- gwrite("-", 1);
- v = -v;
- }
- runtime_printuint(v);
-}
-
-void
-runtime_printhex(uint64 v)
-{
- static const char *dig = "0123456789abcdef";
- byte buf[100];
- int32 i;
-
- i=nelem(buf);
- for(; v>0; v/=16)
- buf[--i] = dig[v%16];
- if(i == nelem(buf))
- buf[--i] = '0';
- buf[--i] = 'x';
- buf[--i] = '0';
- gwrite(buf+i, nelem(buf)-i);
-}
-
-void
-runtime_printpointer(void *p)
-{
- runtime_printhex((uintptr)p);
-}
-
-void
-runtime_printstring(String v)
-{
- // if(v.len > runtime_maxstring) {
- // gwrite("[string too long]", 17);
- // return;
- // }
- if(v.len > 0)
- gwrite(v.str, v.len);
-}
-
-void
-__go_print_space(void)
-{
- gwrite(" ", 1);
-}
-
-void
-__go_print_nl(void)
-{
- gwrite("\n", 1);
+ runtime_printunlock();
}
diff --git a/libgo/runtime/proc.c b/libgo/runtime/proc.c
index c6ac972bd4..06a9c2ad6b 100644
--- a/libgo/runtime/proc.c
+++ b/libgo/runtime/proc.c
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+#include <errno.h>
#include <limits.h>
#include <signal.h>
#include <stdlib.h>
@@ -19,7 +20,6 @@
#include "defs.h"
#include "malloc.h"
#include "go-type.h"
-#include "go-defer.h"
#ifdef USING_SPLIT_STACK
@@ -62,7 +62,6 @@ static void gtraceback(G*);
#endif
static __thread G *g;
-static __thread M *m;
#ifndef SETCONTEXT_CLOBBERS_TLS
@@ -158,6 +157,26 @@ fixcontext(ucontext_t *c)
#endif
+// ucontext_arg returns a properly aligned ucontext_t value. On some
+// systems a ucontext_t value must be aligned to a 16-byte boundary.
+// The g structure that has fields of type ucontext_t is defined in
+// Go, and Go has no simple way to align a field to such a boundary.
+// So we make the field larger in runtime2.go and pick an appropriate
+// offset within the field here.
+static ucontext_t*
+ucontext_arg(void** go_ucontext)
+{
+ uintptr_t p = (uintptr_t)go_ucontext;
+ size_t align = __alignof__(ucontext_t);
+ if(align > 16) {
+ // We only ensured space for up to a 16 byte alignment
+ // in libgo/go/runtime/runtime2.go.
+ runtime_throw("required alignment of ucontext_t too large");
+ }
+ p = (p + align - 1) &~ (uintptr_t)(align - 1);
+ return (ucontext_t*)p;
+}
+
// We can not always refer to the TLS variables directly. The
// compiler will call tls_get_addr to get the address of the variable,
// and it may hold it in a register across a call to schedule. When
@@ -179,14 +198,15 @@ M* runtime_m(void) __attribute__ ((noinline, no_split_stack));
M*
runtime_m(void)
{
- return m;
+ if(g == nil)
+ return nil;
+ return g->m;
}
-// Set m and g.
+// Set g.
void
-runtime_setmg(M* mp, G* gp)
+runtime_setg(G* gp)
{
- m = mp;
g = gp;
}
@@ -197,6 +217,7 @@ runtime_newosproc(M *mp)
pthread_attr_t attr;
sigset_t clear, old;
pthread_t tid;
+ int tries;
int ret;
if(pthread_attr_init(&attr) != 0)
@@ -215,11 +236,21 @@ runtime_newosproc(M *mp)
sigemptyset(&old);
pthread_sigmask(SIG_BLOCK, &clear, &old);
- ret = pthread_create(&tid, &attr, runtime_mstart, mp);
+
+ for (tries = 0; tries < 20; tries++) {
+ ret = pthread_create(&tid, &attr, runtime_mstart, mp);
+ if (ret != EAGAIN) {
+ break;
+ }
+ runtime_usleep((tries + 1) * 1000); // Milliseconds.
+ }
+
pthread_sigmask(SIG_SETMASK, &old, nil);
- if (ret != 0)
+ if (ret != 0) {
+ runtime_printf("pthread_create failed: %d\n", ret);
runtime_throw("pthread_create");
+ }
}
// First function run by a new goroutine. This replaces gogocall.
@@ -227,13 +258,17 @@ static void
kickoff(void)
{
void (*fn)(void*);
+ void *param;
if(g->traceback != nil)
gtraceback(g);
fn = (void (*)(void*))(g->entry);
- fn(g->param);
- runtime_goexit();
+ param = g->param;
+ g->entry = nil;
+ g->param = nil;
+ fn(param);
+ runtime_goexit1();
}
// Switch context to a different goroutine. This is like longjmp.
@@ -242,12 +277,12 @@ void
runtime_gogo(G* newg)
{
#ifdef USING_SPLIT_STACK
- __splitstack_setcontext(&newg->stack_context[0]);
+ __splitstack_setcontext(&newg->stackcontext[0]);
#endif
g = newg;
newg->fromgogo = true;
- fixcontext(&newg->context);
- setcontext(&newg->context);
+ fixcontext(ucontext_arg(&newg->context[0]));
+ setcontext(ucontext_arg(&newg->context[0]));
runtime_throw("gogo setcontext returned");
}
@@ -261,42 +296,47 @@ runtime_mcall(void (*pfn)(G*))
{
M *mp;
G *gp;
+#ifndef USING_SPLIT_STACK
+ void *afterregs;
+#endif
// Ensure that all registers are on the stack for the garbage
// collector.
__builtin_unwind_init();
- mp = m;
gp = g;
+ mp = gp->m;
if(gp == mp->g0)
runtime_throw("runtime: mcall called on m->g0 stack");
if(gp != nil) {
#ifdef USING_SPLIT_STACK
- __splitstack_getcontext(&g->stack_context[0]);
+ __splitstack_getcontext(&g->stackcontext[0]);
#else
- gp->gcnext_sp = &pfn;
+ // We have to point to an address on the stack that is
+ // below the saved registers.
+ gp->gcnextsp = &afterregs;
#endif
gp->fromgogo = false;
- getcontext(&gp->context);
+ getcontext(ucontext_arg(&gp->context[0]));
// When we return from getcontext, we may be running
- // in a new thread. That means that m and g may have
- // changed. They are global variables so we will
- // reload them, but the addresses of m and g may be
- // cached in our local stack frame, and those
- // addresses may be wrong. Call functions to reload
- // the values for this thread.
- mp = runtime_m();
+ // in a new thread. That means that g may have
+ // changed. It is a global variables so we will
+ // reload it, but the address of g may be cached in
+ // our local stack frame, and that address may be
+ // wrong. Call the function to reload the value for
+ // this thread.
gp = runtime_g();
+ mp = gp->m;
if(gp->traceback != nil)
gtraceback(gp);
}
if (gp == nil || !gp->fromgogo) {
#ifdef USING_SPLIT_STACK
- __splitstack_setcontext(&mp->g0->stack_context[0]);
+ __splitstack_setcontext(&mp->g0->stackcontext[0]);
#endif
mp->g0->entry = (byte*)pfn;
mp->g0->param = gp;
@@ -306,8 +346,8 @@ runtime_mcall(void (*pfn)(G*))
// the getcontext call just above.
g = mp->g0;
- fixcontext(&mp->g0->context);
- setcontext(&mp->g0->context);
+ fixcontext(ucontext_arg(&mp->g0->context[0]));
+ setcontext(ucontext_arg(&mp->g0->context[0]));
runtime_throw("runtime: mcall function returned");
}
}
@@ -324,110 +364,92 @@ runtime_mcall(void (*pfn)(G*))
//
// Design doc at http://golang.org/s/go11sched.
-typedef struct Sched Sched;
-struct Sched {
- Lock;
-
- uint64 goidgen;
- M* midle; // idle m's waiting for work
- int32 nmidle; // number of idle m's waiting for work
- int32 nmidlelocked; // number of locked m's waiting for work
- int32 mcount; // number of m's that have been created
- int32 maxmcount; // maximum number of m's allowed (or die)
-
- P* pidle; // idle P's
- uint32 npidle;
- uint32 nmspinning;
-
- // Global runnable queue.
- G* runqhead;
- G* runqtail;
- int32 runqsize;
-
- // Global cache of dead G's.
- Lock gflock;
- G* gfree;
-
- uint32 gcwaiting; // gc is waiting to run
- int32 stopwait;
- Note stopnote;
- uint32 sysmonwait;
- Note sysmonnote;
- uint64 lastpoll;
-
- int32 profilehz; // cpu profiling rate
-};
-
enum
{
- // The max value of GOMAXPROCS.
- // There are no fundamental restrictions on the value.
- MaxGomaxprocs = 1<<8,
-
- // Number of goroutine ids to grab from runtime_sched.goidgen to local per-P cache at once.
+ // Number of goroutine ids to grab from runtime_sched->goidgen to local per-P cache at once.
// 16 seems to provide enough amortization, but other than that it's mostly arbitrary number.
GoidCacheBatch = 16,
};
-Sched runtime_sched;
-int32 runtime_gomaxprocs;
-uint32 runtime_needextram = 1;
+extern Sched* runtime_getsched() __asm__ (GOSYM_PREFIX "runtime.getsched");
+extern bool* runtime_getCgoHasExtraM()
+ __asm__ (GOSYM_PREFIX "runtime.getCgoHasExtraM");
+extern P** runtime_getAllP()
+ __asm__ (GOSYM_PREFIX "runtime.getAllP");
+extern G* allocg(void)
+ __asm__ (GOSYM_PREFIX "runtime.allocg");
+extern bool needaddgcproc(void)
+ __asm__ (GOSYM_PREFIX "runtime.needaddgcproc");
+extern void startm(P*, bool)
+ __asm__(GOSYM_PREFIX "runtime.startm");
+extern void newm(void(*)(void), P*)
+ __asm__(GOSYM_PREFIX "runtime.newm");
+
+Sched* runtime_sched;
M runtime_m0;
G runtime_g0; // idle goroutine for m0
G* runtime_lastg;
-M* runtime_allm;
P** runtime_allp;
-M* runtime_extram;
int8* runtime_goos;
int32 runtime_ncpu;
bool runtime_precisestack;
-static int32 newprocs;
-
-static Lock allglock; // the following vars are protected by this lock or by stoptheworld
-G** runtime_allg;
-uintptr runtime_allglen;
-static uintptr allgcap;
bool runtime_isarchive;
void* runtime_mstart(void*);
-static void runqput(P*, G*);
-static G* runqget(P*);
-static bool runqputslow(P*, G*, uint32, uint32);
-static G* runqsteal(P*, P*);
-static void mput(M*);
-static M* mget(void);
-static void mcommoninit(M*);
-static void schedule(void);
-static void procresize(int32);
-static void acquirep(P*);
-static P* releasep(void);
-static void newm(void(*)(void), P*);
-static void stopm(void);
-static void startm(P*, bool);
-static void handoffp(P*);
-static void wakep(void);
-static void stoplockedm(void);
-static void startlockedm(G*);
-static void sysmon(void);
-static uint32 retake(int64);
-static void incidlelocked(int32);
-static void checkdead(void);
static void exitsyscall0(G*);
static void park0(G*);
static void goexit0(G*);
-static void gfput(P*, G*);
-static G* gfget(P*);
-static void gfpurge(P*);
-static void globrunqput(G*);
-static void globrunqputbatch(G*, G*, int32);
-static G* globrunqget(P*, int32);
-static P* pidleget(void);
-static void pidleput(P*);
-static void injectglist(G*);
-static bool preemptall(void);
static bool exitsyscallfast(void);
-static void allgadd(G*);
+
+extern void setncpu(int32)
+ __asm__(GOSYM_PREFIX "runtime.setncpu");
+extern void setpagesize(uintptr_t)
+ __asm__(GOSYM_PREFIX "runtime.setpagesize");
+extern void allgadd(G*)
+ __asm__(GOSYM_PREFIX "runtime.allgadd");
+extern void mcommoninit(M*)
+ __asm__(GOSYM_PREFIX "runtime.mcommoninit");
+extern void stopm(void)
+ __asm__(GOSYM_PREFIX "runtime.stopm");
+extern void handoffp(P*)
+ __asm__(GOSYM_PREFIX "runtime.handoffp");
+extern void wakep(void)
+ __asm__(GOSYM_PREFIX "runtime.wakep");
+extern void stoplockedm(void)
+ __asm__(GOSYM_PREFIX "runtime.stoplockedm");
+extern void schedule(void)
+ __asm__(GOSYM_PREFIX "runtime.schedule");
+extern void execute(G*, bool)
+ __asm__(GOSYM_PREFIX "runtime.execute");
+extern void gfput(P*, G*)
+ __asm__(GOSYM_PREFIX "runtime.gfput");
+extern G* gfget(P*)
+ __asm__(GOSYM_PREFIX "runtime.gfget");
+extern void procresize(int32)
+ __asm__(GOSYM_PREFIX "runtime.procresize");
+extern void acquirep(P*)
+ __asm__(GOSYM_PREFIX "runtime.acquirep");
+extern P* releasep(void)
+ __asm__(GOSYM_PREFIX "runtime.releasep");
+extern void incidlelocked(int32)
+ __asm__(GOSYM_PREFIX "runtime.incidlelocked");
+extern void checkdead(void)
+ __asm__(GOSYM_PREFIX "runtime.checkdead");
+extern void sysmon(void)
+ __asm__(GOSYM_PREFIX "runtime.sysmon");
+extern void mput(M*)
+ __asm__(GOSYM_PREFIX "runtime.mput");
+extern M* mget(void)
+ __asm__(GOSYM_PREFIX "runtime.mget");
+extern void globrunqput(G*)
+ __asm__(GOSYM_PREFIX "runtime.globrunqput");
+extern P* pidleget(void)
+ __asm__(GOSYM_PREFIX "runtime.pidleget");
+extern bool runqempty(P*)
+ __asm__(GOSYM_PREFIX "runtime.runqempty");
+extern void runqput(P*, G*, bool)
+ __asm__(GOSYM_PREFIX "runtime.runqput");
bool runtime_isstarted;
@@ -442,11 +464,16 @@ bool runtime_isstarted;
void
runtime_schedinit(void)
{
+ M *m;
int32 n, procs;
String s;
const byte *p;
Eface i;
+ setncpu(runtime_ncpu);
+ setpagesize(getpagesize());
+ runtime_sched = runtime_getsched();
+
m = &runtime_m0;
g = &runtime_g0;
m->g0 = g;
@@ -455,13 +482,14 @@ runtime_schedinit(void)
initcontext();
- runtime_sched.maxmcount = 10000;
+ runtime_sched->maxmcount = 10000;
runtime_precisestack = 0;
// runtime_symtabinit();
runtime_mallocinit();
mcommoninit(m);
-
+ runtime_alginit(); // maps must not be used before this call
+
// Initialize the itable value for newErrorCString,
// so that the next time it gets called, possibly
// in a fault during a garbage collection, it will not
@@ -476,20 +504,20 @@ runtime_schedinit(void)
runtime_goenvs();
runtime_parsedebugvars();
- runtime_sched.lastpoll = runtime_nanotime();
+ runtime_sched->lastpoll = runtime_nanotime();
procs = 1;
s = runtime_getenv("GOMAXPROCS");
p = s.str;
if(p != nil && (n = runtime_atoi(p, s.len)) > 0) {
- if(n > MaxGomaxprocs)
- n = MaxGomaxprocs;
+ if(n > _MaxGomaxprocs)
+ n = _MaxGomaxprocs;
procs = n;
}
- runtime_allp = runtime_malloc((MaxGomaxprocs+1)*sizeof(runtime_allp[0]));
+ runtime_allp = runtime_getAllP();
procresize(procs);
// Can not enable GC until all roots are registered.
- // mstats.enablegc = 1;
+ // mstats()->enablegc = 1;
}
extern void main_init(void) __asm__ (GOSYM_PREFIX "__go_init_main");
@@ -503,54 +531,6 @@ struct field_align
Hchan *p;
};
-// main_init_done is a signal used by cgocallbackg that initialization
-// has been completed. It is made before _cgo_notify_runtime_init_done,
-// so all cgo calls can rely on it existing. When main_init is
-// complete, it is closed, meaning cgocallbackg can reliably receive
-// from it.
-Hchan *runtime_main_init_done;
-
-// The chan bool type, for runtime_main_init_done.
-
-extern const struct __go_type_descriptor bool_type_descriptor
- __asm__ (GOSYM_PREFIX "__go_tdn_bool");
-
-static struct __go_channel_type chan_bool_type_descriptor =
- {
- /* __common */
- {
- /* __code */
- GO_CHAN,
- /* __align */
- __alignof (Hchan *),
- /* __field_align */
- offsetof (struct field_align, p) - 1,
- /* __size */
- sizeof (Hchan *),
- /* __hash */
- 0, /* This value doesn't matter. */
- /* __hashfn */
- &__go_type_hash_error_descriptor,
- /* __equalfn */
- &__go_type_equal_error_descriptor,
- /* __gc */
- NULL, /* This value doesn't matter */
- /* __reflection */
- NULL, /* This value doesn't matter */
- /* __uncommon */
- NULL,
- /* __pointer_to_this */
- NULL
- },
- /* __element_type */
- &bool_type_descriptor,
- /* __dir */
- CHANNEL_BOTH_DIR
- };
-
-extern Hchan *__go_new_channel (ChanType *, uintptr);
-extern void closechan(Hchan *) __asm__ (GOSYM_PREFIX "runtime.closechan");
-
static void
initDone(void *arg __attribute__ ((unused))) {
runtime_unlockOSThread();
@@ -583,37 +563,37 @@ runtime_main(void* dummy __attribute__((unused)))
runtime_lockOSThread();
// Defer unlock so that runtime.Goexit during init does the unlock too.
- d.__pfn = initDone;
- d.__next = g->defer;
- d.__arg = (void*)-1;
- d.__panic = g->panic;
- d.__retaddr = nil;
- d.__makefunc_can_recover = 0;
- d.__frame = &frame;
- d.__special = true;
- g->defer = &d;
-
- if(m != &runtime_m0)
+ d.pfn = (uintptr)(void*)initDone;
+ d.link = g->_defer;
+ d.arg = (void*)-1;
+ d._panic = g->_panic;
+ d.retaddr = 0;
+ d.makefunccanrecover = 0;
+ d.frame = &frame;
+ d.special = true;
+ g->_defer = &d;
+
+ if(g->m != &runtime_m0)
runtime_throw("runtime_main not on m0");
__go_go(runtime_MHeap_Scavenger, nil);
- runtime_main_init_done = __go_new_channel(&chan_bool_type_descriptor, 0);
+ makeMainInitDone();
_cgo_notify_runtime_init_done();
main_init();
- closechan(runtime_main_init_done);
+ closeMainInitDone();
- if(g->defer != &d || d.__pfn != initDone)
+ if(g->_defer != &d || (void*)d.pfn != initDone)
runtime_throw("runtime: bad defer entry after init");
- g->defer = d.__next;
+ g->_defer = d.link;
runtime_unlockOSThread();
// For gccgo we have to wait until after main is initialized
// to enable GC, because initializing main registers the GC
// roots.
- mstats.enablegc = 1;
+ mstats()->enablegc = 1;
if(runtime_isarchive) {
// This is not a complete program, but is instead a
@@ -629,7 +609,7 @@ runtime_main(void* dummy __attribute__((unused)))
// another goroutine at the same time as main returns,
// let the other goroutine finish printing the panic trace.
// Once it does, it will exit. See issue 3934.
- if(runtime_panicking)
+ if(runtime_panicking())
runtime_park(nil, nil, "panicwait");
runtime_exit(0);
@@ -637,150 +617,24 @@ runtime_main(void* dummy __attribute__((unused)))
*(int32*)0 = 0;
}
-void
-runtime_goroutineheader(G *gp)
-{
- const char *status;
- int64 waitfor;
-
- switch(gp->status) {
- case Gidle:
- status = "idle";
- break;
- case Grunnable:
- status = "runnable";
- break;
- case Grunning:
- status = "running";
- break;
- case Gsyscall:
- status = "syscall";
- break;
- case Gwaiting:
- if(gp->waitreason)
- status = gp->waitreason;
- else
- status = "waiting";
- break;
- default:
- status = "???";
- break;
- }
-
- // approx time the G is blocked, in minutes
- waitfor = 0;
- if((gp->status == Gwaiting || gp->status == Gsyscall) && gp->waitsince != 0)
- waitfor = (runtime_nanotime() - gp->waitsince) / (60LL*1000*1000*1000);
-
- if(waitfor < 1)
- runtime_printf("goroutine %D [%s]:\n", gp->goid, status);
- else
- runtime_printf("goroutine %D [%s, %D minutes]:\n", gp->goid, status, waitfor);
-}
-
-void
-runtime_printcreatedby(G *g)
-{
- if(g != nil && g->gopc != 0 && g->goid != 1) {
- String fn;
- String file;
- intgo line;
-
- if(__go_file_line(g->gopc - 1, &fn, &file, &line)) {
- runtime_printf("created by %S\n", fn);
- runtime_printf("\t%S:%D\n", file, (int64) line);
- }
- }
-}
-
-struct Traceback
-{
- G* gp;
- Location locbuf[TracebackMaxFrames];
- int32 c;
-};
+void getTraceback(G*, G*) __asm__(GOSYM_PREFIX "runtime.getTraceback");
-void
-runtime_tracebackothers(G * volatile me)
+// getTraceback stores a traceback of gp in the g's traceback field
+// and then returns to me. We expect that gp's traceback is not nil.
+// It works by saving me's current context, and checking gp's traceback field.
+// If gp's traceback field is not nil, it starts running gp.
+// In places where we call getcontext, we check the traceback field.
+// If it is not nil, we collect a traceback, and then return to the
+// goroutine stored in the traceback field, which is me.
+void getTraceback(G* me, G* gp)
{
- G * volatile gp;
- Traceback tb;
- int32 traceback;
- volatile uintptr i;
-
- tb.gp = me;
- traceback = runtime_gotraceback(nil);
-
- // Show the current goroutine first, if we haven't already.
- if((gp = m->curg) != nil && gp != me) {
- runtime_printf("\n");
- runtime_goroutineheader(gp);
- gp->traceback = &tb;
-
-#ifdef USING_SPLIT_STACK
- __splitstack_getcontext(&me->stack_context[0]);
-#endif
- getcontext(&me->context);
-
- if(gp->traceback != nil) {
- runtime_gogo(gp);
- }
-
- runtime_printtrace(tb.locbuf, tb.c, false);
- runtime_printcreatedby(gp);
- }
-
- runtime_lock(&allglock);
- for(i = 0; i < runtime_allglen; i++) {
- gp = runtime_allg[i];
- if(gp == me || gp == m->curg || gp->status == Gdead)
- continue;
- if(gp->issystem && traceback < 2)
- continue;
- runtime_printf("\n");
- runtime_goroutineheader(gp);
-
- // Our only mechanism for doing a stack trace is
- // _Unwind_Backtrace. And that only works for the
- // current thread, not for other random goroutines.
- // So we need to switch context to the goroutine, get
- // the backtrace, and then switch back.
-
- // This means that if g is running or in a syscall, we
- // can't reliably print a stack trace. FIXME.
-
- if(gp->status == Grunning) {
- runtime_printf("\tgoroutine running on other thread; stack unavailable\n");
- runtime_printcreatedby(gp);
- } else if(gp->status == Gsyscall) {
- runtime_printf("\tgoroutine in C code; stack unavailable\n");
- runtime_printcreatedby(gp);
- } else {
- gp->traceback = &tb;
-
#ifdef USING_SPLIT_STACK
- __splitstack_getcontext(&me->stack_context[0]);
+ __splitstack_getcontext(&me->stackcontext[0]);
#endif
- getcontext(&me->context);
-
- if(gp->traceback != nil) {
- runtime_gogo(gp);
- }
+ getcontext(ucontext_arg(&me->context[0]));
- runtime_printtrace(tb.locbuf, tb.c, false);
- runtime_printcreatedby(gp);
- }
- }
- runtime_unlock(&allglock);
-}
-
-static void
-checkmcount(void)
-{
- // sched lock is held
- if(runtime_sched.mcount > runtime_sched.maxmcount) {
- runtime_printf("runtime: program exceeds %d-thread limit\n", runtime_sched.maxmcount);
- runtime_throw("thread exhaustion");
+ if (gp->traceback != nil) {
+ runtime_gogo(gp);
}
}
@@ -794,286 +648,56 @@ gtraceback(G* gp)
traceback = gp->traceback;
gp->traceback = nil;
+ if(gp->m != nil)
+ runtime_throw("gtraceback: m is not nil");
+ gp->m = traceback->gp->m;
traceback->c = runtime_callers(1, traceback->locbuf,
sizeof traceback->locbuf / sizeof traceback->locbuf[0], false);
+ gp->m = nil;
runtime_gogo(traceback->gp);
}
-static void
-mcommoninit(M *mp)
-{
- // If there is no mcache runtime_callers() will crash,
- // and we are most likely in sysmon thread so the stack is senseless anyway.
- if(m->mcache)
- runtime_callers(1, mp->createstack, nelem(mp->createstack), false);
-
- mp->fastrand = 0x49f6428aUL + mp->id + runtime_cputicks();
-
- runtime_lock(&runtime_sched);
- mp->id = runtime_sched.mcount++;
- checkmcount();
- runtime_mpreinit(mp);
-
- // Add to runtime_allm so garbage collector doesn't free m
- // when it is just in a register or thread-local storage.
- mp->alllink = runtime_allm;
- // runtime_NumCgoCall() iterates over allm w/o schedlock,
- // so we need to publish it safely.
- runtime_atomicstorep(&runtime_allm, mp);
- runtime_unlock(&runtime_sched);
-}
-
-// Mark gp ready to run.
-void
-runtime_ready(G *gp)
-{
- // Mark runnable.
- m->locks++; // disable preemption because it can be holding p in a local var
- if(gp->status != Gwaiting) {
- runtime_printf("goroutine %D has status %d\n", gp->goid, gp->status);
- runtime_throw("bad g->status in ready");
- }
- gp->status = Grunnable;
- runqput(m->p, gp);
- if(runtime_atomicload(&runtime_sched.npidle) != 0 && runtime_atomicload(&runtime_sched.nmspinning) == 0) // TODO: fast atomic
- wakep();
- m->locks--;
-}
-
-int32
-runtime_gcprocs(void)
-{
- int32 n;
-
- // Figure out how many CPUs to use during GC.
- // Limited by gomaxprocs, number of actual CPUs, and MaxGcproc.
- runtime_lock(&runtime_sched);
- n = runtime_gomaxprocs;
- if(n > runtime_ncpu)
- n = runtime_ncpu > 0 ? runtime_ncpu : 1;
- if(n > MaxGcproc)
- n = MaxGcproc;
- if(n > runtime_sched.nmidle+1) // one M is currently running
- n = runtime_sched.nmidle+1;
- runtime_unlock(&runtime_sched);
- return n;
-}
-
-static bool
-needaddgcproc(void)
-{
- int32 n;
-
- runtime_lock(&runtime_sched);
- n = runtime_gomaxprocs;
- if(n > runtime_ncpu)
- n = runtime_ncpu;
- if(n > MaxGcproc)
- n = MaxGcproc;
- n -= runtime_sched.nmidle+1; // one M is currently running
- runtime_unlock(&runtime_sched);
- return n > 0;
-}
-
-void
-runtime_helpgc(int32 nproc)
-{
- M *mp;
- int32 n, pos;
-
- runtime_lock(&runtime_sched);
- pos = 0;
- for(n = 1; n < nproc; n++) { // one M is currently running
- if(runtime_allp[pos]->mcache == m->mcache)
- pos++;
- mp = mget();
- if(mp == nil)
- runtime_throw("runtime_gcprocs inconsistency");
- mp->helpgc = n;
- mp->mcache = runtime_allp[pos]->mcache;
- pos++;
- runtime_notewakeup(&mp->park);
- }
- runtime_unlock(&runtime_sched);
-}
-
-// Similar to stoptheworld but best-effort and can be called several times.
-// There is no reverse operation, used during crashing.
-// This function must not lock any mutexes.
-void
-runtime_freezetheworld(void)
-{
- int32 i;
-
- if(runtime_gomaxprocs == 1)
- return;
- // stopwait and preemption requests can be lost
- // due to races with concurrently executing threads,
- // so try several times
- for(i = 0; i < 5; i++) {
- // this should tell the scheduler to not start any new goroutines
- runtime_sched.stopwait = 0x7fffffff;
- runtime_atomicstore((uint32*)&runtime_sched.gcwaiting, 1);
- // this should stop running goroutines
- if(!preemptall())
- break; // no running goroutines
- runtime_usleep(1000);
- }
- // to be sure
- runtime_usleep(1000);
- preemptall();
- runtime_usleep(1000);
-}
-
-void
-runtime_stoptheworld(void)
-{
- int32 i;
- uint32 s;
- P *p;
- bool wait;
-
- runtime_lock(&runtime_sched);
- runtime_sched.stopwait = runtime_gomaxprocs;
- runtime_atomicstore((uint32*)&runtime_sched.gcwaiting, 1);
- preemptall();
- // stop current P
- m->p->status = Pgcstop;
- runtime_sched.stopwait--;
- // try to retake all P's in Psyscall status
- for(i = 0; i < runtime_gomaxprocs; i++) {
- p = runtime_allp[i];
- s = p->status;
- if(s == Psyscall && runtime_cas(&p->status, s, Pgcstop))
- runtime_sched.stopwait--;
- }
- // stop idle P's
- while((p = pidleget()) != nil) {
- p->status = Pgcstop;
- runtime_sched.stopwait--;
- }
- wait = runtime_sched.stopwait > 0;
- runtime_unlock(&runtime_sched);
-
- // wait for remaining P's to stop voluntarily
- if(wait) {
- runtime_notesleep(&runtime_sched.stopnote);
- runtime_noteclear(&runtime_sched.stopnote);
- }
- if(runtime_sched.stopwait)
- runtime_throw("stoptheworld: not stopped");
- for(i = 0; i < runtime_gomaxprocs; i++) {
- p = runtime_allp[i];
- if(p->status != Pgcstop)
- runtime_throw("stoptheworld: not stopped");
- }
-}
-
-static void
-mhelpgc(void)
-{
- m->helpgc = -1;
-}
-
-void
-runtime_starttheworld(void)
-{
- P *p, *p1;
- M *mp;
- G *gp;
- bool add;
-
- m->locks++; // disable preemption because it can be holding p in a local var
- gp = runtime_netpoll(false); // non-blocking
- injectglist(gp);
- add = needaddgcproc();
- runtime_lock(&runtime_sched);
- if(newprocs) {
- procresize(newprocs);
- newprocs = 0;
- } else
- procresize(runtime_gomaxprocs);
- runtime_sched.gcwaiting = 0;
-
- p1 = nil;
- while((p = pidleget()) != nil) {
- // procresize() puts p's with work at the beginning of the list.
- // Once we reach a p without a run queue, the rest don't have one either.
- if(p->runqhead == p->runqtail) {
- pidleput(p);
- break;
- }
- p->m = mget();
- p->link = p1;
- p1 = p;
- }
- if(runtime_sched.sysmonwait) {
- runtime_sched.sysmonwait = false;
- runtime_notewakeup(&runtime_sched.sysmonnote);
- }
- runtime_unlock(&runtime_sched);
-
- while(p1) {
- p = p1;
- p1 = p1->link;
- if(p->m) {
- mp = p->m;
- p->m = nil;
- if(mp->nextp)
- runtime_throw("starttheworld: inconsistent mp->nextp");
- mp->nextp = p;
- runtime_notewakeup(&mp->park);
- } else {
- // Start M to run P. Do not start another M below.
- newm(nil, p);
- add = false;
- }
- }
-
- if(add) {
- // If GC could have used another helper proc, start one now,
- // in the hope that it will be available next time.
- // It would have been even better to start it before the collection,
- // but doing so requires allocating memory, so it's tricky to
- // coordinate. This lazy approach works out in practice:
- // we don't mind if the first couple gc rounds don't have quite
- // the maximum number of procs.
- newm(mhelpgc, nil);
- }
- m->locks--;
-}
-
// Called to start an M.
void*
runtime_mstart(void* mp)
{
+ M *m;
+ G *gp;
+
m = (M*)mp;
g = m->g0;
+ g->m = m;
+ gp = g;
initcontext();
- g->entry = nil;
- g->param = nil;
+ gp->entry = nil;
+ gp->param = nil;
// Record top of stack for use by mcall.
// Once we call schedule we're never coming back,
// so other calls can reuse this stack space.
#ifdef USING_SPLIT_STACK
- __splitstack_getcontext(&g->stack_context[0]);
+ __splitstack_getcontext(&g->stackcontext[0]);
#else
- g->gcinitial_sp = &mp;
- // Setting gcstack_size to 0 is a marker meaning that gcinitial_sp
+ gp->gcinitialsp = &mp;
+ // Setting gcstacksize to 0 is a marker meaning that gcinitialsp
// is the top of the stack, not the bottom.
- g->gcstack_size = 0;
- g->gcnext_sp = &mp;
+ gp->gcstacksize = 0;
+ gp->gcnextsp = &mp;
#endif
- getcontext(&g->context);
+ getcontext(ucontext_arg(&gp->context[0]));
+
+ if(gp->traceback != nil)
+ gtraceback(gp);
- if(g->entry != nil) {
+ if(gp->entry != nil) {
// Got here from mcall.
- void (*pfn)(G*) = (void (*)(G*))g->entry;
- G* gp = (G*)g->param;
- pfn(gp);
+ void (*pfn)(G*) = (void (*)(G*))gp->entry;
+ G* gp1 = (G*)gp->param;
+ gp->entry = nil;
+ gp->param = nil;
+ pfn(gp1);
*(int*)0x21 = 0x21;
}
runtime_minit();
@@ -1088,23 +712,25 @@ runtime_mstart(void* mp)
// Install signal handlers; after minit so that minit can
// prepare the thread to be able to handle the signals.
if(m == &runtime_m0) {
- if(runtime_iscgo && !runtime_cgoHasExtraM) {
- runtime_cgoHasExtraM = true;
- runtime_newextram();
- runtime_needextram = 0;
+ if(runtime_iscgo) {
+ bool* cgoHasExtraM = runtime_getCgoHasExtraM();
+ if(!*cgoHasExtraM) {
+ *cgoHasExtraM = true;
+ runtime_newextram();
+ }
}
runtime_initsig(false);
}
if(m->mstartfn)
- m->mstartfn();
+ ((void (*)(void))m->mstartfn)();
if(m->helpgc) {
m->helpgc = 0;
stopm();
} else if(m != &runtime_m0) {
- acquirep(m->nextp);
- m->nextp = nil;
+ acquirep((P*)m->nextp);
+ m->nextp = 0;
}
schedule();
@@ -1124,15 +750,18 @@ struct CgoThreadStart
void (*fn)(void);
};
+M* runtime_allocm(P*, bool, byte**, uintptr*)
+ __asm__(GOSYM_PREFIX "runtime.allocm");
+
// Allocate a new m unassociated with any thread.
// Can use p for allocation context if needed.
M*
-runtime_allocm(P *p, int32 stacksize, byte** ret_g0_stack, size_t* ret_g0_stacksize)
+runtime_allocm(P *p, bool allocatestack, byte** ret_g0_stack, uintptr* ret_g0_stacksize)
{
M *mp;
- m->locks++; // disable GC because it can be called from sysmon
- if(m->p == nil)
+ g->m->locks++; // disable GC because it can be called from sysmon
+ if(g->m->p == 0)
acquirep(p); // temporarily borrow p for mallocs in this function
#if 0
if(mtype == nil) {
@@ -1144,355 +773,106 @@ runtime_allocm(P *p, int32 stacksize, byte** ret_g0_stack, size_t* ret_g0_stacks
mp = runtime_mal(sizeof *mp);
mcommoninit(mp);
- mp->g0 = runtime_malg(stacksize, ret_g0_stack, ret_g0_stacksize);
+ mp->g0 = runtime_malg(allocatestack, false, ret_g0_stack, ret_g0_stacksize);
+ mp->g0->m = mp;
- if(p == m->p)
+ if(p == (P*)g->m->p)
releasep();
- m->locks--;
+ g->m->locks--;
return mp;
}
-static G*
-allocg(void)
-{
- G *gp;
- // static Type *gtype;
-
- // if(gtype == nil) {
- // Eface e;
- // runtime_gc_g_ptr(&e);
- // gtype = ((PtrType*)e.__type_descriptor)->__element_type;
- // }
- // gp = runtime_cnew(gtype);
- gp = runtime_malloc(sizeof(G));
- return gp;
-}
+void setGContext(void) __asm__ (GOSYM_PREFIX "runtime.setGContext");
-static M* lockextra(bool nilokay);
-static void unlockextra(M*);
-
-// needm is called when a cgo callback happens on a
-// thread without an m (a thread not created by Go).
-// In this case, needm is expected to find an m to use
-// and return with m, g initialized correctly.
-// Since m and g are not set now (likely nil, but see below)
-// needm is limited in what routines it can call. In particular
-// it can only call nosplit functions (textflag 7) and cannot
-// do any scheduling that requires an m.
-//
-// In order to avoid needing heavy lifting here, we adopt
-// the following strategy: there is a stack of available m's
-// that can be stolen. Using compare-and-swap
-// to pop from the stack has ABA races, so we simulate
-// a lock by doing an exchange (via casp) to steal the stack
-// head and replace the top pointer with MLOCKED (1).
-// This serves as a simple spin lock that we can use even
-// without an m. The thread that locks the stack in this way
-// unlocks the stack by storing a valid stack head pointer.
-//
-// In order to make sure that there is always an m structure
-// available to be stolen, we maintain the invariant that there
-// is always one more than needed. At the beginning of the
-// program (if cgo is in use) the list is seeded with a single m.
-// If needm finds that it has taken the last m off the list, its job
-// is - once it has installed its own m so that it can do things like
-// allocate memory - to create a spare m and put it on the list.
-//
-// Each of these extra m's also has a g0 and a curg that are
-// pressed into service as the scheduling stack and current
-// goroutine for the duration of the cgo callback.
-//
-// When the callback is done with the m, it calls dropm to
-// put the m back on the list.
-//
-// Unlike the gc toolchain, we start running on curg, since we are
-// just going to return and let the caller continue.
+// setGContext sets up a new goroutine context for the current g.
void
-runtime_needm(void)
+setGContext()
{
- M *mp;
-
- if(runtime_needextram) {
- // Can happen if C/C++ code calls Go from a global ctor.
- // Can not throw, because scheduler is not initialized yet.
- int rv __attribute__((unused));
- rv = runtime_write(2, "fatal error: cgo callback before cgo call\n",
- sizeof("fatal error: cgo callback before cgo call\n")-1);
- runtime_exit(1);
- }
+ int val;
+ G *gp;
- // Lock extra list, take head, unlock popped list.
- // nilokay=false is safe here because of the invariant above,
- // that the extra list always contains or will soon contain
- // at least one m.
- mp = lockextra(false);
-
- // Set needextram when we've just emptied the list,
- // so that the eventual call into cgocallbackg will
- // allocate a new m for the extra list. We delay the
- // allocation until then so that it can be done
- // after exitsyscall makes sure it is okay to be
- // running at all (that is, there's no garbage collection
- // running right now).
- mp->needextram = mp->schedlink == nil;
- unlockextra(mp->schedlink);
-
- // Install m and g (= m->curg).
- runtime_setmg(mp, mp->curg);
-
- // Initialize g's context as in mstart.
initcontext();
- g->status = Gsyscall;
- g->entry = nil;
- g->param = nil;
+ gp = g;
+ gp->entry = nil;
+ gp->param = nil;
#ifdef USING_SPLIT_STACK
- __splitstack_getcontext(&g->stack_context[0]);
+ __splitstack_getcontext(&gp->stackcontext[0]);
+ val = 0;
+ __splitstack_block_signals(&val, nil);
#else
- g->gcinitial_sp = &mp;
- g->gcstack = nil;
- g->gcstack_size = 0;
- g->gcnext_sp = &mp;
+ gp->gcinitialsp = &val;
+ gp->gcstack = nil;
+ gp->gcstacksize = 0;
+ gp->gcnextsp = &val;
#endif
- getcontext(&g->context);
+ getcontext(ucontext_arg(&gp->context[0]));
- if(g->entry != nil) {
+ if(gp->entry != nil) {
// Got here from mcall.
- void (*pfn)(G*) = (void (*)(G*))g->entry;
- G* gp = (G*)g->param;
- pfn(gp);
+ void (*pfn)(G*) = (void (*)(G*))gp->entry;
+ G* gp1 = (G*)gp->param;
+ gp->entry = nil;
+ gp->param = nil;
+ pfn(gp1);
*(int*)0x22 = 0x22;
}
-
- // Initialize this thread to use the m.
- runtime_minit();
-
-#ifdef USING_SPLIT_STACK
- {
- int dont_block_signals = 0;
- __splitstack_block_signals(&dont_block_signals, nil);
- }
-#endif
}
-// newextram allocates an m and puts it on the extra list.
-// It is called with a working local m, so that it can do things
-// like call schedlock and allocate.
-void
-runtime_newextram(void)
-{
- M *mp, *mnext;
- G *gp;
- byte *g0_sp, *sp;
- size_t g0_spsize, spsize;
-
- // Create extra goroutine locked to extra m.
- // The goroutine is the context in which the cgo callback will run.
- // The sched.pc will never be returned to, but setting it to
- // runtime.goexit makes clear to the traceback routines where
- // the goroutine stack ends.
- mp = runtime_allocm(nil, StackMin, &g0_sp, &g0_spsize);
- gp = runtime_malg(StackMin, &sp, &spsize);
- gp->status = Gdead;
- mp->curg = gp;
- mp->locked = LockInternal;
- mp->lockedg = gp;
- gp->lockedm = mp;
- gp->goid = runtime_xadd64(&runtime_sched.goidgen, 1);
- // put on allg for garbage collector
- allgadd(gp);
-
- // The context for gp will be set up in runtime_needm. But
- // here we need to set up the context for g0.
- getcontext(&mp->g0->context);
- mp->g0->context.uc_stack.ss_sp = g0_sp;
- mp->g0->context.uc_stack.ss_size = g0_spsize;
- makecontext(&mp->g0->context, kickoff, 0);
-
- // Add m to the extra list.
- mnext = lockextra(true);
- mp->schedlink = mnext;
- unlockextra(mp);
-}
+void makeGContext(G*, byte*, uintptr)
+ __asm__(GOSYM_PREFIX "runtime.makeGContext");
-// dropm is called when a cgo callback has called needm but is now
-// done with the callback and returning back into the non-Go thread.
-// It puts the current m back onto the extra list.
-//
-// The main expense here is the call to signalstack to release the
-// m's signal stack, and then the call to needm on the next callback
-// from this thread. It is tempting to try to save the m for next time,
-// which would eliminate both these costs, but there might not be
-// a next time: the current thread (which Go does not control) might exit.
-// If we saved the m for that thread, there would be an m leak each time
-// such a thread exited. Instead, we acquire and release an m on each
-// call. These should typically not be scheduling operations, just a few
-// atomics, so the cost should be small.
-//
-// TODO(rsc): An alternative would be to allocate a dummy pthread per-thread
-// variable using pthread_key_create. Unlike the pthread keys we already use
-// on OS X, this dummy key would never be read by Go code. It would exist
-// only so that we could register at thread-exit-time destructor.
-// That destructor would put the m back onto the extra list.
-// This is purely a performance optimization. The current version,
-// in which dropm happens on each cgo call, is still correct too.
-// We may have to keep the current version on systems with cgo
-// but without pthreads, like Windows.
+// makeGContext makes a new context for a g.
void
-runtime_dropm(void)
-{
- M *mp, *mnext;
-
- // Undo whatever initialization minit did during needm.
- runtime_unminit();
+makeGContext(G* gp, byte* sp, uintptr spsize) {
+ ucontext_t *uc;
- // Clear m and g, and return m to the extra list.
- // After the call to setmg we can only call nosplit functions.
- mp = m;
- runtime_setmg(nil, nil);
-
- mp->curg->status = Gdead;
- mp->curg->gcstack = nil;
- mp->curg->gcnext_sp = nil;
-
- mnext = lockextra(true);
- mp->schedlink = mnext;
- unlockextra(mp);
-}
-
-#define MLOCKED ((M*)1)
-
-// lockextra locks the extra list and returns the list head.
-// The caller must unlock the list by storing a new list head
-// to runtime.extram. If nilokay is true, then lockextra will
-// return a nil list head if that's what it finds. If nilokay is false,
-// lockextra will keep waiting until the list head is no longer nil.
-static M*
-lockextra(bool nilokay)
-{
- M *mp;
- void (*yield)(void);
-
- for(;;) {
- mp = runtime_atomicloadp(&runtime_extram);
- if(mp == MLOCKED) {
- yield = runtime_osyield;
- yield();
- continue;
- }
- if(mp == nil && !nilokay) {
- runtime_usleep(1);
- continue;
- }
- if(!runtime_casp(&runtime_extram, mp, MLOCKED)) {
- yield = runtime_osyield;
- yield();
- continue;
- }
- break;
- }
- return mp;
-}
-
-static void
-unlockextra(M *mp)
-{
- runtime_atomicstorep(&runtime_extram, mp);
-}
-
-static int32
-countextra()
-{
- M *mp, *mc;
- int32 c;
-
- for(;;) {
- mp = runtime_atomicloadp(&runtime_extram);
- if(mp == MLOCKED) {
- runtime_osyield();
- continue;
- }
- if(!runtime_casp(&runtime_extram, mp, MLOCKED)) {
- runtime_osyield();
- continue;
- }
- c = 0;
- for(mc = mp; mc != nil; mc = mc->schedlink)
- c++;
- runtime_atomicstorep(&runtime_extram, mp);
- return c;
- }
+ uc = ucontext_arg(&gp->context[0]);
+ getcontext(uc);
+ uc->uc_stack.ss_sp = sp;
+ uc->uc_stack.ss_size = (size_t)spsize;
+ makecontext(uc, kickoff, 0);
}
// Create a new m. It will start off with a call to fn, or else the scheduler.
-static void
+void
newm(void(*fn)(void), P *p)
{
M *mp;
- mp = runtime_allocm(p, -1, nil, nil);
- mp->nextp = p;
- mp->mstartfn = fn;
+ mp = runtime_allocm(p, false, nil, nil);
+ mp->nextp = (uintptr)p;
+ mp->mstartfn = (uintptr)(void*)fn;
runtime_newosproc(mp);
}
-// Stops execution of the current m until new work is available.
-// Returns with acquired P.
-static void
-stopm(void)
-{
- if(m->locks)
- runtime_throw("stopm holding locks");
- if(m->p)
- runtime_throw("stopm holding p");
- if(m->spinning) {
- m->spinning = false;
- runtime_xadd(&runtime_sched.nmspinning, -1);
- }
-
-retry:
- runtime_lock(&runtime_sched);
- mput(m);
- runtime_unlock(&runtime_sched);
- runtime_notesleep(&m->park);
- runtime_noteclear(&m->park);
- if(m->helpgc) {
- runtime_gchelper();
- m->helpgc = 0;
- m->mcache = nil;
- goto retry;
- }
- acquirep(m->nextp);
- m->nextp = nil;
-}
-
static void
mspinning(void)
{
- m->spinning = true;
+ g->m->spinning = true;
}
// Schedules some M to run the p (creates an M if necessary).
// If p==nil, tries to get an idle P, if no idle P's does nothing.
-static void
+void
startm(P *p, bool spinning)
{
M *mp;
void (*fn)(void);
- runtime_lock(&runtime_sched);
+ runtime_lock(&runtime_sched->lock);
if(p == nil) {
p = pidleget();
if(p == nil) {
- runtime_unlock(&runtime_sched);
+ runtime_unlock(&runtime_sched->lock);
if(spinning)
- runtime_xadd(&runtime_sched.nmspinning, -1);
+ runtime_xadd(&runtime_sched->nmspinning, -1);
return;
}
}
mp = mget();
- runtime_unlock(&runtime_sched);
+ runtime_unlock(&runtime_sched->lock);
if(mp == nil) {
fn = nil;
if(spinning)
@@ -1504,369 +884,39 @@ startm(P *p, bool spinning)
runtime_throw("startm: m is spinning");
if(mp->nextp)
runtime_throw("startm: m has p");
- mp->spinning = spinning;
- mp->nextp = p;
- runtime_notewakeup(&mp->park);
-}
-
-// Hands off P from syscall or locked M.
-static void
-handoffp(P *p)
-{
- // if it has local work, start it straight away
- if(p->runqhead != p->runqtail || runtime_sched.runqsize) {
- startm(p, false);
- return;
- }
- // no local work, check that there are no spinning/idle M's,
- // otherwise our help is not required
- if(runtime_atomicload(&runtime_sched.nmspinning) + runtime_atomicload(&runtime_sched.npidle) == 0 && // TODO: fast atomic
- runtime_cas(&runtime_sched.nmspinning, 0, 1)) {
- startm(p, true);
- return;
- }
- runtime_lock(&runtime_sched);
- if(runtime_sched.gcwaiting) {
- p->status = Pgcstop;
- if(--runtime_sched.stopwait == 0)
- runtime_notewakeup(&runtime_sched.stopnote);
- runtime_unlock(&runtime_sched);
- return;
- }
- if(runtime_sched.runqsize) {
- runtime_unlock(&runtime_sched);
- startm(p, false);
- return;
- }
- // If this is the last running P and nobody is polling network,
- // need to wakeup another M to poll network.
- if(runtime_sched.npidle == (uint32)runtime_gomaxprocs-1 && runtime_atomicload64(&runtime_sched.lastpoll) != 0) {
- runtime_unlock(&runtime_sched);
- startm(p, false);
- return;
- }
- pidleput(p);
- runtime_unlock(&runtime_sched);
-}
-
-// Tries to add one more P to execute G's.
-// Called when a G is made runnable (newproc, ready).
-static void
-wakep(void)
-{
- // be conservative about spinning threads
- if(!runtime_cas(&runtime_sched.nmspinning, 0, 1))
- return;
- startm(nil, true);
-}
-
-// Stops execution of the current m that is locked to a g until the g is runnable again.
-// Returns with acquired P.
-static void
-stoplockedm(void)
-{
- P *p;
-
- if(m->lockedg == nil || m->lockedg->lockedm != m)
- runtime_throw("stoplockedm: inconsistent locking");
- if(m->p) {
- // Schedule another M to run this p.
- p = releasep();
- handoffp(p);
+ if(spinning && !runqempty(p)) {
+ runtime_throw("startm: p has runnable gs");
}
- incidlelocked(1);
- // Wait until another thread schedules lockedg again.
- runtime_notesleep(&m->park);
- runtime_noteclear(&m->park);
- if(m->lockedg->status != Grunnable)
- runtime_throw("stoplockedm: not runnable");
- acquirep(m->nextp);
- m->nextp = nil;
-}
-
-// Schedules the locked m to run the locked gp.
-static void
-startlockedm(G *gp)
-{
- M *mp;
- P *p;
-
- mp = gp->lockedm;
- if(mp == m)
- runtime_throw("startlockedm: locked to me");
- if(mp->nextp)
- runtime_throw("startlockedm: m has p");
- // directly handoff current P to the locked m
- incidlelocked(-1);
- p = releasep();
- mp->nextp = p;
+ mp->spinning = spinning;
+ mp->nextp = (uintptr)p;
runtime_notewakeup(&mp->park);
- stopm();
-}
-
-// Stops the current m for stoptheworld.
-// Returns when the world is restarted.
-static void
-gcstopm(void)
-{
- P *p;
-
- if(!runtime_sched.gcwaiting)
- runtime_throw("gcstopm: not waiting for gc");
- if(m->spinning) {
- m->spinning = false;
- runtime_xadd(&runtime_sched.nmspinning, -1);
- }
- p = releasep();
- runtime_lock(&runtime_sched);
- p->status = Pgcstop;
- if(--runtime_sched.stopwait == 0)
- runtime_notewakeup(&runtime_sched.stopnote);
- runtime_unlock(&runtime_sched);
- stopm();
-}
-
-// Schedules gp to run on the current M.
-// Never returns.
-static void
-execute(G *gp)
-{
- int32 hz;
-
- if(gp->status != Grunnable) {
- runtime_printf("execute: bad g status %d\n", gp->status);
- runtime_throw("execute: bad g status");
- }
- gp->status = Grunning;
- gp->waitsince = 0;
- m->p->schedtick++;
- m->curg = gp;
- gp->m = m;
-
- // Check whether the profiler needs to be turned on or off.
- hz = runtime_sched.profilehz;
- if(m->profilehz != hz)
- runtime_resetcpuprofiler(hz);
-
- runtime_gogo(gp);
}
-// Finds a runnable goroutine to execute.
-// Tries to steal from other P's, get g from global queue, poll network.
-static G*
-findrunnable(void)
-{
- G *gp;
- P *p;
- int32 i;
-
-top:
- if(runtime_sched.gcwaiting) {
- gcstopm();
- goto top;
- }
- if(runtime_fingwait && runtime_fingwake && (gp = runtime_wakefing()) != nil)
- runtime_ready(gp);
- // local runq
- gp = runqget(m->p);
- if(gp)
- return gp;
- // global runq
- if(runtime_sched.runqsize) {
- runtime_lock(&runtime_sched);
- gp = globrunqget(m->p, 0);
- runtime_unlock(&runtime_sched);
- if(gp)
- return gp;
- }
- // poll network
- gp = runtime_netpoll(false); // non-blocking
- if(gp) {
- injectglist(gp->schedlink);
- gp->status = Grunnable;
- return gp;
- }
- // If number of spinning M's >= number of busy P's, block.
- // This is necessary to prevent excessive CPU consumption
- // when GOMAXPROCS>>1 but the program parallelism is low.
- if(!m->spinning && 2 * runtime_atomicload(&runtime_sched.nmspinning) >= runtime_gomaxprocs - runtime_atomicload(&runtime_sched.npidle)) // TODO: fast atomic
- goto stop;
- if(!m->spinning) {
- m->spinning = true;
- runtime_xadd(&runtime_sched.nmspinning, 1);
- }
- // random steal from other P's
- for(i = 0; i < 2*runtime_gomaxprocs; i++) {
- if(runtime_sched.gcwaiting)
- goto top;
- p = runtime_allp[runtime_fastrand1()%runtime_gomaxprocs];
- if(p == m->p)
- gp = runqget(p);
- else
- gp = runqsteal(m->p, p);
- if(gp)
- return gp;
- }
-stop:
- // return P and block
- runtime_lock(&runtime_sched);
- if(runtime_sched.gcwaiting) {
- runtime_unlock(&runtime_sched);
- goto top;
- }
- if(runtime_sched.runqsize) {
- gp = globrunqget(m->p, 0);
- runtime_unlock(&runtime_sched);
- return gp;
- }
- p = releasep();
- pidleput(p);
- runtime_unlock(&runtime_sched);
- if(m->spinning) {
- m->spinning = false;
- runtime_xadd(&runtime_sched.nmspinning, -1);
- }
- // check all runqueues once again
- for(i = 0; i < runtime_gomaxprocs; i++) {
- p = runtime_allp[i];
- if(p && p->runqhead != p->runqtail) {
- runtime_lock(&runtime_sched);
- p = pidleget();
- runtime_unlock(&runtime_sched);
- if(p) {
- acquirep(p);
- goto top;
- }
- break;
- }
- }
- // poll network
- if(runtime_xchg64(&runtime_sched.lastpoll, 0) != 0) {
- if(m->p)
- runtime_throw("findrunnable: netpoll with p");
- if(m->spinning)
- runtime_throw("findrunnable: netpoll with spinning");
- gp = runtime_netpoll(true); // block until new work is available
- runtime_atomicstore64(&runtime_sched.lastpoll, runtime_nanotime());
- if(gp) {
- runtime_lock(&runtime_sched);
- p = pidleget();
- runtime_unlock(&runtime_sched);
- if(p) {
- acquirep(p);
- injectglist(gp->schedlink);
- gp->status = Grunnable;
- return gp;
- }
- injectglist(gp);
- }
- }
- stopm();
- goto top;
-}
-
-static void
-resetspinning(void)
-{
- int32 nmspinning;
-
- if(m->spinning) {
- m->spinning = false;
- nmspinning = runtime_xadd(&runtime_sched.nmspinning, -1);
- if(nmspinning < 0)
- runtime_throw("findrunnable: negative nmspinning");
- } else
- nmspinning = runtime_atomicload(&runtime_sched.nmspinning);
-
- // M wakeup policy is deliberately somewhat conservative (see nmspinning handling),
- // so see if we need to wakeup another P here.
- if (nmspinning == 0 && runtime_atomicload(&runtime_sched.npidle) > 0)
- wakep();
-}
-
-// Injects the list of runnable G's into the scheduler.
-// Can run concurrently with GC.
-static void
-injectglist(G *glist)
+// Puts the current goroutine into a waiting state and calls unlockf.
+// If unlockf returns false, the goroutine is resumed.
+void
+runtime_park(bool(*unlockf)(G*, void*), void *lock, const char *reason)
{
- int32 n;
- G *gp;
-
- if(glist == nil)
- return;
- runtime_lock(&runtime_sched);
- for(n = 0; glist; n++) {
- gp = glist;
- glist = gp->schedlink;
- gp->status = Grunnable;
- globrunqput(gp);
- }
- runtime_unlock(&runtime_sched);
-
- for(; n && runtime_sched.npidle; n--)
- startm(nil, false);
+ if(g->atomicstatus != _Grunning)
+ runtime_throw("bad g status");
+ g->m->waitlock = lock;
+ g->m->waitunlockf = unlockf;
+ g->waitreason = runtime_gostringnocopy((const byte*)reason);
+ runtime_mcall(park0);
}
-// One round of scheduler: find a runnable goroutine and execute it.
-// Never returns.
-static void
-schedule(void)
-{
- G *gp;
- uint32 tick;
-
- if(m->locks)
- runtime_throw("schedule: holding locks");
-
-top:
- if(runtime_sched.gcwaiting) {
- gcstopm();
- goto top;
- }
+void gopark(FuncVal *, void *, String, byte, int)
+ __asm__ ("runtime.gopark");
- gp = nil;
- // Check the global runnable queue once in a while to ensure fairness.
- // Otherwise two goroutines can completely occupy the local runqueue
- // by constantly respawning each other.
- tick = m->p->schedtick;
- // This is a fancy way to say tick%61==0,
- // it uses 2 MUL instructions instead of a single DIV and so is faster on modern processors.
- if(tick - (((uint64)tick*0x4325c53fu)>>36)*61 == 0 && runtime_sched.runqsize > 0) {
- runtime_lock(&runtime_sched);
- gp = globrunqget(m->p, 1);
- runtime_unlock(&runtime_sched);
- if(gp)
- resetspinning();
- }
- if(gp == nil) {
- gp = runqget(m->p);
- if(gp && m->spinning)
- runtime_throw("schedule: spinning with local work");
- }
- if(gp == nil) {
- gp = findrunnable(); // blocks until work is available
- resetspinning();
- }
-
- if(gp->lockedm) {
- // Hands off own p to the locked m,
- // then blocks waiting for a new p.
- startlockedm(gp);
- goto top;
- }
-
- execute(gp);
-}
-
-// Puts the current goroutine into a waiting state and calls unlockf.
-// If unlockf returns false, the goroutine is resumed.
void
-runtime_park(bool(*unlockf)(G*, void*), void *lock, const char *reason)
+gopark(FuncVal *unlockf, void *lock, String reason,
+ byte traceEv __attribute__ ((unused)),
+ int traceskip __attribute__ ((unused)))
{
- if(g->status != Grunning)
+ if(g->atomicstatus != _Grunning)
runtime_throw("bad g status");
- m->waitlock = lock;
- m->waitunlockf = unlockf;
+ g->m->waitlock = lock;
+ g->m->waitunlockf = unlockf == nil ? nil : (void*)unlockf->fn;
g->waitreason = reason;
runtime_mcall(park0);
}
@@ -1887,27 +937,44 @@ runtime_parkunlock(Lock *lock, const char *reason)
runtime_park(parkunlock, lock, reason);
}
+void goparkunlock(Lock *, String, byte, int)
+ __asm__ (GOSYM_PREFIX "runtime.goparkunlock");
+
+void
+goparkunlock(Lock *lock, String reason, byte traceEv __attribute__ ((unused)),
+ int traceskip __attribute__ ((unused)))
+{
+ if(g->atomicstatus != _Grunning)
+ runtime_throw("bad g status");
+ g->m->waitlock = lock;
+ g->m->waitunlockf = parkunlock;
+ g->waitreason = reason;
+ runtime_mcall(park0);
+}
+
// runtime_park continuation on g0.
static void
park0(G *gp)
{
+ M *m;
bool ok;
- gp->status = Gwaiting;
+ m = g->m;
+ gp->atomicstatus = _Gwaiting;
gp->m = nil;
m->curg = nil;
if(m->waitunlockf) {
- ok = m->waitunlockf(gp, m->waitlock);
+ ok = ((bool (*)(G*, void*))m->waitunlockf)(gp, m->waitlock);
m->waitunlockf = nil;
m->waitlock = nil;
if(!ok) {
- gp->status = Grunnable;
- execute(gp); // Schedule it back, never returns.
+ gp->atomicstatus = _Grunnable;
+ execute(gp, true); // Schedule it back, never returns.
}
}
if(m->lockedg) {
stoplockedm();
- execute(gp); // Never returns.
+ execute(gp, true); // Never returns.
}
schedule();
}
@@ -1916,7 +983,7 @@ park0(G *gp)
void
runtime_gosched(void)
{
- if(g->status != Grunning)
+ if(g->atomicstatus != _Grunning)
runtime_throw("bad g status");
runtime_mcall(runtime_gosched0);
}
@@ -1925,15 +992,18 @@ runtime_gosched(void)
void
runtime_gosched0(G *gp)
{
- gp->status = Grunnable;
+ M *m;
+
+ m = g->m;
+ gp->atomicstatus = _Grunnable;
gp->m = nil;
m->curg = nil;
- runtime_lock(&runtime_sched);
+ runtime_lock(&runtime_sched->lock);
globrunqput(gp);
- runtime_unlock(&runtime_sched);
+ runtime_unlock(&runtime_sched->lock);
if(m->lockedg) {
stoplockedm();
- execute(gp); // Never returns.
+ execute(gp, true); // Never returns.
}
schedule();
}
@@ -1942,38 +1012,43 @@ runtime_gosched0(G *gp)
// Need to mark it as nosplit, because it runs with sp > stackbase (as runtime_lessstack).
// Since it does not return it does not matter. But if it is preempted
// at the split stack check, GC will complain about inconsistent sp.
-void runtime_goexit(void) __attribute__ ((noinline));
+void runtime_goexit1(void) __attribute__ ((noinline));
void
-runtime_goexit(void)
+runtime_goexit1(void)
{
- if(g->status != Grunning)
+ if(g->atomicstatus != _Grunning)
runtime_throw("bad g status");
runtime_mcall(goexit0);
}
-// runtime_goexit continuation on g0.
+// runtime_goexit1 continuation on g0.
static void
goexit0(G *gp)
{
- gp->status = Gdead;
+ M *m;
+
+ m = g->m;
+ gp->atomicstatus = _Gdead;
gp->entry = nil;
gp->m = nil;
gp->lockedm = nil;
gp->paniconfault = 0;
- gp->defer = nil; // should be true already but just in case.
- gp->panic = nil; // non-nil for Goexit during panic. points at stack-allocated data.
- gp->writenbuf = 0;
- gp->writebuf = nil;
- gp->waitreason = nil;
+ gp->_defer = nil; // should be true already but just in case.
+ gp->_panic = nil; // non-nil for Goexit during panic. points at stack-allocated data.
+ gp->writebuf.__values = nil;
+ gp->writebuf.__count = 0;
+ gp->writebuf.__capacity = 0;
+ gp->waitreason = runtime_gostringnocopy(nil);
gp->param = nil;
+ m->curg->m = nil;
m->curg = nil;
m->lockedg = nil;
- if(m->locked & ~LockExternal) {
+ if(m->locked & ~_LockExternal) {
runtime_printf("invalid m->locked = %d\n", m->locked);
runtime_throw("internal lockOSThread error");
}
m->locked = 0;
- gfput(m->p, gp);
+ gfput((P*)m->p, gp);
schedule();
}
@@ -1986,15 +1061,16 @@ goexit0(G *gp)
// make g->sched refer to the caller's stack segment, because
// entersyscall is going to return immediately after.
-void runtime_entersyscall(void) __attribute__ ((no_split_stack));
-static void doentersyscall(void) __attribute__ ((no_split_stack, noinline));
+void runtime_entersyscall(int32) __attribute__ ((no_split_stack));
+static void doentersyscall(uintptr, uintptr)
+ __attribute__ ((no_split_stack, noinline));
void
-runtime_entersyscall()
+runtime_entersyscall(int32 dummy __attribute__ ((unused)))
{
// Save the registers in the g structure so that any pointers
// held in registers will be seen by the garbage collector.
- getcontext(&g->gcregs);
+ getcontext(ucontext_arg(&g->gcregs[0]));
// Do the work in a separate function, so that this function
// doesn't save any registers on its own stack. If this
@@ -2005,84 +1081,99 @@ runtime_entersyscall()
// callee-saved registers to access the TLS variable g. We
// don't want to put the ucontext_t on the stack because it is
// large and we can not split the stack here.
- doentersyscall();
+ doentersyscall((uintptr)runtime_getcallerpc(&dummy),
+ (uintptr)runtime_getcallersp(&dummy));
}
static void
-doentersyscall()
+doentersyscall(uintptr pc, uintptr sp)
{
- // Disable preemption because during this function g is in Gsyscall status,
+ // Disable preemption because during this function g is in _Gsyscall status,
// but can have inconsistent g->sched, do not let GC observe it.
- m->locks++;
+ g->m->locks++;
// Leave SP around for GC and traceback.
#ifdef USING_SPLIT_STACK
- g->gcstack = __splitstack_find(nil, nil, &g->gcstack_size,
- &g->gcnext_segment, &g->gcnext_sp,
- &g->gcinitial_sp);
+ {
+ size_t gcstacksize;
+ g->gcstack = __splitstack_find(nil, nil, &gcstacksize,
+ &g->gcnextsegment, &g->gcnextsp,
+ &g->gcinitialsp);
+ g->gcstacksize = (uintptr)gcstacksize;
+ }
#else
{
void *v;
- g->gcnext_sp = (byte *) &v;
+ g->gcnextsp = (byte *) &v;
}
#endif
- g->status = Gsyscall;
+ g->syscallsp = sp;
+ g->syscallpc = pc;
- if(runtime_atomicload(&runtime_sched.sysmonwait)) { // TODO: fast atomic
- runtime_lock(&runtime_sched);
- if(runtime_atomicload(&runtime_sched.sysmonwait)) {
- runtime_atomicstore(&runtime_sched.sysmonwait, 0);
- runtime_notewakeup(&runtime_sched.sysmonnote);
+ g->atomicstatus = _Gsyscall;
+
+ if(runtime_atomicload(&runtime_sched->sysmonwait)) { // TODO: fast atomic
+ runtime_lock(&runtime_sched->lock);
+ if(runtime_atomicload(&runtime_sched->sysmonwait)) {
+ runtime_atomicstore(&runtime_sched->sysmonwait, 0);
+ runtime_notewakeup(&runtime_sched->sysmonnote);
}
- runtime_unlock(&runtime_sched);
+ runtime_unlock(&runtime_sched->lock);
}
- m->mcache = nil;
- m->p->m = nil;
- runtime_atomicstore(&m->p->status, Psyscall);
- if(runtime_atomicload(&runtime_sched.gcwaiting)) {
- runtime_lock(&runtime_sched);
- if (runtime_sched.stopwait > 0 && runtime_cas(&m->p->status, Psyscall, Pgcstop)) {
- if(--runtime_sched.stopwait == 0)
- runtime_notewakeup(&runtime_sched.stopnote);
+ g->m->mcache = nil;
+ ((P*)(g->m->p))->m = 0;
+ runtime_atomicstore(&((P*)g->m->p)->status, _Psyscall);
+ if(runtime_atomicload(&runtime_sched->gcwaiting)) {
+ runtime_lock(&runtime_sched->lock);
+ if (runtime_sched->stopwait > 0 && runtime_cas(&((P*)g->m->p)->status, _Psyscall, _Pgcstop)) {
+ if(--runtime_sched->stopwait == 0)
+ runtime_notewakeup(&runtime_sched->stopnote);
}
- runtime_unlock(&runtime_sched);
+ runtime_unlock(&runtime_sched->lock);
}
- m->locks--;
+ g->m->locks--;
}
// The same as runtime_entersyscall(), but with a hint that the syscall is blocking.
void
-runtime_entersyscallblock(void)
+runtime_entersyscallblock(int32 dummy __attribute__ ((unused)))
{
P *p;
- m->locks++; // see comment in entersyscall
+ g->m->locks++; // see comment in entersyscall
// Leave SP around for GC and traceback.
#ifdef USING_SPLIT_STACK
- g->gcstack = __splitstack_find(nil, nil, &g->gcstack_size,
- &g->gcnext_segment, &g->gcnext_sp,
- &g->gcinitial_sp);
+ {
+ size_t gcstacksize;
+ g->gcstack = __splitstack_find(nil, nil, &gcstacksize,
+ &g->gcnextsegment, &g->gcnextsp,
+ &g->gcinitialsp);
+ g->gcstacksize = (uintptr)gcstacksize;
+ }
#else
- g->gcnext_sp = (byte *) &p;
+ g->gcnextsp = (byte *) &p;
#endif
// Save the registers in the g structure so that any pointers
// held in registers will be seen by the garbage collector.
- getcontext(&g->gcregs);
+ getcontext(ucontext_arg(&g->gcregs[0]));
+
+ g->syscallpc = (uintptr)runtime_getcallerpc(&dummy);
+ g->syscallsp = (uintptr)runtime_getcallersp(&dummy);
- g->status = Gsyscall;
+ g->atomicstatus = _Gsyscall;
p = releasep();
handoffp(p);
if(g->isbackground) // do not consider blocked scavenger for deadlock detection
incidlelocked(1);
- m->locks--;
+ g->m->locks--;
}
// The goroutine g exited its system call.
@@ -2090,33 +1181,34 @@ runtime_entersyscallblock(void)
// This is called only from the go syscall library, not
// from the low-level system calls used by the runtime.
void
-runtime_exitsyscall(void)
+runtime_exitsyscall(int32 dummy __attribute__ ((unused)))
{
G *gp;
- m->locks++; // see comment in entersyscall
-
gp = g;
+ gp->m->locks++; // see comment in entersyscall
+
if(gp->isbackground) // do not consider blocked scavenger for deadlock detection
incidlelocked(-1);
- g->waitsince = 0;
+ gp->waitsince = 0;
if(exitsyscallfast()) {
// There's a cpu for us, so we can run.
- m->p->syscalltick++;
- gp->status = Grunning;
+ ((P*)gp->m->p)->syscalltick++;
+ gp->atomicstatus = _Grunning;
// Garbage collector isn't running (since we are),
// so okay to clear gcstack and gcsp.
#ifdef USING_SPLIT_STACK
gp->gcstack = nil;
#endif
- gp->gcnext_sp = nil;
- runtime_memclr(&gp->gcregs, sizeof gp->gcregs);
- m->locks--;
+ gp->gcnextsp = nil;
+ runtime_memclr(&gp->gcregs[0], sizeof gp->gcregs);
+ gp->syscallsp = 0;
+ gp->m->locks--;
return;
}
- m->locks--;
+ gp->m->locks--;
// Call the scheduler.
runtime_mcall(exitsyscall0);
@@ -2130,42 +1222,47 @@ runtime_exitsyscall(void)
#ifdef USING_SPLIT_STACK
gp->gcstack = nil;
#endif
- gp->gcnext_sp = nil;
- runtime_memclr(&gp->gcregs, sizeof gp->gcregs);
+ gp->gcnextsp = nil;
+ runtime_memclr(&gp->gcregs[0], sizeof gp->gcregs);
+
+ gp->syscallsp = 0;
- // Don't refer to m again, we might be running on a different
- // thread after returning from runtime_mcall.
- runtime_m()->p->syscalltick++;
+ // Note that this gp->m might be different than the earlier
+ // gp->m after returning from runtime_mcall.
+ ((P*)gp->m->p)->syscalltick++;
}
static bool
exitsyscallfast(void)
{
+ G *gp;
P *p;
+ gp = g;
+
// Freezetheworld sets stopwait but does not retake P's.
- if(runtime_sched.stopwait) {
- m->p = nil;
+ if(runtime_sched->stopwait) {
+ gp->m->p = 0;
return false;
}
// Try to re-acquire the last P.
- if(m->p && m->p->status == Psyscall && runtime_cas(&m->p->status, Psyscall, Prunning)) {
+ if(gp->m->p && ((P*)gp->m->p)->status == _Psyscall && runtime_cas(&((P*)gp->m->p)->status, _Psyscall, _Prunning)) {
// There's a cpu for us, so we can run.
- m->mcache = m->p->mcache;
- m->p->m = m;
+ gp->m->mcache = ((P*)gp->m->p)->mcache;
+ ((P*)gp->m->p)->m = (uintptr)gp->m;
return true;
}
// Try to get any other idle P.
- m->p = nil;
- if(runtime_sched.pidle) {
- runtime_lock(&runtime_sched);
+ gp->m->p = 0;
+ if(runtime_sched->pidle) {
+ runtime_lock(&runtime_sched->lock);
p = pidleget();
- if(p && runtime_atomicload(&runtime_sched.sysmonwait)) {
- runtime_atomicstore(&runtime_sched.sysmonwait, 0);
- runtime_notewakeup(&runtime_sched.sysmonnote);
+ if(p && runtime_atomicload(&runtime_sched->sysmonwait)) {
+ runtime_atomicstore(&runtime_sched->sysmonwait, 0);
+ runtime_notewakeup(&runtime_sched->sysmonnote);
}
- runtime_unlock(&runtime_sched);
+ runtime_unlock(&runtime_sched->lock);
if(p) {
acquirep(p);
return true;
@@ -2179,75 +1276,93 @@ exitsyscallfast(void)
static void
exitsyscall0(G *gp)
{
+ M *m;
P *p;
- gp->status = Grunnable;
+ m = g->m;
+ gp->atomicstatus = _Grunnable;
gp->m = nil;
m->curg = nil;
- runtime_lock(&runtime_sched);
+ runtime_lock(&runtime_sched->lock);
p = pidleget();
if(p == nil)
globrunqput(gp);
- else if(runtime_atomicload(&runtime_sched.sysmonwait)) {
- runtime_atomicstore(&runtime_sched.sysmonwait, 0);
- runtime_notewakeup(&runtime_sched.sysmonnote);
+ else if(runtime_atomicload(&runtime_sched->sysmonwait)) {
+ runtime_atomicstore(&runtime_sched->sysmonwait, 0);
+ runtime_notewakeup(&runtime_sched->sysmonnote);
}
- runtime_unlock(&runtime_sched);
+ runtime_unlock(&runtime_sched->lock);
if(p) {
acquirep(p);
- execute(gp); // Never returns.
+ execute(gp, false); // Never returns.
}
if(m->lockedg) {
// Wait until another thread schedules gp and so m again.
stoplockedm();
- execute(gp); // Never returns.
+ execute(gp, false); // Never returns.
}
stopm();
schedule(); // Never returns.
}
-// Called from syscall package before fork.
-void syscall_runtime_BeforeFork(void)
- __asm__(GOSYM_PREFIX "syscall.runtime_BeforeFork");
+void syscall_entersyscall(void)
+ __asm__(GOSYM_PREFIX "syscall.Entersyscall");
+
+void syscall_entersyscall(void) __attribute__ ((no_split_stack));
+
void
-syscall_runtime_BeforeFork(void)
+syscall_entersyscall()
{
- // Fork can hang if preempted with signals frequently enough (see issue 5517).
- // Ensure that we stay on the same M where we disable profiling.
- runtime_m()->locks++;
- if(runtime_m()->profilehz != 0)
- runtime_resetcpuprofiler(0);
+ runtime_entersyscall(0);
}
-// Called from syscall package after fork in parent.
-void syscall_runtime_AfterFork(void)
- __asm__(GOSYM_PREFIX "syscall.runtime_AfterFork");
+void syscall_exitsyscall(void)
+ __asm__(GOSYM_PREFIX "syscall.Exitsyscall");
+
+void syscall_exitsyscall(void) __attribute__ ((no_split_stack));
+
void
-syscall_runtime_AfterFork(void)
+syscall_exitsyscall()
{
- int32 hz;
-
- hz = runtime_sched.profilehz;
- if(hz != 0)
- runtime_resetcpuprofiler(hz);
- runtime_m()->locks--;
+ runtime_exitsyscall(0);
}
// Allocate a new g, with a stack big enough for stacksize bytes.
G*
-runtime_malg(int32 stacksize, byte** ret_stack, size_t* ret_stacksize)
+runtime_malg(bool allocatestack, bool signalstack, byte** ret_stack, uintptr* ret_stacksize)
{
+ uintptr stacksize;
G *newg;
+ byte* unused_stack;
+ uintptr unused_stacksize;
+#if USING_SPLIT_STACK
+ int dont_block_signals = 0;
+ size_t ss_stacksize;
+#endif
+ if (ret_stack == nil) {
+ ret_stack = &unused_stack;
+ }
+ if (ret_stacksize == nil) {
+ ret_stacksize = &unused_stacksize;
+ }
newg = allocg();
- if(stacksize >= 0) {
-#if USING_SPLIT_STACK
- int dont_block_signals = 0;
+ if(allocatestack) {
+ stacksize = StackMin;
+ if(signalstack) {
+ stacksize = 32 * 1024; // OS X wants >= 8K, GNU/Linux >= 2K
+#ifdef SIGSTKSZ
+ if(stacksize < SIGSTKSZ)
+ stacksize = SIGSTKSZ;
+#endif
+ }
+#if USING_SPLIT_STACK
*ret_stack = __splitstack_makecontext(stacksize,
- &newg->stack_context[0],
- ret_stacksize);
- __splitstack_block_signals_context(&newg->stack_context[0],
+ &newg->stackcontext[0],
+ &ss_stacksize);
+ *ret_stacksize = (uintptr)ss_stacksize;
+ __splitstack_block_signals_context(&newg->stackcontext[0],
&dont_block_signals, nil);
#else
// In 64-bit mode, the maximum Go allocation space is
@@ -2257,7 +1372,7 @@ runtime_malg(int32 stacksize, byte** ret_stack, size_t* ret_stacksize)
// 32-bit mode, the Go allocation space is all of
// memory anyhow.
if(sizeof(void*) == 8) {
- void *p = runtime_SysAlloc(stacksize, &mstats.other_sys);
+ void *p = runtime_SysAlloc(stacksize, &mstats()->other_sys);
if(p == nil)
runtime_throw("runtime: cannot allocate memory for goroutine stack");
*ret_stack = (byte*)p;
@@ -2265,41 +1380,14 @@ runtime_malg(int32 stacksize, byte** ret_stack, size_t* ret_stacksize)
*ret_stack = runtime_mallocgc(stacksize, 0, FlagNoProfiling|FlagNoGC);
runtime_xadd(&runtime_stacks_sys, stacksize);
}
- *ret_stacksize = stacksize;
- newg->gcinitial_sp = *ret_stack;
- newg->gcstack_size = (size_t)stacksize;
+ *ret_stacksize = (uintptr)stacksize;
+ newg->gcinitialsp = *ret_stack;
+ newg->gcstacksize = (uintptr)stacksize;
#endif
}
return newg;
}
-/* For runtime package testing. */
-
-
-// Create a new g running fn with siz bytes of arguments.
-// Put it on the queue of g's waiting to run.
-// The compiler turns a go statement into a call to this.
-// Cannot split the stack because it assumes that the arguments
-// are available sequentially after &fn; they would not be
-// copied if a stack split occurred. It's OK for this to call
-// functions that split the stack.
-void runtime_testing_entersyscall(int32)
- __asm__ (GOSYM_PREFIX "runtime.entersyscall");
-void
-runtime_testing_entersyscall(int32 dummy __attribute__ ((unused)))
-{
- runtime_entersyscall();
-}
-
-void runtime_testing_exitsyscall(int32)
- __asm__ (GOSYM_PREFIX "runtime.exitsyscall");
-
-void
-runtime_testing_exitsyscall(int32 dummy __attribute__ ((unused)))
-{
- runtime_exitsyscall();
-}
-
G*
__go_go(void (*fn)(void*), void* arg)
{
@@ -2310,155 +1398,55 @@ __go_go(void (*fn)(void*), void* arg)
//runtime_printf("newproc1 %p %p narg=%d nret=%d\n", fn->fn, argp, narg, nret);
if(fn == nil) {
- m->throwing = -1; // do not dump full stacks
+ g->m->throwing = -1; // do not dump full stacks
runtime_throw("go of nil func value");
}
- m->locks++; // disable preemption because it can be holding p in a local var
+ g->m->locks++; // disable preemption because it can be holding p in a local var
- p = m->p;
+ p = (P*)g->m->p;
if((newg = gfget(p)) != nil) {
#ifdef USING_SPLIT_STACK
int dont_block_signals = 0;
- sp = __splitstack_resetcontext(&newg->stack_context[0],
+ sp = __splitstack_resetcontext(&newg->stackcontext[0],
&spsize);
- __splitstack_block_signals_context(&newg->stack_context[0],
+ __splitstack_block_signals_context(&newg->stackcontext[0],
&dont_block_signals, nil);
#else
- sp = newg->gcinitial_sp;
- spsize = newg->gcstack_size;
+ sp = newg->gcinitialsp;
+ spsize = newg->gcstacksize;
if(spsize == 0)
runtime_throw("bad spsize in __go_go");
- newg->gcnext_sp = sp;
+ newg->gcnextsp = sp;
#endif
+ newg->traceback = nil;
} else {
- newg = runtime_malg(StackMin, &sp, &spsize);
+ uintptr malsize;
+
+ newg = runtime_malg(true, false, &sp, &malsize);
+ spsize = (size_t)malsize;
+ newg->atomicstatus = _Gdead;
allgadd(newg);
}
newg->entry = (byte*)fn;
newg->param = arg;
newg->gopc = (uintptr)__builtin_return_address(0);
- newg->status = Grunnable;
+ newg->atomicstatus = _Grunnable;
if(p->goidcache == p->goidcacheend) {
- p->goidcache = runtime_xadd64(&runtime_sched.goidgen, GoidCacheBatch);
+ p->goidcache = runtime_xadd64(&runtime_sched->goidgen, GoidCacheBatch);
p->goidcacheend = p->goidcache + GoidCacheBatch;
}
newg->goid = p->goidcache++;
- {
- // Avoid warnings about variables clobbered by
- // longjmp.
- byte * volatile vsp = sp;
- size_t volatile vspsize = spsize;
- G * volatile vnewg = newg;
-
- getcontext(&vnewg->context);
- vnewg->context.uc_stack.ss_sp = vsp;
-#ifdef MAKECONTEXT_STACK_TOP
- vnewg->context.uc_stack.ss_sp += vspsize;
-#endif
- vnewg->context.uc_stack.ss_size = vspsize;
- makecontext(&vnewg->context, kickoff, 0);
-
- runqput(p, vnewg);
-
- if(runtime_atomicload(&runtime_sched.npidle) != 0 && runtime_atomicload(&runtime_sched.nmspinning) == 0 && fn != runtime_main) // TODO: fast atomic
- wakep();
- m->locks--;
- return vnewg;
- }
-}
-
-static void
-allgadd(G *gp)
-{
- G **new;
- uintptr cap;
-
- runtime_lock(&allglock);
- if(runtime_allglen >= allgcap) {
- cap = 4096/sizeof(new[0]);
- if(cap < 2*allgcap)
- cap = 2*allgcap;
- new = runtime_malloc(cap*sizeof(new[0]));
- if(new == nil)
- runtime_throw("runtime: cannot allocate memory");
- if(runtime_allg != nil) {
- runtime_memmove(new, runtime_allg, runtime_allglen*sizeof(new[0]));
- runtime_free(runtime_allg);
- }
- runtime_allg = new;
- allgcap = cap;
- }
- runtime_allg[runtime_allglen++] = gp;
- runtime_unlock(&allglock);
-}
-
-// Put on gfree list.
-// If local list is too long, transfer a batch to the global list.
-static void
-gfput(P *p, G *gp)
-{
- gp->schedlink = p->gfree;
- p->gfree = gp;
- p->gfreecnt++;
- if(p->gfreecnt >= 64) {
- runtime_lock(&runtime_sched.gflock);
- while(p->gfreecnt >= 32) {
- p->gfreecnt--;
- gp = p->gfree;
- p->gfree = gp->schedlink;
- gp->schedlink = runtime_sched.gfree;
- runtime_sched.gfree = gp;
- }
- runtime_unlock(&runtime_sched.gflock);
- }
-}
+ makeGContext(newg, sp, (uintptr)spsize);
-// Get from gfree list.
-// If local list is empty, grab a batch from global list.
-static G*
-gfget(P *p)
-{
- G *gp;
+ runqput(p, newg, true);
-retry:
- gp = p->gfree;
- if(gp == nil && runtime_sched.gfree) {
- runtime_lock(&runtime_sched.gflock);
- while(p->gfreecnt < 32 && runtime_sched.gfree) {
- p->gfreecnt++;
- gp = runtime_sched.gfree;
- runtime_sched.gfree = gp->schedlink;
- gp->schedlink = p->gfree;
- p->gfree = gp;
- }
- runtime_unlock(&runtime_sched.gflock);
- goto retry;
- }
- if(gp) {
- p->gfree = gp->schedlink;
- p->gfreecnt--;
- }
- return gp;
-}
-
-// Purge all cached G's from gfree list to the global list.
-static void
-gfpurge(P *p)
-{
- G *gp;
-
- runtime_lock(&runtime_sched.gflock);
- while(p->gfreecnt) {
- p->gfreecnt--;
- gp = p->gfree;
- p->gfree = gp->schedlink;
- gp->schedlink = runtime_sched.gfree;
- runtime_sched.gfree = gp;
- }
- runtime_unlock(&runtime_sched.gflock);
+ if(runtime_atomicload(&runtime_sched->npidle) != 0 && runtime_atomicload(&runtime_sched->nmspinning) == 0 && fn != runtime_main) // TODO: fast atomic
+ wakep();
+ g->m->locks--;
+ return newg;
}
void
@@ -2475,131 +1463,9 @@ runtime_Gosched(void)
runtime_gosched();
}
-// Implementation of runtime.GOMAXPROCS.
-// delete when scheduler is even stronger
-int32
-runtime_gomaxprocsfunc(int32 n)
-{
- int32 ret;
-
- if(n > MaxGomaxprocs)
- n = MaxGomaxprocs;
- runtime_lock(&runtime_sched);
- ret = runtime_gomaxprocs;
- if(n <= 0 || n == ret) {
- runtime_unlock(&runtime_sched);
- return ret;
- }
- runtime_unlock(&runtime_sched);
-
- runtime_semacquire(&runtime_worldsema, false);
- m->gcing = 1;
- runtime_stoptheworld();
- newprocs = n;
- m->gcing = 0;
- runtime_semrelease(&runtime_worldsema);
- runtime_starttheworld();
-
- return ret;
-}
-
-// lockOSThread is called by runtime.LockOSThread and runtime.lockOSThread below
-// after they modify m->locked. Do not allow preemption during this call,
-// or else the m might be different in this function than in the caller.
-static void
-lockOSThread(void)
-{
- m->lockedg = g;
- g->lockedm = m;
-}
-
-void runtime_LockOSThread(void) __asm__ (GOSYM_PREFIX "runtime.LockOSThread");
-void
-runtime_LockOSThread(void)
-{
- m->locked |= LockExternal;
- lockOSThread();
-}
-
-void
-runtime_lockOSThread(void)
-{
- m->locked += LockInternal;
- lockOSThread();
-}
-
-
-// unlockOSThread is called by runtime.UnlockOSThread and runtime.unlockOSThread below
-// after they update m->locked. Do not allow preemption during this call,
-// or else the m might be in different in this function than in the caller.
-static void
-unlockOSThread(void)
-{
- if(m->locked != 0)
- return;
- m->lockedg = nil;
- g->lockedm = nil;
-}
-
-void runtime_UnlockOSThread(void) __asm__ (GOSYM_PREFIX "runtime.UnlockOSThread");
-
-void
-runtime_UnlockOSThread(void)
-{
- m->locked &= ~LockExternal;
- unlockOSThread();
-}
-
-void
-runtime_unlockOSThread(void)
-{
- if(m->locked < LockInternal)
- runtime_throw("runtime: internal error: misuse of lockOSThread/unlockOSThread");
- m->locked -= LockInternal;
- unlockOSThread();
-}
-
-bool
-runtime_lockedOSThread(void)
-{
- return g->lockedm != nil && m->lockedg != nil;
-}
-
-int32
-runtime_gcount(void)
-{
- G *gp;
- int32 n, s;
- uintptr i;
-
- n = 0;
- runtime_lock(&allglock);
- // TODO(dvyukov): runtime.NumGoroutine() is O(N).
- // We do not want to increment/decrement centralized counter in newproc/goexit,
- // just to make runtime.NumGoroutine() faster.
- // Compromise solution is to introduce per-P counters of active goroutines.
- for(i = 0; i < runtime_allglen; i++) {
- gp = runtime_allg[i];
- s = gp->status;
- if(s == Grunnable || s == Grunning || s == Gsyscall || s == Gwaiting)
- n++;
- }
- runtime_unlock(&allglock);
- return n;
-}
-
-int32
-runtime_mcount(void)
-{
- return runtime_sched.mcount;
-}
-
static struct {
- Lock;
- void (*fn)(uintptr*, int32);
+ uint32 lock;
int32 hz;
- uintptr pcbuf[TracebackMaxFrames];
- Location locbuf[TracebackMaxFrames];
} prof;
static void System(void) {}
@@ -2609,11 +1475,14 @@ static void GC(void) {}
void
runtime_sigprof()
{
- M *mp = m;
+ M *mp = g->m;
int32 n, i;
bool traceback;
+ uintptr pcbuf[TracebackMaxFrames];
+ Location locbuf[TracebackMaxFrames];
+ Slice stk;
- if(prof.fn == nil || prof.hz == 0)
+ if(prof.hz == 0)
return;
if(mp == nil)
@@ -2627,12 +1496,6 @@ runtime_sigprof()
if(mp->mcache == nil)
traceback = false;
- runtime_lock(&prof);
- if(prof.fn == nil) {
- runtime_unlock(&prof);
- mp->mallocing--;
- return;
- }
n = 0;
if(runtime_atomicload(&runtime_in_callers) > 0) {
@@ -2644,797 +1507,68 @@ runtime_sigprof()
}
if(traceback) {
- n = runtime_callers(0, prof.locbuf, nelem(prof.locbuf), false);
+ n = runtime_callers(0, locbuf, nelem(locbuf), false);
for(i = 0; i < n; i++)
- prof.pcbuf[i] = prof.locbuf[i].pc;
+ pcbuf[i] = locbuf[i].pc;
}
if(!traceback || n <= 0) {
n = 2;
- prof.pcbuf[0] = (uintptr)runtime_getcallerpc(&n);
+ pcbuf[0] = (uintptr)runtime_getcallerpc(&n);
if(mp->gcing || mp->helpgc)
- prof.pcbuf[1] = (uintptr)GC;
+ pcbuf[1] = (uintptr)GC;
else
- prof.pcbuf[1] = (uintptr)System;
+ pcbuf[1] = (uintptr)System;
+ }
+
+ if (prof.hz != 0) {
+ stk.__values = &pcbuf[0];
+ stk.__count = n;
+ stk.__capacity = n;
+
+ // Simple cas-lock to coordinate with setcpuprofilerate.
+ while (!runtime_cas(&prof.lock, 0, 1)) {
+ runtime_osyield();
+ }
+ if (prof.hz != 0) {
+ runtime_cpuprofAdd(stk);
+ }
+ runtime_atomicstore(&prof.lock, 0);
}
- prof.fn(prof.pcbuf, n);
- runtime_unlock(&prof);
+
mp->mallocing--;
}
// Arrange to call fn with a traceback hz times a second.
void
-runtime_setcpuprofilerate(void (*fn)(uintptr*, int32), int32 hz)
+runtime_setcpuprofilerate_m(int32 hz)
{
// Force sane arguments.
if(hz < 0)
hz = 0;
- if(hz == 0)
- fn = nil;
- if(fn == nil)
- hz = 0;
// Disable preemption, otherwise we can be rescheduled to another thread
// that has profiling enabled.
- m->locks++;
+ g->m->locks++;
// Stop profiler on this thread so that it is safe to lock prof.
// if a profiling signal came in while we had prof locked,
// it would deadlock.
runtime_resetcpuprofiler(0);
- runtime_lock(&prof);
- prof.fn = fn;
+ while (!runtime_cas(&prof.lock, 0, 1)) {
+ runtime_osyield();
+ }
prof.hz = hz;
- runtime_unlock(&prof);
- runtime_lock(&runtime_sched);
- runtime_sched.profilehz = hz;
- runtime_unlock(&runtime_sched);
+ runtime_atomicstore(&prof.lock, 0);
+
+ runtime_lock(&runtime_sched->lock);
+ runtime_sched->profilehz = hz;
+ runtime_unlock(&runtime_sched->lock);
if(hz != 0)
runtime_resetcpuprofiler(hz);
- m->locks--;
-}
-
-// Change number of processors. The world is stopped, sched is locked.
-static void
-procresize(int32 new)
-{
- int32 i, old;
- bool empty;
- G *gp;
- P *p;
-
- old = runtime_gomaxprocs;
- if(old < 0 || old > MaxGomaxprocs || new <= 0 || new >MaxGomaxprocs)
- runtime_throw("procresize: invalid arg");
- // initialize new P's
- for(i = 0; i < new; i++) {
- p = runtime_allp[i];
- if(p == nil) {
- p = (P*)runtime_mallocgc(sizeof(*p), 0, FlagNoInvokeGC);
- p->id = i;
- p->status = Pgcstop;
- runtime_atomicstorep(&runtime_allp[i], p);
- }
- if(p->mcache == nil) {
- if(old==0 && i==0)
- p->mcache = m->mcache; // bootstrap
- else
- p->mcache = runtime_allocmcache();
- }
- }
-
- // redistribute runnable G's evenly
- // collect all runnable goroutines in global queue preserving FIFO order
- // FIFO order is required to ensure fairness even during frequent GCs
- // see http://golang.org/issue/7126
- empty = false;
- while(!empty) {
- empty = true;
- for(i = 0; i < old; i++) {
- p = runtime_allp[i];
- if(p->runqhead == p->runqtail)
- continue;
- empty = false;
- // pop from tail of local queue
- p->runqtail--;
- gp = p->runq[p->runqtail%nelem(p->runq)];
- // push onto head of global queue
- gp->schedlink = runtime_sched.runqhead;
- runtime_sched.runqhead = gp;
- if(runtime_sched.runqtail == nil)
- runtime_sched.runqtail = gp;
- runtime_sched.runqsize++;
- }
- }
- // fill local queues with at most nelem(p->runq)/2 goroutines
- // start at 1 because current M already executes some G and will acquire allp[0] below,
- // so if we have a spare G we want to put it into allp[1].
- for(i = 1; (uint32)i < (uint32)new * nelem(p->runq)/2 && runtime_sched.runqsize > 0; i++) {
- gp = runtime_sched.runqhead;
- runtime_sched.runqhead = gp->schedlink;
- if(runtime_sched.runqhead == nil)
- runtime_sched.runqtail = nil;
- runtime_sched.runqsize--;
- runqput(runtime_allp[i%new], gp);
- }
-
- // free unused P's
- for(i = new; i < old; i++) {
- p = runtime_allp[i];
- runtime_freemcache(p->mcache);
- p->mcache = nil;
- gfpurge(p);
- p->status = Pdead;
- // can't free P itself because it can be referenced by an M in syscall
- }
-
- if(m->p)
- m->p->m = nil;
- m->p = nil;
- m->mcache = nil;
- p = runtime_allp[0];
- p->m = nil;
- p->status = Pidle;
- acquirep(p);
- for(i = new-1; i > 0; i--) {
- p = runtime_allp[i];
- p->status = Pidle;
- pidleput(p);
- }
- runtime_atomicstore((uint32*)&runtime_gomaxprocs, new);
-}
-
-// Associate p and the current m.
-static void
-acquirep(P *p)
-{
- if(m->p || m->mcache)
- runtime_throw("acquirep: already in go");
- if(p->m || p->status != Pidle) {
- runtime_printf("acquirep: p->m=%p(%d) p->status=%d\n", p->m, p->m ? p->m->id : 0, p->status);
- runtime_throw("acquirep: invalid p state");
- }
- m->mcache = p->mcache;
- m->p = p;
- p->m = m;
- p->status = Prunning;
-}
-
-// Disassociate p and the current m.
-static P*
-releasep(void)
-{
- P *p;
-
- if(m->p == nil || m->mcache == nil)
- runtime_throw("releasep: invalid arg");
- p = m->p;
- if(p->m != m || p->mcache != m->mcache || p->status != Prunning) {
- runtime_printf("releasep: m=%p m->p=%p p->m=%p m->mcache=%p p->mcache=%p p->status=%d\n",
- m, m->p, p->m, m->mcache, p->mcache, p->status);
- runtime_throw("releasep: invalid p state");
- }
- m->p = nil;
- m->mcache = nil;
- p->m = nil;
- p->status = Pidle;
- return p;
-}
-
-static void
-incidlelocked(int32 v)
-{
- runtime_lock(&runtime_sched);
- runtime_sched.nmidlelocked += v;
- if(v > 0)
- checkdead();
- runtime_unlock(&runtime_sched);
-}
-
-// Check for deadlock situation.
-// The check is based on number of running M's, if 0 -> deadlock.
-static void
-checkdead(void)
-{
- G *gp;
- int32 run, grunning, s;
- uintptr i;
-
- // For -buildmode=c-shared or -buildmode=c-archive it's OK if
- // there are no running goroutines. The calling program is
- // assumed to be running.
- if(runtime_isarchive) {
- return;
- }
-
- // -1 for sysmon
- run = runtime_sched.mcount - runtime_sched.nmidle - runtime_sched.nmidlelocked - 1 - countextra();
- if(run > 0)
- return;
- // If we are dying because of a signal caught on an already idle thread,
- // freezetheworld will cause all running threads to block.
- // And runtime will essentially enter into deadlock state,
- // except that there is a thread that will call runtime_exit soon.
- if(runtime_panicking > 0)
- return;
- if(run < 0) {
- runtime_printf("runtime: checkdead: nmidle=%d nmidlelocked=%d mcount=%d\n",
- runtime_sched.nmidle, runtime_sched.nmidlelocked, runtime_sched.mcount);
- runtime_throw("checkdead: inconsistent counts");
- }
- grunning = 0;
- runtime_lock(&allglock);
- for(i = 0; i < runtime_allglen; i++) {
- gp = runtime_allg[i];
- if(gp->isbackground)
- continue;
- s = gp->status;
- if(s == Gwaiting)
- grunning++;
- else if(s == Grunnable || s == Grunning || s == Gsyscall) {
- runtime_unlock(&allglock);
- runtime_printf("runtime: checkdead: find g %D in status %d\n", gp->goid, s);
- runtime_throw("checkdead: runnable g");
- }
- }
- runtime_unlock(&allglock);
- if(grunning == 0) // possible if main goroutine calls runtime_Goexit()
- runtime_throw("no goroutines (main called runtime.Goexit) - deadlock!");
- m->throwing = -1; // do not dump full stacks
- runtime_throw("all goroutines are asleep - deadlock!");
-}
-
-static void
-sysmon(void)
-{
- uint32 idle, delay;
- int64 now, lastpoll, lasttrace;
- G *gp;
-
- lasttrace = 0;
- idle = 0; // how many cycles in succession we had not wokeup somebody
- delay = 0;
- for(;;) {
- if(idle == 0) // start with 20us sleep...
- delay = 20;
- else if(idle > 50) // start doubling the sleep after 1ms...
- delay *= 2;
- if(delay > 10*1000) // up to 10ms
- delay = 10*1000;
- runtime_usleep(delay);
- if(runtime_debug.schedtrace <= 0 &&
- (runtime_sched.gcwaiting || runtime_atomicload(&runtime_sched.npidle) == (uint32)runtime_gomaxprocs)) { // TODO: fast atomic
- runtime_lock(&runtime_sched);
- if(runtime_atomicload(&runtime_sched.gcwaiting) || runtime_atomicload(&runtime_sched.npidle) == (uint32)runtime_gomaxprocs) {
- runtime_atomicstore(&runtime_sched.sysmonwait, 1);
- runtime_unlock(&runtime_sched);
- runtime_notesleep(&runtime_sched.sysmonnote);
- runtime_noteclear(&runtime_sched.sysmonnote);
- idle = 0;
- delay = 20;
- } else
- runtime_unlock(&runtime_sched);
- }
- // poll network if not polled for more than 10ms
- lastpoll = runtime_atomicload64(&runtime_sched.lastpoll);
- now = runtime_nanotime();
- if(lastpoll != 0 && lastpoll + 10*1000*1000 < now) {
- runtime_cas64(&runtime_sched.lastpoll, lastpoll, now);
- gp = runtime_netpoll(false); // non-blocking
- if(gp) {
- // Need to decrement number of idle locked M's
- // (pretending that one more is running) before injectglist.
- // Otherwise it can lead to the following situation:
- // injectglist grabs all P's but before it starts M's to run the P's,
- // another M returns from syscall, finishes running its G,
- // observes that there is no work to do and no other running M's
- // and reports deadlock.
- incidlelocked(-1);
- injectglist(gp);
- incidlelocked(1);
- }
- }
- // retake P's blocked in syscalls
- // and preempt long running G's
- if(retake(now))
- idle = 0;
- else
- idle++;
-
- if(runtime_debug.schedtrace > 0 && lasttrace + runtime_debug.schedtrace*1000000ll <= now) {
- lasttrace = now;
- runtime_schedtrace(runtime_debug.scheddetail);
- }
- }
-}
-
-typedef struct Pdesc Pdesc;
-struct Pdesc
-{
- uint32 schedtick;
- int64 schedwhen;
- uint32 syscalltick;
- int64 syscallwhen;
-};
-static Pdesc pdesc[MaxGomaxprocs];
-
-static uint32
-retake(int64 now)
-{
- uint32 i, s, n;
- int64 t;
- P *p;
- Pdesc *pd;
-
- n = 0;
- for(i = 0; i < (uint32)runtime_gomaxprocs; i++) {
- p = runtime_allp[i];
- if(p==nil)
- continue;
- pd = &pdesc[i];
- s = p->status;
- if(s == Psyscall) {
- // Retake P from syscall if it's there for more than 1 sysmon tick (at least 20us).
- t = p->syscalltick;
- if(pd->syscalltick != t) {
- pd->syscalltick = t;
- pd->syscallwhen = now;
- continue;
- }
- // On the one hand we don't want to retake Ps if there is no other work to do,
- // but on the other hand we want to retake them eventually
- // because they can prevent the sysmon thread from deep sleep.
- if(p->runqhead == p->runqtail &&
- runtime_atomicload(&runtime_sched.nmspinning) + runtime_atomicload(&runtime_sched.npidle) > 0 &&
- pd->syscallwhen + 10*1000*1000 > now)
- continue;
- // Need to decrement number of idle locked M's
- // (pretending that one more is running) before the CAS.
- // Otherwise the M from which we retake can exit the syscall,
- // increment nmidle and report deadlock.
- incidlelocked(-1);
- if(runtime_cas(&p->status, s, Pidle)) {
- n++;
- handoffp(p);
- }
- incidlelocked(1);
- } else if(s == Prunning) {
- // Preempt G if it's running for more than 10ms.
- t = p->schedtick;
- if(pd->schedtick != t) {
- pd->schedtick = t;
- pd->schedwhen = now;
- continue;
- }
- if(pd->schedwhen + 10*1000*1000 > now)
- continue;
- // preemptone(p);
- }
- }
- return n;
-}
-
-// Tell all goroutines that they have been preempted and they should stop.
-// This function is purely best-effort. It can fail to inform a goroutine if a
-// processor just started running it.
-// No locks need to be held.
-// Returns true if preemption request was issued to at least one goroutine.
-static bool
-preemptall(void)
-{
- return false;
-}
-
-void
-runtime_schedtrace(bool detailed)
-{
- static int64 starttime;
- int64 now;
- int64 id1, id2, id3;
- int32 i, t, h;
- uintptr gi;
- const char *fmt;
- M *mp, *lockedm;
- G *gp, *lockedg;
- P *p;
-
- now = runtime_nanotime();
- if(starttime == 0)
- starttime = now;
-
- runtime_lock(&runtime_sched);
- runtime_printf("SCHED %Dms: gomaxprocs=%d idleprocs=%d threads=%d idlethreads=%d runqueue=%d",
- (now-starttime)/1000000, runtime_gomaxprocs, runtime_sched.npidle, runtime_sched.mcount,
- runtime_sched.nmidle, runtime_sched.runqsize);
- if(detailed) {
- runtime_printf(" gcwaiting=%d nmidlelocked=%d nmspinning=%d stopwait=%d sysmonwait=%d\n",
- runtime_sched.gcwaiting, runtime_sched.nmidlelocked, runtime_sched.nmspinning,
- runtime_sched.stopwait, runtime_sched.sysmonwait);
- }
- // We must be careful while reading data from P's, M's and G's.
- // Even if we hold schedlock, most data can be changed concurrently.
- // E.g. (p->m ? p->m->id : -1) can crash if p->m changes from non-nil to nil.
- for(i = 0; i < runtime_gomaxprocs; i++) {
- p = runtime_allp[i];
- if(p == nil)
- continue;
- mp = p->m;
- h = runtime_atomicload(&p->runqhead);
- t = runtime_atomicload(&p->runqtail);
- if(detailed)
- runtime_printf(" P%d: status=%d schedtick=%d syscalltick=%d m=%d runqsize=%d gfreecnt=%d\n",
- i, p->status, p->schedtick, p->syscalltick, mp ? mp->id : -1, t-h, p->gfreecnt);
- else {
- // In non-detailed mode format lengths of per-P run queues as:
- // [len1 len2 len3 len4]
- fmt = " %d";
- if(runtime_gomaxprocs == 1)
- fmt = " [%d]\n";
- else if(i == 0)
- fmt = " [%d";
- else if(i == runtime_gomaxprocs-1)
- fmt = " %d]\n";
- runtime_printf(fmt, t-h);
- }
- }
- if(!detailed) {
- runtime_unlock(&runtime_sched);
- return;
- }
- for(mp = runtime_allm; mp; mp = mp->alllink) {
- p = mp->p;
- gp = mp->curg;
- lockedg = mp->lockedg;
- id1 = -1;
- if(p)
- id1 = p->id;
- id2 = -1;
- if(gp)
- id2 = gp->goid;
- id3 = -1;
- if(lockedg)
- id3 = lockedg->goid;
- runtime_printf(" M%d: p=%D curg=%D mallocing=%d throwing=%d gcing=%d"
- " locks=%d dying=%d helpgc=%d spinning=%d blocked=%d lockedg=%D\n",
- mp->id, id1, id2,
- mp->mallocing, mp->throwing, mp->gcing, mp->locks, mp->dying, mp->helpgc,
- mp->spinning, m->blocked, id3);
- }
- runtime_lock(&allglock);
- for(gi = 0; gi < runtime_allglen; gi++) {
- gp = runtime_allg[gi];
- mp = gp->m;
- lockedm = gp->lockedm;
- runtime_printf(" G%D: status=%d(%s) m=%d lockedm=%d\n",
- gp->goid, gp->status, gp->waitreason, mp ? mp->id : -1,
- lockedm ? lockedm->id : -1);
- }
- runtime_unlock(&allglock);
- runtime_unlock(&runtime_sched);
-}
-
-// Put mp on midle list.
-// Sched must be locked.
-static void
-mput(M *mp)
-{
- mp->schedlink = runtime_sched.midle;
- runtime_sched.midle = mp;
- runtime_sched.nmidle++;
- checkdead();
-}
-
-// Try to get an m from midle list.
-// Sched must be locked.
-static M*
-mget(void)
-{
- M *mp;
-
- if((mp = runtime_sched.midle) != nil){
- runtime_sched.midle = mp->schedlink;
- runtime_sched.nmidle--;
- }
- return mp;
-}
-
-// Put gp on the global runnable queue.
-// Sched must be locked.
-static void
-globrunqput(G *gp)
-{
- gp->schedlink = nil;
- if(runtime_sched.runqtail)
- runtime_sched.runqtail->schedlink = gp;
- else
- runtime_sched.runqhead = gp;
- runtime_sched.runqtail = gp;
- runtime_sched.runqsize++;
-}
-
-// Put a batch of runnable goroutines on the global runnable queue.
-// Sched must be locked.
-static void
-globrunqputbatch(G *ghead, G *gtail, int32 n)
-{
- gtail->schedlink = nil;
- if(runtime_sched.runqtail)
- runtime_sched.runqtail->schedlink = ghead;
- else
- runtime_sched.runqhead = ghead;
- runtime_sched.runqtail = gtail;
- runtime_sched.runqsize += n;
-}
-
-// Try get a batch of G's from the global runnable queue.
-// Sched must be locked.
-static G*
-globrunqget(P *p, int32 max)
-{
- G *gp, *gp1;
- int32 n;
-
- if(runtime_sched.runqsize == 0)
- return nil;
- n = runtime_sched.runqsize/runtime_gomaxprocs+1;
- if(n > runtime_sched.runqsize)
- n = runtime_sched.runqsize;
- if(max > 0 && n > max)
- n = max;
- if((uint32)n > nelem(p->runq)/2)
- n = nelem(p->runq)/2;
- runtime_sched.runqsize -= n;
- if(runtime_sched.runqsize == 0)
- runtime_sched.runqtail = nil;
- gp = runtime_sched.runqhead;
- runtime_sched.runqhead = gp->schedlink;
- n--;
- while(n--) {
- gp1 = runtime_sched.runqhead;
- runtime_sched.runqhead = gp1->schedlink;
- runqput(p, gp1);
- }
- return gp;
-}
-
-// Put p to on pidle list.
-// Sched must be locked.
-static void
-pidleput(P *p)
-{
- p->link = runtime_sched.pidle;
- runtime_sched.pidle = p;
- runtime_xadd(&runtime_sched.npidle, 1); // TODO: fast atomic
-}
-
-// Try get a p from pidle list.
-// Sched must be locked.
-static P*
-pidleget(void)
-{
- P *p;
-
- p = runtime_sched.pidle;
- if(p) {
- runtime_sched.pidle = p->link;
- runtime_xadd(&runtime_sched.npidle, -1); // TODO: fast atomic
- }
- return p;
-}
-
-// Try to put g on local runnable queue.
-// If it's full, put onto global queue.
-// Executed only by the owner P.
-static void
-runqput(P *p, G *gp)
-{
- uint32 h, t;
-
-retry:
- h = runtime_atomicload(&p->runqhead); // load-acquire, synchronize with consumers
- t = p->runqtail;
- if(t - h < nelem(p->runq)) {
- p->runq[t%nelem(p->runq)] = gp;
- runtime_atomicstore(&p->runqtail, t+1); // store-release, makes the item available for consumption
- return;
- }
- if(runqputslow(p, gp, h, t))
- return;
- // the queue is not full, now the put above must suceed
- goto retry;
-}
-
-// Put g and a batch of work from local runnable queue on global queue.
-// Executed only by the owner P.
-static bool
-runqputslow(P *p, G *gp, uint32 h, uint32 t)
-{
- G *batch[nelem(p->runq)/2+1];
- uint32 n, i;
-
- // First, grab a batch from local queue.
- n = t-h;
- n = n/2;
- if(n != nelem(p->runq)/2)
- runtime_throw("runqputslow: queue is not full");
- for(i=0; i<n; i++)
- batch[i] = p->runq[(h+i)%nelem(p->runq)];
- if(!runtime_cas(&p->runqhead, h, h+n)) // cas-release, commits consume
- return false;
- batch[n] = gp;
- // Link the goroutines.
- for(i=0; i<n; i++)
- batch[i]->schedlink = batch[i+1];
- // Now put the batch on global queue.
- runtime_lock(&runtime_sched);
- globrunqputbatch(batch[0], batch[n], n+1);
- runtime_unlock(&runtime_sched);
- return true;
-}
-
-// Get g from local runnable queue.
-// Executed only by the owner P.
-static G*
-runqget(P *p)
-{
- G *gp;
- uint32 t, h;
-
- for(;;) {
- h = runtime_atomicload(&p->runqhead); // load-acquire, synchronize with other consumers
- t = p->runqtail;
- if(t == h)
- return nil;
- gp = p->runq[h%nelem(p->runq)];
- if(runtime_cas(&p->runqhead, h, h+1)) // cas-release, commits consume
- return gp;
- }
-}
-
-// Grabs a batch of goroutines from local runnable queue.
-// batch array must be of size nelem(p->runq)/2. Returns number of grabbed goroutines.
-// Can be executed by any P.
-static uint32
-runqgrab(P *p, G **batch)
-{
- uint32 t, h, n, i;
-
- for(;;) {
- h = runtime_atomicload(&p->runqhead); // load-acquire, synchronize with other consumers
- t = runtime_atomicload(&p->runqtail); // load-acquire, synchronize with the producer
- n = t-h;
- n = n - n/2;
- if(n == 0)
- break;
- if(n > nelem(p->runq)/2) // read inconsistent h and t
- continue;
- for(i=0; i<n; i++)
- batch[i] = p->runq[(h+i)%nelem(p->runq)];
- if(runtime_cas(&p->runqhead, h, h+n)) // cas-release, commits consume
- break;
- }
- return n;
-}
-
-// Steal half of elements from local runnable queue of p2
-// and put onto local runnable queue of p.
-// Returns one of the stolen elements (or nil if failed).
-static G*
-runqsteal(P *p, P *p2)
-{
- G *gp;
- G *batch[nelem(p->runq)/2];
- uint32 t, h, n, i;
-
- n = runqgrab(p2, batch);
- if(n == 0)
- return nil;
- n--;
- gp = batch[n];
- if(n == 0)
- return gp;
- h = runtime_atomicload(&p->runqhead); // load-acquire, synchronize with consumers
- t = p->runqtail;
- if(t - h + n >= nelem(p->runq))
- runtime_throw("runqsteal: runq overflow");
- for(i=0; i<n; i++, t++)
- p->runq[t%nelem(p->runq)] = batch[i];
- runtime_atomicstore(&p->runqtail, t); // store-release, makes the item available for consumption
- return gp;
-}
-
-void runtime_testSchedLocalQueue(void)
- __asm__("runtime.testSchedLocalQueue");
-
-void
-runtime_testSchedLocalQueue(void)
-{
- P p;
- G gs[nelem(p.runq)];
- int32 i, j;
-
- runtime_memclr((byte*)&p, sizeof(p));
-
- for(i = 0; i < (int32)nelem(gs); i++) {
- if(runqget(&p) != nil)
- runtime_throw("runq is not empty initially");
- for(j = 0; j < i; j++)
- runqput(&p, &gs[i]);
- for(j = 0; j < i; j++) {
- if(runqget(&p) != &gs[i]) {
- runtime_printf("bad element at iter %d/%d\n", i, j);
- runtime_throw("bad element");
- }
- }
- if(runqget(&p) != nil)
- runtime_throw("runq is not empty afterwards");
- }
-}
-
-void runtime_testSchedLocalQueueSteal(void)
- __asm__("runtime.testSchedLocalQueueSteal");
-
-void
-runtime_testSchedLocalQueueSteal(void)
-{
- P p1, p2;
- G gs[nelem(p1.runq)], *gp;
- int32 i, j, s;
-
- runtime_memclr((byte*)&p1, sizeof(p1));
- runtime_memclr((byte*)&p2, sizeof(p2));
-
- for(i = 0; i < (int32)nelem(gs); i++) {
- for(j = 0; j < i; j++) {
- gs[j].sig = 0;
- runqput(&p1, &gs[j]);
- }
- gp = runqsteal(&p2, &p1);
- s = 0;
- if(gp) {
- s++;
- gp->sig++;
- }
- while((gp = runqget(&p2)) != nil) {
- s++;
- gp->sig++;
- }
- while((gp = runqget(&p1)) != nil)
- gp->sig++;
- for(j = 0; j < i; j++) {
- if(gs[j].sig != 1) {
- runtime_printf("bad element %d(%d) at iter %d\n", j, gs[j].sig, i);
- runtime_throw("bad element");
- }
- }
- if(s != i/2 && s != i/2+1) {
- runtime_printf("bad steal %d, want %d or %d, iter %d\n",
- s, i/2, i/2+1, i);
- runtime_throw("bad steal");
- }
- }
-}
-
-int32
-runtime_setmaxthreads(int32 in)
-{
- int32 out;
-
- runtime_lock(&runtime_sched);
- out = runtime_sched.maxmcount;
- runtime_sched.maxmcount = in;
- checkmcount();
- runtime_unlock(&runtime_sched);
- return out;
-}
-
-void
-runtime_proc_scan(struct Workbuf** wbufp, void (*enqueue1)(struct Workbuf**, Obj))
-{
- enqueue1(wbufp, (Obj){(byte*)&runtime_sched, sizeof runtime_sched, 0});
- enqueue1(wbufp, (Obj){(byte*)&runtime_main_init_done, sizeof runtime_main_init_done, 0});
+ g->m->locks--;
}
// Return whether we are waiting for a GC. This gc toolchain uses
@@ -3442,7 +1576,7 @@ runtime_proc_scan(struct Workbuf** wbufp, void (*enqueue1)(struct Workbuf**, Obj
bool
runtime_gcwaiting(void)
{
- return runtime_sched.gcwaiting;
+ return runtime_sched->gcwaiting;
}
// os_beforeExit is called from os.Exit(0).
@@ -3455,43 +1589,10 @@ os_beforeExit()
{
}
-// Active spinning for sync.Mutex.
-//go:linkname sync_runtime_canSpin sync.runtime_canSpin
-
-enum
-{
- ACTIVE_SPIN = 4,
- ACTIVE_SPIN_CNT = 30,
-};
-
-extern _Bool sync_runtime_canSpin(intgo i)
- __asm__ (GOSYM_PREFIX "sync.runtime_canSpin");
+intgo NumCPU(void) __asm__ (GOSYM_PREFIX "runtime.NumCPU");
-_Bool
-sync_runtime_canSpin(intgo i)
-{
- P *p;
-
- // sync.Mutex is cooperative, so we are conservative with spinning.
- // Spin only few times and only if running on a multicore machine and
- // GOMAXPROCS>1 and there is at least one other running P and local runq is empty.
- // As opposed to runtime mutex we don't do passive spinning here,
- // because there can be work on global runq on on other Ps.
- if (i >= ACTIVE_SPIN || runtime_ncpu <= 1 || runtime_gomaxprocs <= (int32)(runtime_sched.npidle+runtime_sched.nmspinning)+1) {
- return false;
- }
- p = m->p;
- return p != nil && p->runqhead == p->runqtail;
-}
-
-//go:linkname sync_runtime_doSpin sync.runtime_doSpin
-//go:nosplit
-
-extern void sync_runtime_doSpin(void)
- __asm__ (GOSYM_PREFIX "sync.runtime_doSpin");
-
-void
-sync_runtime_doSpin()
+intgo
+NumCPU()
{
- runtime_procyield(ACTIVE_SPIN_CNT);
+ return (intgo)(runtime_ncpu);
}
diff --git a/libgo/runtime/rdebug.goc b/libgo/runtime/rdebug.goc
deleted file mode 100644
index 63eb4dd457..0000000000
--- a/libgo/runtime/rdebug.goc
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package runtime_debug
-#include "runtime.h"
-#include "arch.h"
-#include "malloc.h"
-
-func setMaxStack(in int) (out int) {
- out = runtime_maxstacksize;
- runtime_maxstacksize = in;
-}
-
-func setGCPercent(in int) (out int) {
- out = runtime_setgcpercent(in);
-}
-
-func setMaxThreads(in int) (out int) {
- out = runtime_setmaxthreads(in);
-}
-
-func SetPanicOnFault(enabled bool) (old bool) {
- old = runtime_g()->paniconfault;
- runtime_g()->paniconfault = enabled;
-}
diff --git a/libgo/runtime/reflect.goc b/libgo/runtime/reflect.goc
deleted file mode 100644
index 4e493ee810..0000000000
--- a/libgo/runtime/reflect.goc
+++ /dev/null
@@ -1,25 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package reflect
-#include "runtime.h"
-#include "go-type.h"
-#include "interface.h"
-#include "go-panic.h"
-
-func ifaceE2I(inter *Type, e Eface, ret *Iface) {
- const Type *t;
- Eface err;
-
- t = e.__type_descriptor;
- if(t == nil) {
- // explicit conversions require non-nil interface value.
- runtime_newTypeAssertionError(
- nil, nil, inter->__reflection,
- nil, &err);
- runtime_panic(err);
- }
- ret->__object = e.__object;
- ret->__methods = __go_convert_interface(inter, t);
-}
diff --git a/libgo/runtime/runtime.c b/libgo/runtime/runtime.c
deleted file mode 100644
index 4140d33d04..0000000000
--- a/libgo/runtime/runtime.c
+++ /dev/null
@@ -1,454 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-#include <signal.h>
-#include <unistd.h>
-
-#include "config.h"
-
-#include "runtime.h"
-#include "arch.h"
-#include "array.h"
-
-enum {
- maxround = sizeof(uintptr),
-};
-
-// Keep a cached value to make gotraceback fast,
-// since we call it on every call to gentraceback.
-// The cached value is a uint32 in which the low bit
-// is the "crash" setting and the top 31 bits are the
-// gotraceback value.
-enum {
- tracebackCrash = 1 << 0,
- tracebackAll = 1 << 1,
- tracebackShift = 2,
-};
-static uint32 traceback_cache = 2 << tracebackShift;
-static uint32 traceback_env;
-
-extern volatile intgo runtime_MemProfileRate
- __asm__ (GOSYM_PREFIX "runtime.MemProfileRate");
-
-
-// gotraceback returns the current traceback settings.
-//
-// If level is 0, suppress all tracebacks.
-// If level is 1, show tracebacks, but exclude runtime frames.
-// If level is 2, show tracebacks including runtime frames.
-// If all is set, print all goroutine stacks. Otherwise, print just the current goroutine.
-// If crash is set, crash (core dump, etc) after tracebacking.
-int32
-runtime_gotraceback(bool *crash)
-{
- uint32 x;
-
- if(crash != nil)
- *crash = false;
- if(runtime_m()->traceback != 0)
- return runtime_m()->traceback;
- x = runtime_atomicload(&traceback_cache);
- if(crash != nil)
- *crash = x&tracebackCrash;
- return x>>tracebackShift;
-}
-
-static int32 argc;
-static byte** argv;
-
-static Slice args;
-Slice envs;
-
-void (*runtime_sysargs)(int32, uint8**);
-
-void
-runtime_args(int32 c, byte **v)
-{
- argc = c;
- argv = v;
- if(runtime_sysargs != nil)
- runtime_sysargs(c, v);
-}
-
-byte*
-runtime_progname()
-{
- return argc == 0 ? nil : argv[0];
-}
-
-void
-runtime_goargs(void)
-{
- String *s;
- int32 i;
-
- // for windows implementation see "os" package
- if(Windows)
- return;
-
- s = runtime_malloc(argc*sizeof s[0]);
- for(i=0; i<argc; i++)
- s[i] = runtime_gostringnocopy((const byte*)argv[i]);
- args.__values = (void*)s;
- args.__count = argc;
- args.__capacity = argc;
-}
-
-void
-runtime_goenvs_unix(void)
-{
- String *s;
- int32 i, n;
-
- for(n=0; argv[argc+1+n] != 0; n++)
- ;
-
- s = runtime_malloc(n*sizeof s[0]);
- for(i=0; i<n; i++)
- s[i] = runtime_gostringnocopy(argv[argc+1+i]);
- envs.__values = (void*)s;
- envs.__count = n;
- envs.__capacity = n;
-}
-
-// Called from the syscall package.
-Slice runtime_envs(void) __asm__ (GOSYM_PREFIX "syscall.runtime_envs");
-
-Slice
-runtime_envs()
-{
- return envs;
-}
-
-Slice os_runtime_args(void) __asm__ (GOSYM_PREFIX "os.runtime_args");
-
-Slice
-os_runtime_args()
-{
- return args;
-}
-
-int32
-runtime_atoi(const byte *p, intgo len)
-{
- int32 n;
-
- n = 0;
- while(len > 0 && '0' <= *p && *p <= '9') {
- n = n*10 + *p++ - '0';
- len--;
- }
- return n;
-}
-
-static struct root_list runtime_roots =
-{ nil,
- { { &envs, sizeof envs },
- { &args, sizeof args },
- { nil, 0 } },
-};
-
-static void
-TestAtomic64(void)
-{
- uint64 z64, x64;
-
- z64 = 42;
- x64 = 0;
- PREFETCH(&z64);
- if(runtime_cas64(&z64, x64, 1))
- runtime_throw("cas64 failed");
- if(x64 != 0)
- runtime_throw("cas64 failed");
- x64 = 42;
- if(!runtime_cas64(&z64, x64, 1))
- runtime_throw("cas64 failed");
- if(x64 != 42 || z64 != 1)
- runtime_throw("cas64 failed");
- if(runtime_atomicload64(&z64) != 1)
- runtime_throw("load64 failed");
- runtime_atomicstore64(&z64, (1ull<<40)+1);
- if(runtime_atomicload64(&z64) != (1ull<<40)+1)
- runtime_throw("store64 failed");
- if(runtime_xadd64(&z64, (1ull<<40)+1) != (2ull<<40)+2)
- runtime_throw("xadd64 failed");
- if(runtime_atomicload64(&z64) != (2ull<<40)+2)
- runtime_throw("xadd64 failed");
- if(runtime_xchg64(&z64, (3ull<<40)+3) != (2ull<<40)+2)
- runtime_throw("xchg64 failed");
- if(runtime_atomicload64(&z64) != (3ull<<40)+3)
- runtime_throw("xchg64 failed");
-}
-
-void
-runtime_check(void)
-{
- __go_register_gc_roots(&runtime_roots);
-
- TestAtomic64();
-}
-
-uint32
-runtime_fastrand1(void)
-{
- M *m;
- uint32 x;
-
- m = runtime_m();
- x = m->fastrand;
- x += x;
- if(x & 0x80000000L)
- x ^= 0x88888eefUL;
- m->fastrand = x;
- return x;
-}
-
-int64
-runtime_cputicks(void)
-{
-#if defined(__386__) || defined(__x86_64__)
- uint32 low, high;
- asm("rdtsc" : "=a" (low), "=d" (high));
- return (int64)(((uint64)high << 32) | (uint64)low);
-#elif defined (__s390__) || defined (__s390x__)
- uint64 clock = 0;
- /* stckf may not write the return variable in case of a clock error, so make
- it read-write to prevent that the initialisation is optimised out.
- Note: Targets below z9-109 will crash when executing store clock fast, i.e.
- we don't support Go for machines older than that. */
- asm volatile(".insn s,0xb27c0000,%0" /* stckf */ : "+Q" (clock) : : "cc" );
- return (int64)clock;
-#else
- // FIXME: implement for other processors.
- return 0;
-#endif
-}
-
-bool
-runtime_showframe(String s, bool current)
-{
- static int32 traceback = -1;
-
- if(current && runtime_m()->throwing > 0)
- return 1;
- if(traceback < 0)
- traceback = runtime_gotraceback(nil);
- return traceback > 1 || (__builtin_memchr(s.str, '.', s.len) != nil && __builtin_memcmp(s.str, "runtime.", 7) != 0);
-}
-
-static Lock ticksLock;
-static int64 ticks;
-
-int64
-runtime_tickspersecond(void)
-{
- int64 res, t0, t1, c0, c1;
-
- res = (int64)runtime_atomicload64((uint64*)&ticks);
- if(res != 0)
- return ticks;
- runtime_lock(&ticksLock);
- res = ticks;
- if(res == 0) {
- t0 = runtime_nanotime();
- c0 = runtime_cputicks();
- runtime_usleep(100*1000);
- t1 = runtime_nanotime();
- c1 = runtime_cputicks();
- if(t1 == t0)
- t1++;
- res = (c1-c0)*1000*1000*1000/(t1-t0);
- if(res == 0)
- res++;
- runtime_atomicstore64((uint64*)&ticks, res);
- }
- runtime_unlock(&ticksLock);
- return res;
-}
-
-// Called to initialize a new m (including the bootstrap m).
-// Called on the parent thread (main thread in case of bootstrap), can allocate memory.
-void
-runtime_mpreinit(M *mp)
-{
- mp->gsignal = runtime_malg(32*1024, &mp->gsignalstack, &mp->gsignalstacksize); // OS X wants >=8K, Linux >=2K
-}
-
-// Called to initialize a new m (including the bootstrap m).
-// Called on the new thread, can not allocate memory.
-void
-runtime_minit(void)
-{
- M* m;
- sigset_t sigs;
-
- // Initialize signal handling.
- m = runtime_m();
- runtime_signalstack(m->gsignalstack, m->gsignalstacksize);
- if (sigemptyset(&sigs) != 0)
- runtime_throw("sigemptyset");
- pthread_sigmask(SIG_SETMASK, &sigs, nil);
-}
-
-// Called from dropm to undo the effect of an minit.
-void
-runtime_unminit(void)
-{
- runtime_signalstack(nil, 0);
-}
-
-
-void
-runtime_signalstack(byte *p, int32 n)
-{
- stack_t st;
-
- st.ss_sp = p;
- st.ss_size = n;
- st.ss_flags = 0;
- if(p == nil)
- st.ss_flags = SS_DISABLE;
- if(sigaltstack(&st, nil) < 0)
- *(int *)0xf1 = 0xf1;
-}
-
-void setTraceback(String level)
- __asm__ (GOSYM_PREFIX "runtime_debug.SetTraceback");
-
-void setTraceback(String level) {
- uint32 t;
-
- if (level.len == 4 && __builtin_memcmp(level.str, "none", 4) == 0) {
- t = 0;
- } else if (level.len == 0 || (level.len == 6 && __builtin_memcmp(level.str, "single", 6) == 0)) {
- t = 1 << tracebackShift;
- } else if (level.len == 3 && __builtin_memcmp(level.str, "all", 3) == 0) {
- t = (1<<tracebackShift) | tracebackAll;
- } else if (level.len == 6 && __builtin_memcmp(level.str, "system", 6) == 0) {
- t = (2<<tracebackShift) | tracebackAll;
- } else if (level.len == 5 && __builtin_memcmp(level.str, "crash", 5) == 0) {
- t = (2<<tracebackShift) | tracebackAll | tracebackCrash;
- } else {
- t = (runtime_atoi(level.str, level.len)<<tracebackShift) | tracebackAll;
- }
-
- t |= traceback_env;
-
- runtime_atomicstore(&traceback_cache, t);
-}
-
-DebugVars runtime_debug;
-
-// Holds variables parsed from GODEBUG env var,
-// except for "memprofilerate" since there is an
-// existing var for that value which is int
-// instead of in32 and might have an
-// initial value.
-static struct {
- const char* name;
- int32* value;
-} dbgvar[] = {
- {"allocfreetrace", &runtime_debug.allocfreetrace},
- {"cgocheck", &runtime_debug.cgocheck},
- {"efence", &runtime_debug.efence},
- {"gccheckmark", &runtime_debug.gccheckmark},
- {"gcpacertrace", &runtime_debug.gcpacertrace},
- {"gcshrinkstackoff", &runtime_debug.gcshrinkstackoff},
- {"gcstackbarrieroff", &runtime_debug.gcstackbarrieroff},
- {"gcstackbarrierall", &runtime_debug.gcstackbarrierall},
- {"gcstoptheworld", &runtime_debug.gcstoptheworld},
- {"gctrace", &runtime_debug.gctrace},
- {"gcdead", &runtime_debug.gcdead},
- {"invalidptr", &runtime_debug.invalidptr},
- {"sbrk", &runtime_debug.sbrk},
- {"scavenge", &runtime_debug.scavenge},
- {"scheddetail", &runtime_debug.scheddetail},
- {"schedtrace", &runtime_debug.schedtrace},
- {"wbshadow", &runtime_debug.wbshadow},
-};
-
-void
-runtime_parsedebugvars(void)
-{
- String s;
- const byte *p, *pn;
- intgo len;
- intgo i, n;
-
- s = runtime_getenv("GODEBUG");
- if(s.len == 0)
- return;
- p = s.str;
- len = s.len;
- for(;;) {
- for(i=0; i<(intgo)nelem(dbgvar); i++) {
- n = runtime_findnull((const byte*)dbgvar[i].name);
- if(len > n && runtime_mcmp(p, "memprofilerate", n) == 0 && p[n] == '=')
- // Set the MemProfileRate directly since it
- // is an int, not int32, and should only lbe
- // set here if specified by GODEBUG
- runtime_MemProfileRate = runtime_atoi(p+n+1, len-(n+1));
- else if(len > n && runtime_mcmp(p, dbgvar[i].name, n) == 0 && p[n] == '=')
- *dbgvar[i].value = runtime_atoi(p+n+1, len-(n+1));
- }
- pn = (const byte *)runtime_strstr((const char *)p, ",");
- if(pn == nil || pn - p >= len)
- break;
- len -= (pn - p) - 1;
- p = pn + 1;
- }
-
- setTraceback(runtime_getenv("GOTRACEBACK"));
- traceback_env = traceback_cache;
-}
-
-// SetTracebackEnv is like runtime/debug.SetTraceback, but it raises
-// the "environment" traceback level, so later calls to
-// debug.SetTraceback (e.g., from testing timeouts) can't lower it.
-void SetTracebackEnv(String level)
- __asm__ (GOSYM_PREFIX "runtime.SetTracebackEnv");
-
-void SetTracebackEnv(String level) {
- setTraceback(level);
- traceback_env = traceback_cache;
-}
-
-// Poor mans 64-bit division.
-// This is a very special function, do not use it if you are not sure what you are doing.
-// int64 division is lowered into _divv() call on 386, which does not fit into nosplit functions.
-// Handles overflow in a time-specific manner.
-int32
-runtime_timediv(int64 v, int32 div, int32 *rem)
-{
- int32 res, bit;
-
- if(v >= (int64)div*0x7fffffffLL) {
- if(rem != nil)
- *rem = 0;
- return 0x7fffffff;
- }
- res = 0;
- for(bit = 30; bit >= 0; bit--) {
- if(v >= ((int64)div<<bit)) {
- v = v - ((int64)div<<bit);
- res += 1<<bit;
- }
- }
- if(rem != nil)
- *rem = v;
- return res;
-}
-
-// Setting the max stack size doesn't really do anything for gccgo.
-
-uintptr runtime_maxstacksize = 1<<20; // enough until runtime.main sets it for real
-
-void memclrBytes(Slice)
- __asm__ (GOSYM_PREFIX "runtime.memclrBytes");
-
-void
-memclrBytes(Slice s)
-{
- runtime_memclr(s.__values, s.__count);
-}
diff --git a/libgo/runtime/runtime.h b/libgo/runtime/runtime.h
index 73c46e9117..644fe92865 100644
--- a/libgo/runtime/runtime.h
+++ b/libgo/runtime/runtime.h
@@ -7,6 +7,7 @@
#include "go-assert.h"
#include <complex.h>
#include <signal.h>
+#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
@@ -22,9 +23,6 @@
#include <sys/mman.h>
#endif
-#include "interface.h"
-#include "go-alloc.h"
-
#define _STRINGIFY2_(x) #x
#define _STRINGIFY_(x) _STRINGIFY2_(x)
#define GOSYM_PREFIX _STRINGIFY_(__USER_LABEL_PREFIX__)
@@ -53,36 +51,35 @@ typedef uintptr uintreg;
/* Defined types. */
-typedef uint8 bool;
+typedef _Bool bool;
typedef uint8 byte;
-typedef struct Func Func;
-typedef struct G G;
-typedef struct Lock Lock;
-typedef struct M M;
-typedef struct P P;
-typedef struct Note Note;
+typedef struct g G;
+typedef struct mutex Lock;
+typedef struct m M;
+typedef struct p P;
+typedef struct note Note;
typedef struct String String;
typedef struct FuncVal FuncVal;
typedef struct SigTab SigTab;
-typedef struct MCache MCache;
+typedef struct mcache MCache;
typedef struct FixAlloc FixAlloc;
-typedef struct Hchan Hchan;
-typedef struct Timers Timers;
-typedef struct Timer Timer;
-typedef struct GCStats GCStats;
-typedef struct LFNode LFNode;
+typedef struct hchan Hchan;
+typedef struct timer Timer;
+typedef struct gcstats GCStats;
+typedef struct lfnode LFNode;
typedef struct ParFor ParFor;
typedef struct ParForThread ParForThread;
-typedef struct CgoMal CgoMal;
+typedef struct cgoMal CgoMal;
typedef struct PollDesc PollDesc;
-typedef struct DebugVars DebugVars;
+typedef struct sudog SudoG;
+typedef struct schedt Sched;
typedef struct __go_open_array Slice;
-typedef struct __go_interface Iface;
-typedef struct __go_empty_interface Eface;
+typedef struct iface Iface;
+typedef struct eface Eface;
typedef struct __go_type_descriptor Type;
-typedef struct __go_defer_stack Defer;
-typedef struct __go_panic_stack Panic;
+typedef struct _defer Defer;
+typedef struct _panic Panic;
typedef struct __go_ptr_type PtrType;
typedef struct __go_func_type FuncType;
@@ -90,46 +87,44 @@ typedef struct __go_interface_type InterfaceType;
typedef struct __go_map_type MapType;
typedef struct __go_channel_type ChanType;
-typedef struct Traceback Traceback;
+typedef struct tracebackg Traceback;
+
+typedef struct location Location;
-typedef struct Location Location;
+struct String
+{
+ const byte* str;
+ intgo len;
+};
+
+struct FuncVal
+{
+ void (*fn)(void);
+ // variable-size, fn-specific data here
+};
+
+#include "array.h"
+
+// Rename Go types generated by mkrsysinfo.sh from C types, to avoid
+// the name conflict.
+#define timeval go_timeval
+#define timespec go_timespec
+
+#include "runtime.inc"
+
+#undef timeval
+#undef timespec
/*
* Per-CPU declaration.
*/
extern M* runtime_m(void);
-extern G* runtime_g(void);
+extern G* runtime_g(void)
+ __asm__(GOSYM_PREFIX "runtime.getg");
extern M runtime_m0;
extern G runtime_g0;
-/*
- * defined constants
- */
-enum
-{
- // G status
- //
- // If you add to this list, add to the list
- // of "okay during garbage collection" status
- // in mgc0.c too.
- Gidle,
- Grunnable,
- Grunning,
- Gsyscall,
- Gwaiting,
- Gmoribund_unused, // currently unused, but hardcoded in gdb scripts
- Gdead,
-};
-enum
-{
- // P status
- Pidle,
- Prunning,
- Psyscall,
- Pgcstop,
- Pdead,
-};
enum
{
true = 1,
@@ -146,184 +141,6 @@ enum
// Global <-> per-M stack segment cache transfer batch size.
StackCacheBatch = 16,
};
-/*
- * structures
- */
-struct Lock
-{
- // Futex-based impl treats it as uint32 key,
- // while sema-based impl as M* waitm.
- // Used to be a union, but unions break precise GC.
- uintptr key;
-};
-struct Note
-{
- // Futex-based impl treats it as uint32 key,
- // while sema-based impl as M* waitm.
- // Used to be a union, but unions break precise GC.
- uintptr key;
-};
-struct String
-{
- const byte* str;
- intgo len;
-};
-struct FuncVal
-{
- void (*fn)(void);
- // variable-size, fn-specific data here
-};
-struct GCStats
-{
- // the struct must consist of only uint64's,
- // because it is casted to uint64[].
- uint64 nhandoff;
- uint64 nhandoffcnt;
- uint64 nprocyield;
- uint64 nosyield;
- uint64 nsleep;
-};
-
-// A location in the program, used for backtraces.
-struct Location
-{
- uintptr pc;
- String filename;
- String function;
- intgo lineno;
-};
-
-struct G
-{
- Defer* defer;
- Panic* panic;
- void* exception; // current exception being thrown
- bool is_foreign; // whether current exception from other language
- void *gcstack; // if status==Gsyscall, gcstack = stackbase to use during gc
- size_t gcstack_size;
- void* gcnext_segment;
- void* gcnext_sp;
- void* gcinitial_sp;
- ucontext_t gcregs;
- byte* entry; // initial function
- void* param; // passed parameter on wakeup
- bool fromgogo; // reached from gogo
- int16 status;
- uint32 selgen; // valid sudog pointer
- int64 goid;
- int64 waitsince; // approx time when the G become blocked
- const char* waitreason; // if status==Gwaiting
- G* schedlink;
- bool ispanic;
- bool issystem; // do not output in stack dump
- bool isbackground; // ignore in deadlock detector
- bool paniconfault; // panic (instead of crash) on unexpected fault address
- M* m; // for debuggers, but offset not hard-coded
- M* lockedm;
- int32 sig;
- int32 writenbuf;
- byte* writebuf;
- uintptr sigcode0;
- uintptr sigcode1;
- // uintptr sigpc;
- uintptr gopc; // pc of go statement that created this goroutine
-
- int32 ncgo;
- CgoMal* cgomal;
-
- Traceback* traceback;
-
- ucontext_t context;
- void* stack_context[10];
-};
-
-struct M
-{
- G* g0; // goroutine with scheduling stack
- G* gsignal; // signal-handling G
- byte* gsignalstack;
- size_t gsignalstacksize;
- void (*mstartfn)(void);
- G* curg; // current running goroutine
- G* caughtsig; // goroutine running during fatal signal
- P* p; // attached P for executing Go code (nil if not executing Go code)
- P* nextp;
- int32 id;
- int32 mallocing;
- int32 throwing;
- int32 gcing;
- int32 locks;
- int32 softfloat;
- int32 dying;
- int32 profilehz;
- int32 helpgc;
- bool spinning; // M is out of work and is actively looking for work
- bool blocked; // M is blocked on a Note
- uint32 fastrand;
- uint64 ncgocall; // number of cgo calls in total
- int32 ncgo; // number of cgo calls currently in progress
- CgoMal* cgomal;
- Note park;
- M* alllink; // on allm
- M* schedlink;
- MCache *mcache;
- G* lockedg;
- Location createstack[32]; // Stack that created this thread.
- uint32 locked; // tracking for LockOSThread
- M* nextwaitm; // next M waiting for lock
- uintptr waitsema; // semaphore for parking on locks
- uint32 waitsemacount;
- uint32 waitsemalock;
- GCStats gcstats;
- bool needextram;
- bool dropextram; // for gccgo: drop after call is done.
- uint8 traceback;
- bool (*waitunlockf)(G*, void*);
- void* waitlock;
- uintptr end[];
-};
-
-struct P
-{
- Lock;
-
- int32 id;
- uint32 status; // one of Pidle/Prunning/...
- P* link;
- uint32 schedtick; // incremented on every scheduler call
- uint32 syscalltick; // incremented on every system call
- M* m; // back-link to associated M (nil if idle)
- MCache* mcache;
- Defer* deferpool; // pool of available Defer structs (see panic.c)
-
- // Cache of goroutine ids, amortizes accesses to runtime_sched.goidgen.
- uint64 goidcache;
- uint64 goidcacheend;
-
- // Queue of runnable goroutines.
- uint32 runqhead;
- uint32 runqtail;
- G* runq[256];
-
- // Available G's (status == Gdead)
- G* gfree;
- int32 gfreecnt;
-
- byte pad[64];
-};
-
-// The m->locked word holds two pieces of state counting active calls to LockOSThread/lockOSThread.
-// The low bit (LockExternal) is a boolean reporting whether any LockOSThread call is active.
-// External locks are not recursive; a second lock is silently ignored.
-// The upper bits of m->lockedcount record the nesting depth of calls to lockOSThread
-// (counting up by LockInternal), popped by unlockOSThread (counting down by LockInternal).
-// Internal locks can be recursive. For instance, a lock for cgo can occur while the main
-// goroutine is holding the lock during the initialization phase.
-enum
-{
- LockExternal = 1,
- LockInternal = 2,
-};
struct SigTab
{
@@ -331,26 +148,6 @@ struct SigTab
int32 flags;
void* fwdsig;
};
-enum
-{
- SigNotify = 1<<0, // let signal.Notify have signal, even if from kernel
- SigKill = 1<<1, // if signal.Notify doesn't take it, exit quietly
- SigThrow = 1<<2, // if signal.Notify doesn't take it, exit loudly
- SigPanic = 1<<3, // if the signal is from the kernel, panic
- SigDefault = 1<<4, // if the signal isn't explicitly requested, don't monitor it
- SigHandling = 1<<5, // our signal handler is registered
- SigGoExit = 1<<6, // cause all runtime procs to exit (only used on Plan 9).
-};
-
-// Layout of in-memory per-function information prepared by linker
-// See http://golang.org/s/go12symtab.
-// Keep in sync with linker and with ../../libmach/sym.c
-// and with package debug/gosym.
-struct Func
-{
- String name;
- uintptr entry; // entry pc
-};
#ifdef GOOS_nacl
enum {
@@ -381,43 +178,6 @@ enum {
};
#endif
-struct Timers
-{
- Lock;
- G *timerproc;
- bool sleeping;
- bool rescheduling;
- Note waitnote;
- Timer **t;
- int32 len;
- int32 cap;
-};
-
-// Package time knows the layout of this structure.
-// If this struct changes, adjust ../time/sleep.go:/runtimeTimer.
-// For GOOS=nacl, package syscall knows the layout of this structure.
-// If this struct changes, adjust ../syscall/net_nacl.go:/runtimeTimer.
-struct Timer
-{
- intgo i; // heap index
-
- // Timer wakes up at when, and then at when+period, ... (period > 0 only)
- // each time calling f(now, arg) in the timer goroutine, so f must be
- // a well-behaved function and not block.
- int64 when;
- int64 period;
- FuncVal *fv;
- Eface arg;
- uintptr seq;
-};
-
-// Lock-free stack node.
-struct LFNode
-{
- LFNode *next;
- uintptr pushcnt;
-};
-
// Parallel for descriptor.
struct ParFor
{
@@ -431,43 +191,13 @@ struct ParFor
// otherwise parfor may return while other threads are still working
ParForThread *thr; // array of thread descriptors
// stats
- uint64 nsteal;
+ uint64 nsteal __attribute__((aligned(8))); // force alignment for m68k
uint64 nstealcnt;
uint64 nprocyield;
uint64 nosyield;
uint64 nsleep;
};
-// Track memory allocated by code not written in Go during a cgo call,
-// so that the garbage collector can see them.
-struct CgoMal
-{
- CgoMal *next;
- void *alloc;
-};
-
-// Holds variables parsed from GODEBUG env var.
-struct DebugVars
-{
- int32 allocfreetrace;
- int32 cgocheck;
- int32 efence;
- int32 gccheckmark;
- int32 gcpacertrace;
- int32 gcshrinkstackoff;
- int32 gcstackbarrieroff;
- int32 gcstackbarrierall;
- int32 gcstoptheworld;
- int32 gctrace;
- int32 gcdead;
- int32 invalidptr;
- int32 sbrk;
- int32 scavenge;
- int32 scheddetail;
- int32 schedtrace;
- int32 wbshadow;
-};
-
extern bool runtime_precisestack;
extern bool runtime_copystack;
@@ -481,18 +211,16 @@ extern bool runtime_copystack;
#define USED(v) ((void) v)
#define ROUND(x, n) (((x)+(n)-1)&~(uintptr)((n)-1)) /* all-caps to mark as macro: it evaluates n twice */
-byte* runtime_startup_random_data;
-uint32 runtime_startup_random_data_len;
-void runtime_get_random_data(byte**, int32*);
-
enum {
// hashinit wants this many random bytes
HashRandomBytes = 32
};
void runtime_hashinit(void);
-void runtime_traceback(void);
-void runtime_tracebackothers(G*);
+void runtime_traceback(int32)
+ __asm__ (GOSYM_PREFIX "runtime.traceback");
+void runtime_tracebackothers(G*)
+ __asm__ (GOSYM_PREFIX "runtime.tracebackothers");
enum
{
// The maximum number of frames we print for a traceback
@@ -502,21 +230,23 @@ enum
/*
* external data
*/
-extern uintptr runtime_zerobase;
-extern G** runtime_allg;
-extern uintptr runtime_allglen;
+extern uintptr* runtime_getZerobase(void)
+ __asm__(GOSYM_PREFIX "runtime.getZerobase");
+extern G* runtime_getallg(intgo)
+ __asm__(GOSYM_PREFIX "runtime.getallg");
+extern uintptr runtime_getallglen(void)
+ __asm__(GOSYM_PREFIX "runtime.getallglen");
extern G* runtime_lastg;
-extern M* runtime_allm;
+extern M* runtime_getallm(void)
+ __asm__(GOSYM_PREFIX "runtime.getallm");
extern P** runtime_allp;
-extern int32 runtime_gomaxprocs;
-extern uint32 runtime_needextram;
-extern uint32 runtime_panicking;
+extern Sched* runtime_sched;
+extern uint32 runtime_panicking(void)
+ __asm__ (GOSYM_PREFIX "runtime.getPanicking");
extern int8* runtime_goos;
extern int32 runtime_ncpu;
extern void (*runtime_sysargs)(int32, uint8**);
-extern uint32 runtime_Hchansize;
-extern DebugVars runtime_debug;
-extern uintptr runtime_maxstacksize;
+extern struct debugVars runtime_debug;
extern bool runtime_isstarted;
extern bool runtime_isarchive;
@@ -527,63 +257,66 @@ extern bool runtime_isarchive;
#define runtime_strcmp(s1, s2) __builtin_strcmp((s1), (s2))
#define runtime_strncmp(s1, s2, n) __builtin_strncmp((s1), (s2), (n))
#define runtime_strstr(s1, s2) __builtin_strstr((s1), (s2))
-intgo runtime_findnull(const byte*);
-intgo runtime_findnullw(const uint16*);
-void runtime_dump(byte*, int32);
+intgo runtime_findnull(const byte*)
+ __asm__ (GOSYM_PREFIX "runtime.findnull");
-void runtime_gogo(G*);
+void runtime_gogo(G*)
+ __asm__ (GOSYM_PREFIX "runtime.gogo");
struct __go_func_type;
-void runtime_args(int32, byte**);
+void runtime_args(int32, byte**)
+ __asm__ (GOSYM_PREFIX "runtime.args");
void runtime_osinit();
-void runtime_goargs(void);
+void runtime_alginit(void)
+ __asm__ (GOSYM_PREFIX "runtime.alginit");
+void runtime_goargs(void)
+ __asm__ (GOSYM_PREFIX "runtime.goargs");
void runtime_goenvs(void);
-void runtime_goenvs_unix(void);
+void runtime_goenvs_unix(void)
+ __asm__ (GOSYM_PREFIX "runtime.goenvs_unix");
void runtime_throw(const char*) __attribute__ ((noreturn));
void runtime_panicstring(const char*) __attribute__ ((noreturn));
bool runtime_canpanic(G*);
-void runtime_prints(const char*);
void runtime_printf(const char*, ...);
int32 runtime_snprintf(byte*, int32, const char*, ...);
#define runtime_mcmp(a, b, s) __builtin_memcmp((a), (b), (s))
#define runtime_memmove(a, b, s) __builtin_memmove((a), (b), (s))
void* runtime_mal(uintptr);
-String runtime_gostring(const byte*);
-String runtime_gostringnocopy(const byte*);
+String runtime_gostringnocopy(const byte*)
+ __asm__ (GOSYM_PREFIX "runtime.gostringnocopy");
void runtime_schedinit(void);
-void runtime_initsig(bool);
-void runtime_sigenable(uint32 sig);
-void runtime_sigdisable(uint32 sig);
-void runtime_sigignore(uint32 sig);
+void runtime_initsig(bool)
+ __asm__ (GOSYM_PREFIX "runtime.initsig");
int32 runtime_gotraceback(bool *crash);
-void runtime_goroutineheader(G*);
-void runtime_printtrace(Location*, int32, bool);
+void runtime_goroutineheader(G*)
+ __asm__ (GOSYM_PREFIX "runtime.goroutineheader");
+void runtime_printtrace(Slice, G*)
+ __asm__ (GOSYM_PREFIX "runtime.printtrace");
#define runtime_open(p, f, m) open((p), (f), (m))
#define runtime_read(d, v, n) read((d), (v), (n))
#define runtime_write(d, v, n) write((d), (v), (n))
#define runtime_close(d) close(d)
-void runtime_ready(G*);
+void runtime_ready(G*, intgo, bool)
+ __asm__ (GOSYM_PREFIX "runtime.ready");
String runtime_getenv(const char*);
int32 runtime_atoi(const byte*, intgo);
void* runtime_mstart(void*);
-G* runtime_malg(int32, byte**, size_t*);
-void runtime_mpreinit(M*);
-void runtime_minit(void);
-void runtime_unminit(void);
-void runtime_needm(void);
-void runtime_dropm(void);
-void runtime_signalstack(byte*, int32);
-MCache* runtime_allocmcache(void);
-void runtime_freemcache(MCache*);
+G* runtime_malg(bool, bool, byte**, uintptr*)
+ __asm__(GOSYM_PREFIX "runtime.malg");
+void runtime_minit(void)
+ __asm__ (GOSYM_PREFIX "runtime.minit");
+void runtime_signalstack(byte*, uintptr)
+ __asm__ (GOSYM_PREFIX "runtime.signalstack");
+MCache* runtime_allocmcache(void)
+ __asm__ (GOSYM_PREFIX "runtime.allocmcache");
+void runtime_freemcache(MCache*)
+ __asm__ (GOSYM_PREFIX "runtime.freemcache");
void runtime_mallocinit(void);
void runtime_mprofinit(void);
-#define runtime_malloc(s) __go_alloc(s)
-#define runtime_free(p) __go_free(p)
-#define runtime_getcallersp(p) __builtin_frame_address(1)
-int32 runtime_mcount(void);
-int32 runtime_gcount(void);
+#define runtime_getcallersp(p) __builtin_frame_address(0)
void runtime_mcall(void(*)(G*));
-uint32 runtime_fastrand1(void);
-int32 runtime_timediv(int64, int32, int32*);
+uint32 runtime_fastrand(void) __asm__ (GOSYM_PREFIX "runtime.fastrand");
+int32 runtime_timediv(int64, int32, int32*)
+ __asm__ (GOSYM_PREFIX "runtime.timediv");
int32 runtime_round2(int32 x); // round x up to a power of 2.
// atomic operations
@@ -604,63 +337,72 @@ int32 runtime_round2(int32 x); // round x up to a power of 2.
#define runtime_atomicloadp(p) __atomic_load_n (p, __ATOMIC_SEQ_CST)
#define runtime_atomicstorep(p, v) __atomic_store_n (p, v, __ATOMIC_SEQ_CST)
-void runtime_setmg(M*, G*);
-void runtime_newextram(void);
+void runtime_setg(G*)
+ __asm__ (GOSYM_PREFIX "runtime.setg");
+void runtime_newextram(void)
+ __asm__ (GOSYM_PREFIX "runtime.newextram");
#define runtime_exit(s) exit(s)
#define runtime_breakpoint() __builtin_trap()
void runtime_gosched(void);
void runtime_gosched0(G*);
-void runtime_schedtrace(bool);
+void runtime_schedtrace(bool)
+ __asm__ (GOSYM_PREFIX "runtime.schedtrace");
void runtime_park(bool(*)(G*, void*), void*, const char*);
void runtime_parkunlock(Lock*, const char*);
void runtime_tsleep(int64, const char*);
M* runtime_newm(void);
-void runtime_goexit(void);
-void runtime_entersyscall(void) __asm__ (GOSYM_PREFIX "syscall.Entersyscall");
-void runtime_entersyscallblock(void);
-void runtime_exitsyscall(void) __asm__ (GOSYM_PREFIX "syscall.Exitsyscall");
+void runtime_goexit1(void)
+ __asm__ (GOSYM_PREFIX "runtime.goexit1");
+void runtime_entersyscall(int32)
+ __asm__ (GOSYM_PREFIX "runtime.entersyscall");
+void runtime_entersyscallblock(int32)
+ __asm__ (GOSYM_PREFIX "runtime.entersyscallblock");
+void runtime_exitsyscall(int32)
+ __asm__ (GOSYM_PREFIX "runtime.exitsyscall");
G* __go_go(void (*pfn)(void*), void*);
-void siginit(void);
-bool __go_sigsend(int32 sig);
int32 runtime_callers(int32, Location*, int32, bool keep_callers);
-int64 runtime_nanotime(void); // monotonic time
-int64 runtime_unixnanotime(void); // real time, can skip
+int64 runtime_nanotime(void) // monotonic time
+ __asm__(GOSYM_PREFIX "runtime.nanotime");
+int64 runtime_unixnanotime(void) // real time, can skip
+ __asm__ (GOSYM_PREFIX "runtime.unixnanotime");
void runtime_dopanic(int32) __attribute__ ((noreturn));
-void runtime_startpanic(void);
-void runtime_freezetheworld(void);
+void runtime_startpanic(void)
+ __asm__ (GOSYM_PREFIX "runtime.startpanic");
void runtime_unwindstack(G*, byte*);
-void runtime_sigprof();
-void runtime_resetcpuprofiler(int32);
-void runtime_setcpuprofilerate(void(*)(uintptr*, int32), int32);
-void runtime_usleep(uint32);
-int64 runtime_cputicks(void);
-int64 runtime_tickspersecond(void);
+void runtime_sigprof()
+ __asm__ (GOSYM_PREFIX "runtime.sigprof");
+void runtime_resetcpuprofiler(int32)
+ __asm__ (GOSYM_PREFIX "runtime.resetcpuprofiler");
+void runtime_setcpuprofilerate_m(int32)
+ __asm__ (GOSYM_PREFIX "runtime.setcpuprofilerate_m");
+void runtime_cpuprofAdd(Slice)
+ __asm__ (GOSYM_PREFIX "runtime.cpuprofAdd");
+void runtime_usleep(uint32)
+ __asm__ (GOSYM_PREFIX "runtime.usleep");
+int64 runtime_cputicks(void)
+ __asm__ (GOSYM_PREFIX "runtime.cputicks");
+int64 runtime_tickspersecond(void)
+ __asm__ (GOSYM_PREFIX "runtime.tickspersecond");
void runtime_blockevent(int64, int32);
extern int64 runtime_blockprofilerate;
-void runtime_addtimer(Timer*);
-bool runtime_deltimer(Timer*);
-G* runtime_netpoll(bool);
-void runtime_netpollinit(void);
-int32 runtime_netpollopen(uintptr, PollDesc*);
-int32 runtime_netpollclose(uintptr);
-void runtime_netpollready(G**, PollDesc*, int32);
-uintptr runtime_netpollfd(PollDesc*);
-void runtime_netpollarm(PollDesc*, int32);
-void** runtime_netpolluser(PollDesc*);
-bool runtime_netpollclosing(PollDesc*);
-void runtime_netpolllock(PollDesc*);
-void runtime_netpollunlock(PollDesc*);
-void runtime_crash(void);
-void runtime_parsedebugvars(void);
+G* runtime_netpoll(bool)
+ __asm__ (GOSYM_PREFIX "runtime.netpoll");
+void runtime_crash(void)
+ __asm__ (GOSYM_PREFIX "runtime.crash");
+void runtime_parsedebugvars(void)
+ __asm__(GOSYM_PREFIX "runtime.parsedebugvars");
void _rt0_go(void);
-void* runtime_funcdata(Func*, int32);
-int32 runtime_setmaxthreads(int32);
G* runtime_timejump(void);
void runtime_iterate_finq(void (*callback)(FuncVal*, void*, const FuncType*, const PtrType*));
-void runtime_stoptheworld(void);
-void runtime_starttheworld(void);
-extern uint32 runtime_worldsema;
+void runtime_stopTheWorldWithSema(void)
+ __asm__(GOSYM_PREFIX "runtime.stopTheWorldWithSema");
+void runtime_startTheWorldWithSema(void)
+ __asm__(GOSYM_PREFIX "runtime.startTheWorldWithSema");
+void runtime_acquireWorldsema(void)
+ __asm__(GOSYM_PREFIX "runtime.acquireWorldsema");
+void runtime_releaseWorldsema(void)
+ __asm__(GOSYM_PREFIX "runtime.releaseWorldsema");
/*
* mutual exclusion locks. in the uncontended case,
@@ -668,8 +410,10 @@ extern uint32 runtime_worldsema;
* but on the contention path they sleep in the kernel.
* a zeroed Lock is unlocked (no need to initialize each lock).
*/
-void runtime_lock(Lock*);
-void runtime_unlock(Lock*);
+void runtime_lock(Lock*)
+ __asm__(GOSYM_PREFIX "runtime.lock");
+void runtime_unlock(Lock*)
+ __asm__(GOSYM_PREFIX "runtime.unlock");
/*
* sleep and wakeup on one-time events.
@@ -693,21 +437,16 @@ void runtime_unlock(Lock*);
* notesleep/notetsleep are generally called on g0,
* notetsleepg is similar to notetsleep but is called on user g.
*/
-void runtime_noteclear(Note*);
-void runtime_notesleep(Note*);
-void runtime_notewakeup(Note*);
-bool runtime_notetsleep(Note*, int64); // false - timeout
-bool runtime_notetsleepg(Note*, int64); // false - timeout
-
-/*
- * low-level synchronization for implementing the above
- */
-uintptr runtime_semacreate(void);
-int32 runtime_semasleep(int64);
-void runtime_semawakeup(M*);
-// or
-void runtime_futexsleep(uint32*, uint32, int64);
-void runtime_futexwakeup(uint32*, uint32);
+void runtime_noteclear(Note*)
+ __asm__ (GOSYM_PREFIX "runtime.noteclear");
+void runtime_notesleep(Note*)
+ __asm__ (GOSYM_PREFIX "runtime.notesleep");
+void runtime_notewakeup(Note*)
+ __asm__ (GOSYM_PREFIX "runtime.notewakeup");
+bool runtime_notetsleep(Note*, int64) // false - timeout
+ __asm__ (GOSYM_PREFIX "runtime.notetsleep");
+bool runtime_notetsleepg(Note*, int64) // false - timeout
+ __asm__ (GOSYM_PREFIX "runtime.notetsleepg");
/*
* Lock-free stack.
@@ -717,7 +456,8 @@ void runtime_futexwakeup(uint32*, uint32);
*/
void runtime_lfstackpush(uint64 *head, LFNode *node)
__asm__ (GOSYM_PREFIX "runtime.lfstackpush");
-LFNode* runtime_lfstackpop(uint64 *head);
+void* runtime_lfstackpop(uint64 *head)
+ __asm__ (GOSYM_PREFIX "runtime.lfstackpop");
/*
* Parallel for over [0, n).
@@ -745,45 +485,19 @@ void __wrap_rtems_task_variable_add(void **);
#endif
/*
- * Names generated by gccgo.
- */
-#define runtime_printbool __go_print_bool
-#define runtime_printfloat __go_print_double
-#define runtime_printint __go_print_int64
-#define runtime_printiface __go_print_interface
-#define runtime_printeface __go_print_empty_interface
-#define runtime_printstring __go_print_string
-#define runtime_printpointer __go_print_pointer
-#define runtime_printuint __go_print_uint64
-#define runtime_printslice __go_print_slice
-#define runtime_printcomplex __go_print_complex
-
-/*
* runtime go-called
*/
-void runtime_printbool(_Bool);
-void runtime_printbyte(int8);
-void runtime_printfloat(double);
-void runtime_printint(int64);
-void runtime_printiface(Iface);
-void runtime_printeface(Eface);
-void runtime_printstring(String);
-void runtime_printpc(void*);
-void runtime_printpointer(void*);
-void runtime_printuint(uint64);
-void runtime_printhex(uint64);
-void runtime_printslice(Slice);
-void runtime_printcomplex(complex double);
void reflect_call(const struct __go_func_type *, FuncVal *, _Bool, _Bool,
void **, void **)
__asm__ (GOSYM_PREFIX "reflect.call");
-#define runtime_panic __go_panic
+void runtime_panic(Eface)
+ __asm__ (GOSYM_PREFIX "runtime.gopanic");
+void runtime_panic(Eface)
+ __attribute__ ((noreturn));
/*
* runtime c-called (but written in Go)
*/
-void runtime_printany(Eface)
- __asm__ (GOSYM_PREFIX "runtime.Printany");
void runtime_newTypeAssertionError(const String*, const String*, const String*, const String*, Eface*)
__asm__ (GOSYM_PREFIX "runtime.NewTypeAssertionError");
void runtime_newErrorCString(const char*, Eface*)
@@ -792,17 +506,17 @@ void runtime_newErrorCString(const char*, Eface*)
/*
* wrapped for go users
*/
-void runtime_semacquire(uint32 volatile *, bool);
-void runtime_semrelease(uint32 volatile *);
-int32 runtime_gomaxprocsfunc(int32 n);
-void runtime_procyield(uint32);
-void runtime_osyield(void);
-void runtime_lockOSThread(void);
-void runtime_unlockOSThread(void);
-bool runtime_lockedOSThread(void);
-
-bool runtime_showframe(String, bool);
-void runtime_printcreatedby(G*);
+void runtime_procyield(uint32)
+ __asm__(GOSYM_PREFIX "runtime.procyield");
+void runtime_osyield(void)
+ __asm__(GOSYM_PREFIX "runtime.osyield");
+void runtime_lockOSThread(void)
+ __asm__(GOSYM_PREFIX "runtime.lockOSThread");
+void runtime_unlockOSThread(void)
+ __asm__(GOSYM_PREFIX "runtime.unlockOSThread");
+
+void runtime_printcreatedby(G*)
+ __asm__(GOSYM_PREFIX "runtime.printcreatedby");
uintptr runtime_memlimit(void);
@@ -815,7 +529,8 @@ enum
#define runtime_setitimer setitimer
-void runtime_check(void);
+void runtime_check(void)
+ __asm__ (GOSYM_PREFIX "runtime.check");
// A list of global variables that the garbage collector must scan.
struct root_list {
@@ -835,8 +550,7 @@ extern uintptr runtime_stacks_sys;
struct backtrace_state;
extern struct backtrace_state *__go_get_backtrace_state(void);
-extern _Bool __go_file_line(uintptr, String*, String*, intgo *);
-extern byte* runtime_progname();
+extern _Bool __go_file_line(uintptr, int, String*, String*, intgo *);
extern void runtime_main(void*);
extern uint32 runtime_in_callers;
@@ -861,6 +575,22 @@ struct time_now_ret now() __asm__ (GOSYM_PREFIX "time.now")
extern void _cgo_wait_runtime_init_done (void);
extern void _cgo_notify_runtime_init_done (void);
extern _Bool runtime_iscgo;
-extern _Bool runtime_cgoHasExtraM;
-extern Hchan *runtime_main_init_done;
extern uintptr __go_end __attribute__ ((weak));
+extern void *getitab(const struct __go_type_descriptor *,
+ const struct __go_type_descriptor *,
+ _Bool)
+ __asm__ (GOSYM_PREFIX "runtime.getitab");
+
+extern void runtime_cpuinit(void);
+extern void setIsCgo(void)
+ __asm__ (GOSYM_PREFIX "runtime.setIsCgo");
+extern void setCpuidECX(uint32)
+ __asm__ (GOSYM_PREFIX "runtime.setCpuidECX");
+extern void setSupportAES(bool)
+ __asm__ (GOSYM_PREFIX "runtime.setSupportAES");
+extern void makeMainInitDone(void)
+ __asm__ (GOSYM_PREFIX "runtime.makeMainInitDone");
+extern void closeMainInitDone(void)
+ __asm__ (GOSYM_PREFIX "runtime.closeMainInitDone");
+extern void typedmemmove(const Type *, void *, const void *)
+ __asm__ (GOSYM_PREFIX "runtime.typedmemmove");
diff --git a/libgo/runtime/runtime1.goc b/libgo/runtime/runtime1.goc
deleted file mode 100644
index cd9d3017b5..0000000000
--- a/libgo/runtime/runtime1.goc
+++ /dev/null
@@ -1,96 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package runtime
-#include "runtime.h"
-#include "arch.h"
-#include "go-type.h"
-
-func GOMAXPROCS(n int) (ret int) {
- ret = runtime_gomaxprocsfunc(n);
-}
-
-func NumCPU() (ret int) {
- ret = runtime_ncpu;
-}
-
-func NumCgoCall() (ret int64) {
- M *mp;
-
- ret = 0;
- for(mp=runtime_atomicloadp(&runtime_allm); mp; mp=mp->alllink)
- ret += mp->ncgocall;
-}
-
-func newParFor(nthrmax uint32) (desc *ParFor) {
- desc = runtime_parforalloc(nthrmax);
-}
-
-func parForSetup(desc *ParFor, nthr uint32, n uint32, wait bool, body *byte) {
- runtime_parforsetup(desc, nthr, n, wait, (const FuncVal*) body);
-}
-
-func parForDo(desc *ParFor) {
- runtime_parfordo(desc);
-}
-
-func parForIters(desc *ParFor, tid uintptr) (start uintptr, end uintptr) {
- runtime_parforiters(desc, tid, &start, &end);
-}
-
-func typestring(e Eface) (s String) {
- s = *e.__type_descriptor->__reflection;
-}
-
-func golockedOSThread() (ret bool) {
- ret = runtime_lockedOSThread();
-}
-
-func NumGoroutine() (ret int) {
- ret = runtime_gcount();
-}
-
-func getgoroot() (out String) {
- out = runtime_getenv("GOROOT");
-}
-
-func runtime_pprof.runtime_cyclesPerSecond() (res int64) {
- res = runtime_tickspersecond();
-}
-
-func sync.runtime_procPin() (p int) {
- M *mp;
-
- mp = runtime_m();
- // Disable preemption.
- mp->locks++;
- p = mp->p->id;
-}
-
-func sync.runtime_procUnpin() {
- runtime_m()->locks--;
-}
-
-func sync_atomic.runtime_procPin() (p int) {
- M *mp;
-
- mp = runtime_m();
- // Disable preemption.
- mp->locks++;
- p = mp->p->id;
-}
-
-func sync_atomic.runtime_procUnpin() {
- runtime_m()->locks--;
-}
-
-extern Slice envs;
-
-func envs() (s Slice) {
- s = envs;
-}
-
-func setenvs(e Slice) {
- envs = e;
-}
diff --git a/libgo/runtime/runtime_c.c b/libgo/runtime/runtime_c.c
new file mode 100644
index 0000000000..9883e0a6c9
--- /dev/null
+++ b/libgo/runtime/runtime_c.c
@@ -0,0 +1,190 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include <errno.h>
+#include <signal.h>
+#include <unistd.h>
+
+#if defined(__i386__) || defined(__x86_64__)
+#include <cpuid.h>
+#endif
+
+#include "config.h"
+
+#include "runtime.h"
+#include "arch.h"
+#include "array.h"
+
+enum {
+ maxround = sizeof(uintptr),
+};
+
+extern volatile intgo runtime_MemProfileRate
+ __asm__ (GOSYM_PREFIX "runtime.MemProfileRate");
+
+struct gotraceback_ret {
+ int32 level;
+ bool all;
+ bool crash;
+};
+
+extern struct gotraceback_ret gotraceback(void)
+ __asm__ (GOSYM_PREFIX "runtime.gotraceback");
+
+// runtime_gotraceback is the C interface to runtime.gotraceback.
+// Returns the GOTRACEBACK level; when crash is non-nil, also stores
+// whether a crash (core dump) was requested.
+int32
+runtime_gotraceback(bool *crash)
+{
+	struct gotraceback_ret r;
+
+	r = gotraceback();	// Go-side runtime.gotraceback (declared above)
+	if(crash != nil)
+		*crash = r.crash;
+	return r.level;
+}
+
+// runtime_atoi parses a non-negative decimal integer from at most len
+// bytes at p, stopping at the first non-digit (or when len runs out).
+// No overflow checking: intended only for small runtime settings.
+int32
+runtime_atoi(const byte *p, intgo len)
+{
+	int32 n;
+
+	n = 0;
+	while(len > 0 && '0' <= *p && *p <= '9') {
+		n = n*10 + *p++ - '0';
+		len--;
+	}
+	return n;
+}
+
+// runtime_fastrand returns the next value of the per-M pseudo-random
+// generator (state in m->fastrand).  It is a cheap shift/xor generator,
+// not cryptographically secure; used e.g. for scheduling decisions.
+uint32
+runtime_fastrand(void)
+{
+	M *m;
+	uint32 x;
+
+	m = runtime_m();
+	x = m->fastrand;
+	x += x;			// shift left by one
+	if(x & 0x80000000L)	// feed the shifted-out bit back in
+		x ^= 0x88888eefUL;
+	m->fastrand = x;
+	return x;
+}
+
+// runtime_cputicks returns a fast CPU timestamp.  It is used to seed
+// fastrand and by the blocking profiler, so it need not be strictly
+// monotonic across CPUs.
+int64
+runtime_cputicks(void)
+{
+#if defined(__i386__) || defined(__x86_64__)
+	// GCC predefines __i386__ (not __386__) for 32-bit x86; with the
+	// misspelled macro, 32-bit x86 silently fell through to the
+	// nanotime fallback instead of using RDTSC.
+	uint32 low, high;
+	asm("rdtsc" : "=a" (low), "=d" (high));
+	return (int64)(((uint64)high << 32) | (uint64)low);
+#elif defined (__s390__) || defined (__s390x__)
+	uint64 clock = 0;
+	/* stckf may not write the return variable in case of a clock error, so make
+	   it read-write to prevent that the initialisation is optimised out.
+	   Note: Targets below z9-109 will crash when executing store clock fast, i.e.
+	   we don't support Go for machines older than that. */
+	asm volatile(".insn s,0xb27c0000,%0" /* stckf */ : "+Q" (clock) : : "cc" );
+	return (int64)clock;
+#else
+	// Currently cputicks() is used in blocking profiler and to seed runtime·fastrand().
+	// runtime·nanotime() is a poor approximation of CPU ticks that is enough for the profiler.
+	// TODO: need more entropy to better seed fastrand.
+	return runtime_nanotime();
+#endif
+}
+
+// runtime_signalstack installs p[0..n) as the alternate signal stack,
+// or disables the alternate stack when p is nil.
+void
+runtime_signalstack(byte *p, uintptr n)
+{
+	stack_t st;
+
+	st.ss_sp = p;
+	st.ss_size = n;
+	st.ss_flags = 0;
+	if(p == nil)
+		st.ss_flags = SS_DISABLE;
+	if(sigaltstack(&st, nil) < 0)
+		*(int *)0xf1 = 0xf1;	// deliberate crash at a recognizable address
+}
+
+// C-side copy of the runtime debug settings (GODEBUG).
+struct debugVars runtime_debug;
+
+// runtime_setdebug copies the Go runtime's debug settings into the
+// C-visible runtime_debug so C code can consult them.
+void
+runtime_setdebug(struct debugVars* d) {
+	runtime_debug = *d;
+}
+
+// memclrBytes zeroes the bytes of a Go []byte; exported to Go as
+// runtime.memclrBytes.
+void memclrBytes(Slice)
+     __asm__ (GOSYM_PREFIX "runtime.memclrBytes");
+
+void
+memclrBytes(Slice s)
+{
+	runtime_memclr(s.__values, s.__count);
+}
+
+// go_open wraps runtime_open for Go code (exported as runtime.open).
+int32 go_open(char *, int32, int32)
+  __asm__ (GOSYM_PREFIX "runtime.open");
+
+int32
+go_open(char *name, int32 mode, int32 perm)
+{
+	return runtime_open(name, mode, perm);
+}
+
+// go_read wraps runtime_read for Go code (exported as runtime.read).
+int32 go_read(int32, void *, int32)
+  __asm__ (GOSYM_PREFIX "runtime.read");
+
+int32
+go_read(int32 fd, void *p, int32 n)
+{
+	return runtime_read(fd, p, n);
+}
+
+// go_write wraps runtime_write for Go code (exported as runtime.write).
+// Note: the Go side passes the descriptor as uintptr, unlike the other
+// wrappers here.
+int32 go_write(uintptr, void *, int32)
+  __asm__ (GOSYM_PREFIX "runtime.write");
+
+int32
+go_write(uintptr fd, void *p, int32 n)
+{
+	return runtime_write(fd, p, n);
+}
+
+// go_closefd wraps runtime_close for Go code (exported as
+// runtime.closefd).
+int32 go_closefd(int32)
+  __asm__ (GOSYM_PREFIX "runtime.closefd");
+
+int32
+go_closefd(int32 fd)
+{
+	return runtime_close(fd);
+}
+
+// go_errno exposes the C errno of the current thread to Go code
+// (exported as runtime.errno).
+intgo go_errno(void)
+  __asm__ (GOSYM_PREFIX "runtime.errno");
+
+intgo
+go_errno()
+{
+	return (intgo)errno;
+}
+
+// CPU-specific initialization.
+// Fetch CPUID info on x86.
+
+// runtime_cpuinit passes x86 CPUID feature bits (ECX of leaf 1) to the
+// Go runtime via setCpuidECX, and reports AES instruction support when
+// the assembler was configured with AES support (HAVE_AS_X86_AES).
+// On non-x86 targets this is a no-op.
+void
+runtime_cpuinit()
+{
+#if defined(__i386__) || defined(__x86_64__)
+	unsigned int eax, ebx, ecx, edx;
+
+	if (__get_cpuid(1, &eax, &ebx, &ecx, &edx)) {
+		setCpuidECX(ecx);
+	}
+
+#if defined(HAVE_AS_X86_AES)
+	setSupportAES(true);
+#endif
+#endif
+}
diff --git a/libgo/runtime/sema.goc b/libgo/runtime/sema.goc
deleted file mode 100644
index 50f0e973d7..0000000000
--- a/libgo/runtime/sema.goc
+++ /dev/null
@@ -1,299 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Semaphore implementation exposed to Go.
-// Intended use is provide a sleep and wakeup
-// primitive that can be used in the contended case
-// of other synchronization primitives.
-// Thus it targets the same goal as Linux's futex,
-// but it has much simpler semantics.
-//
-// That is, don't think of these as semaphores.
-// Think of them as a way to implement sleep and wakeup
-// such that every sleep is paired with a single wakeup,
-// even if, due to races, the wakeup happens before the sleep.
-//
-// See Mullender and Cox, ``Semaphores in Plan 9,''
-// http://swtch.com/semaphore.pdf
-
-package sync
-#include "runtime.h"
-#include "arch.h"
-
-typedef struct SemaWaiter SemaWaiter;
-struct SemaWaiter
-{
- uint32 volatile* addr;
- G* g;
- int64 releasetime;
- int32 nrelease; // -1 for acquire
- SemaWaiter* prev;
- SemaWaiter* next;
-};
-
-typedef struct SemaRoot SemaRoot;
-struct SemaRoot
-{
- Lock;
- SemaWaiter* head;
- SemaWaiter* tail;
- // Number of waiters. Read w/o the lock.
- uint32 volatile nwait;
-};
-
-// Prime to not correlate with any user patterns.
-#define SEMTABLESZ 251
-
-struct semtable
-{
- SemaRoot;
- uint8 pad[CacheLineSize-sizeof(SemaRoot)];
-};
-static struct semtable semtable[SEMTABLESZ];
-
-static SemaRoot*
-semroot(uint32 volatile *addr)
-{
- return &semtable[((uintptr)addr >> 3) % SEMTABLESZ];
-}
-
-static void
-semqueue(SemaRoot *root, uint32 volatile *addr, SemaWaiter *s)
-{
- s->g = runtime_g();
- s->addr = addr;
- s->next = nil;
- s->prev = root->tail;
- if(root->tail)
- root->tail->next = s;
- else
- root->head = s;
- root->tail = s;
-}
-
-static void
-semdequeue(SemaRoot *root, SemaWaiter *s)
-{
- if(s->next)
- s->next->prev = s->prev;
- else
- root->tail = s->prev;
- if(s->prev)
- s->prev->next = s->next;
- else
- root->head = s->next;
- s->prev = nil;
- s->next = nil;
-}
-
-static int32
-cansemacquire(uint32 volatile *addr)
-{
- uint32 v;
-
- while((v = runtime_atomicload(addr)) > 0)
- if(runtime_cas(addr, v, v-1))
- return 1;
- return 0;
-}
-
-void
-runtime_semacquire(uint32 volatile *addr, bool profile)
-{
- SemaWaiter s; // Needs to be allocated on stack, otherwise garbage collector could deallocate it
- SemaRoot *root;
- int64 t0;
-
- // Easy case.
- if(cansemacquire(addr))
- return;
-
- // Harder case:
- // increment waiter count
- // try cansemacquire one more time, return if succeeded
- // enqueue itself as a waiter
- // sleep
- // (waiter descriptor is dequeued by signaler)
- root = semroot(addr);
- t0 = 0;
- s.releasetime = 0;
- if(profile && runtime_blockprofilerate > 0) {
- t0 = runtime_cputicks();
- s.releasetime = -1;
- }
- for(;;) {
-
- runtime_lock(root);
- // Add ourselves to nwait to disable "easy case" in semrelease.
- runtime_xadd(&root->nwait, 1);
- // Check cansemacquire to avoid missed wakeup.
- if(cansemacquire(addr)) {
- runtime_xadd(&root->nwait, -1);
- runtime_unlock(root);
- return;
- }
- // Any semrelease after the cansemacquire knows we're waiting
- // (we set nwait above), so go to sleep.
- semqueue(root, addr, &s);
- runtime_parkunlock(root, "semacquire");
- if(cansemacquire(addr)) {
- if(t0)
- runtime_blockevent(s.releasetime - t0, 3);
- return;
- }
- }
-}
-
-void
-runtime_semrelease(uint32 volatile *addr)
-{
- SemaWaiter *s;
- SemaRoot *root;
-
- root = semroot(addr);
- runtime_xadd(addr, 1);
-
- // Easy case: no waiters?
- // This check must happen after the xadd, to avoid a missed wakeup
- // (see loop in semacquire).
- if(runtime_atomicload(&root->nwait) == 0)
- return;
-
- // Harder case: search for a waiter and wake it.
- runtime_lock(root);
- if(runtime_atomicload(&root->nwait) == 0) {
- // The count is already consumed by another goroutine,
- // so no need to wake up another goroutine.
- runtime_unlock(root);
- return;
- }
- for(s = root->head; s; s = s->next) {
- if(s->addr == addr) {
- runtime_xadd(&root->nwait, -1);
- semdequeue(root, s);
- break;
- }
- }
- runtime_unlock(root);
- if(s) {
- if(s->releasetime)
- s->releasetime = runtime_cputicks();
- runtime_ready(s->g);
- }
-}
-
-// TODO(dvyukov): move to netpoll.goc once it's used by all OSes.
-void net_runtime_Semacquire(uint32 *addr)
- __asm__ (GOSYM_PREFIX "net.runtime_Semacquire");
-
-void net_runtime_Semacquire(uint32 *addr)
-{
- runtime_semacquire(addr, true);
-}
-
-void net_runtime_Semrelease(uint32 *addr)
- __asm__ (GOSYM_PREFIX "net.runtime_Semrelease");
-
-void net_runtime_Semrelease(uint32 *addr)
-{
- runtime_semrelease(addr);
-}
-
-func runtime_Semacquire(addr *uint32) {
- runtime_semacquire(addr, true);
-}
-
-func runtime_Semrelease(addr *uint32) {
- runtime_semrelease(addr);
-}
-
-typedef struct SyncSema SyncSema;
-struct SyncSema
-{
- Lock;
- SemaWaiter* head;
- SemaWaiter* tail;
-};
-
-func runtime_Syncsemcheck(size uintptr) {
- if(size != sizeof(SyncSema)) {
- runtime_printf("bad SyncSema size: sync:%D runtime:%D\n", (int64)size, (int64)sizeof(SyncSema));
- runtime_throw("bad SyncSema size");
- }
-}
-
-// Syncsemacquire waits for a pairing Syncsemrelease on the same semaphore s.
-func runtime_Syncsemacquire(s *SyncSema) {
- SemaWaiter w, *wake;
- int64 t0;
-
- w.g = runtime_g();
- w.nrelease = -1;
- w.next = nil;
- w.releasetime = 0;
- t0 = 0;
- if(runtime_blockprofilerate > 0) {
- t0 = runtime_cputicks();
- w.releasetime = -1;
- }
-
- runtime_lock(s);
- if(s->head && s->head->nrelease > 0) {
- // have pending release, consume it
- wake = nil;
- s->head->nrelease--;
- if(s->head->nrelease == 0) {
- wake = s->head;
- s->head = wake->next;
- if(s->head == nil)
- s->tail = nil;
- }
- runtime_unlock(s);
- if(wake)
- runtime_ready(wake->g);
- } else {
- // enqueue itself
- if(s->tail == nil)
- s->head = &w;
- else
- s->tail->next = &w;
- s->tail = &w;
- runtime_parkunlock(s, "semacquire");
- if(t0)
- runtime_blockevent(w.releasetime - t0, 2);
- }
-}
-
-// Syncsemrelease waits for n pairing Syncsemacquire on the same semaphore s.
-func runtime_Syncsemrelease(s *SyncSema, n uint32) {
- SemaWaiter w, *wake;
-
- w.g = runtime_g();
- w.nrelease = (int32)n;
- w.next = nil;
- w.releasetime = 0;
-
- runtime_lock(s);
- while(w.nrelease > 0 && s->head && s->head->nrelease < 0) {
- // have pending acquire, satisfy it
- wake = s->head;
- s->head = wake->next;
- if(s->head == nil)
- s->tail = nil;
- if(wake->releasetime)
- wake->releasetime = runtime_cputicks();
- runtime_ready(wake->g);
- w.nrelease--;
- }
- if(w.nrelease > 0) {
- // enqueue itself
- if(s->tail == nil)
- s->head = &w;
- else
- s->tail->next = &w;
- s->tail = &w;
- runtime_parkunlock(s, "semarelease");
- } else
- runtime_unlock(s);
-}
diff --git a/libgo/runtime/signal_unix.c b/libgo/runtime/signal_unix.c
deleted file mode 100644
index 5bee0d2a70..0000000000
--- a/libgo/runtime/signal_unix.c
+++ /dev/null
@@ -1,176 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build darwin dragonfly freebsd linux netbsd openbsd solaris
-
-#include <sys/time.h>
-
-#include "runtime.h"
-#include "defs.h"
-#include "signal_unix.h"
-
-extern SigTab runtime_sigtab[];
-
-void
-runtime_initsig(bool preinit)
-{
- int32 i;
- SigTab *t;
-
- // For c-archive/c-shared this is called by go-libmain.c with
- // preinit == true.
- if(runtime_isarchive && !preinit)
- return;
-
- // First call: basic setup.
- for(i = 0; runtime_sigtab[i].sig != -1; i++) {
- t = &runtime_sigtab[i];
- if((t->flags == 0) || (t->flags & SigDefault))
- continue;
-
- t->fwdsig = runtime_getsig(i);
-
- // For some signals, we respect an inherited SIG_IGN handler
- // rather than insist on installing our own default handler.
- // Even these signals can be fetched using the os/signal package.
- switch(t->sig) {
- case SIGHUP:
- case SIGINT:
- if(t->fwdsig == GO_SIG_IGN) {
- continue;
- }
- }
-
- if(runtime_isarchive && (t->flags&SigPanic) == 0)
- continue;
-
- t->flags |= SigHandling;
- runtime_setsig(i, runtime_sighandler, true);
- }
-}
-
-void
-runtime_sigenable(uint32 sig)
-{
- int32 i;
- SigTab *t;
-
- t = nil;
- for(i = 0; runtime_sigtab[i].sig != -1; i++) {
- if(runtime_sigtab[i].sig == (int32)sig) {
- t = &runtime_sigtab[i];
- break;
- }
- }
-
- if(t == nil)
- return;
-
- if((t->flags & SigNotify) && !(t->flags & SigHandling)) {
- t->flags |= SigHandling;
- t->fwdsig = runtime_getsig(i);
- runtime_setsig(i, runtime_sighandler, true);
- }
-}
-
-void
-runtime_sigdisable(uint32 sig)
-{
- int32 i;
- SigTab *t;
-
- t = nil;
- for(i = 0; runtime_sigtab[i].sig != -1; i++) {
- if(runtime_sigtab[i].sig == (int32)sig) {
- t = &runtime_sigtab[i];
- break;
- }
- }
-
- if(t == nil)
- return;
-
- if((sig == SIGHUP || sig == SIGINT) && t->fwdsig == GO_SIG_IGN) {
- t->flags &= ~SigHandling;
- runtime_setsig(i, t->fwdsig, true);
- }
-}
-
-void
-runtime_sigignore(uint32 sig)
-{
- int32 i;
- SigTab *t;
-
- t = nil;
- for(i = 0; runtime_sigtab[i].sig != -1; i++) {
- if(runtime_sigtab[i].sig == (int32)sig) {
- t = &runtime_sigtab[i];
- break;
- }
- }
-
- if(t == nil)
- return;
-
- if((t->flags & SigNotify) != 0) {
- t->flags &= ~SigHandling;
- runtime_setsig(i, GO_SIG_IGN, true);
- }
-}
-
-void
-runtime_resetcpuprofiler(int32 hz)
-{
- struct itimerval it;
-
- runtime_memclr((byte*)&it, sizeof it);
- if(hz == 0) {
- runtime_setitimer(ITIMER_PROF, &it, nil);
- } else {
- it.it_interval.tv_sec = 0;
- it.it_interval.tv_usec = 1000000 / hz;
- it.it_value = it.it_interval;
- runtime_setitimer(ITIMER_PROF, &it, nil);
- }
- runtime_m()->profilehz = hz;
-}
-
-void
-runtime_unblocksignals(void)
-{
- sigset_t sigset_none;
- sigemptyset(&sigset_none);
- pthread_sigmask(SIG_SETMASK, &sigset_none, nil);
-}
-
-void
-runtime_crash(void)
-{
- int32 i;
-
-#ifdef GOOS_darwin
- // OS X core dumps are linear dumps of the mapped memory,
- // from the first virtual byte to the last, with zeros in the gaps.
- // Because of the way we arrange the address space on 64-bit systems,
- // this means the OS X core file will be >128 GB and even on a zippy
- // workstation can take OS X well over an hour to write (uninterruptible).
- // Save users from making that mistake.
- if(sizeof(void*) == 8)
- return;
-#endif
-
- runtime_unblocksignals();
- for(i = 0; runtime_sigtab[i].sig != -1; i++)
- if(runtime_sigtab[i].sig == SIGABRT)
- break;
- runtime_setsig(i, GO_SIG_DFL, false);
- runtime_raise(SIGABRT);
-}
-
-void
-runtime_raise(int32 sig)
-{
- raise(sig);
-}
diff --git a/libgo/runtime/signal_unix.h b/libgo/runtime/signal_unix.h
deleted file mode 100644
index 1c51740bf1..0000000000
--- a/libgo/runtime/signal_unix.h
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-#include <signal.h>
-
-#define GO_SIG_DFL ((void*)SIG_DFL)
-#define GO_SIG_IGN ((void*)SIG_IGN)
-
-#ifdef SA_SIGINFO
-typedef siginfo_t Siginfo;
-#else
-typedef void *Siginfo;
-#endif
-
-typedef void GoSighandler(int32, Siginfo*, void*, G*);
-void runtime_setsig(int32, GoSighandler*, bool);
-GoSighandler* runtime_getsig(int32);
-
-void runtime_sighandler(int32 sig, Siginfo *info, void *context, G *gp);
-void runtime_raise(int32);
-
diff --git a/libgo/runtime/sigqueue.goc b/libgo/runtime/sigqueue.goc
deleted file mode 100644
index fba1c71e2c..0000000000
--- a/libgo/runtime/sigqueue.goc
+++ /dev/null
@@ -1,172 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file implements runtime support for signal handling.
-//
-// Most synchronization primitives are not available from
-// the signal handler (it cannot block, allocate memory, or use locks)
-// so the handler communicates with a processing goroutine
-// via struct sig, below.
-//
-// sigsend() is called by the signal handler to queue a new signal.
-// signal_recv() is called by the Go program to receive a newly queued signal.
-// Synchronization between sigsend() and signal_recv() is based on the sig.state
-// variable. It can be in 3 states: 0, HASWAITER and HASSIGNAL.
-// HASWAITER means that signal_recv() is blocked on sig.Note and there are no
-// new pending signals.
-// HASSIGNAL means that sig.mask *may* contain new pending signals,
-// signal_recv() can't be blocked in this state.
-// 0 means that there are no new pending signals and signal_recv() is not blocked.
-// Transitions between states are done atomically with CAS.
-// When signal_recv() is unblocked, it resets sig.Note and rechecks sig.mask.
-// If several sigsend()'s and signal_recv() execute concurrently, it can lead to
-// unnecessary rechecks of sig.mask, but must not lead to missed signals
-// nor deadlocks.
-
-package signal
-#include "config.h"
-#include "runtime.h"
-#include "arch.h"
-#include "malloc.h"
-#include "defs.h"
-
-static struct {
- Note;
- uint32 mask[(NSIG+31)/32];
- uint32 wanted[(NSIG+31)/32];
- uint32 state;
- bool inuse;
-} sig;
-
-enum {
- HASWAITER = 1,
- HASSIGNAL = 2,
-};
-
-// Called from sighandler to send a signal back out of the signal handling thread.
-bool
-__go_sigsend(int32 s)
-{
- uint32 bit, mask, old, new;
-
- if(!sig.inuse || s < 0 || (size_t)s >= 32*nelem(sig.wanted) || !(sig.wanted[s/32]&(1U<<(s&31))))
- return false;
- bit = 1 << (s&31);
- for(;;) {
- mask = sig.mask[s/32];
- if(mask & bit)
- break; // signal already in queue
- if(runtime_cas(&sig.mask[s/32], mask, mask|bit)) {
- // Added to queue.
- // Only send a wakeup if the receiver needs a kick.
- for(;;) {
- old = runtime_atomicload(&sig.state);
- if(old == HASSIGNAL)
- break;
- if(old == HASWAITER)
- new = 0;
- else // if(old == 0)
- new = HASSIGNAL;
- if(runtime_cas(&sig.state, old, new)) {
- if (old == HASWAITER)
- runtime_notewakeup(&sig);
- break;
- }
- }
- break;
- }
- }
- return true;
-}
-
-// Called to receive the next queued signal.
-// Must only be called from a single goroutine at a time.
-func signal_recv() (m uint32) {
- static uint32 recv[nelem(sig.mask)];
- uint32 i, old, new;
-
- for(;;) {
- // Serve from local copy if there are bits left.
- for(i=0; i<NSIG; i++) {
- if(recv[i/32]&(1U<<(i&31))) {
- recv[i/32] ^= 1U<<(i&31);
- m = i;
- goto done;
- }
- }
-
- // Check and update sig.state.
- for(;;) {
- old = runtime_atomicload(&sig.state);
- if(old == HASWAITER)
- runtime_throw("inconsistent state in signal_recv");
- if(old == HASSIGNAL)
- new = 0;
- else // if(old == 0)
- new = HASWAITER;
- if(runtime_cas(&sig.state, old, new)) {
- if (new == HASWAITER) {
- runtime_notetsleepg(&sig, -1);
- runtime_noteclear(&sig);
- }
- break;
- }
- }
-
- // Get a new local copy.
- for(i=0; (size_t)i<nelem(sig.mask); i++) {
- for(;;) {
- m = sig.mask[i];
- if(runtime_cas(&sig.mask[i], m, 0))
- break;
- }
- recv[i] = m;
- }
- }
-
-done:;
- // goc requires that we fall off the end of functions
- // that return values instead of using our own return
- // statements.
-}
-
-// Must only be called from a single goroutine at a time.
-func signal_enable(s uint32) {
- if(!sig.inuse) {
- // The first call to signal_enable is for us
- // to use for initialization. It does not pass
- // signal information in m.
- sig.inuse = true; // enable reception of signals; cannot disable
- runtime_noteclear(&sig);
- return;
- }
-
- if(s >= nelem(sig.wanted)*32)
- return;
- sig.wanted[s/32] |= 1U<<(s&31);
- runtime_sigenable(s);
-}
-
-// Must only be called from a single goroutine at a time.
-func signal_disable(s uint32) {
- if(s >= nelem(sig.wanted)*32)
- return;
- sig.wanted[s/32] &= ~(1U<<(s&31));
- runtime_sigdisable(s);
-}
-
-// Must only be called from a single goroutine at a time.
-func signal_ignore(s uint32) {
- if (s >= nelem(sig.wanted)*32)
- return;
- sig.wanted[s/32] &= ~(1U<<(s&31));
- runtime_sigignore(s);
-}
-
-// This runs on a foreign stack, without an m or a g. No stack split.
-void
-runtime_badsignal(int sig)
-{
- __go_sigsend(sig);
-}
diff --git a/libgo/runtime/string.goc b/libgo/runtime/string.goc
deleted file mode 100644
index 0ad180b983..0000000000
--- a/libgo/runtime/string.goc
+++ /dev/null
@@ -1,123 +0,0 @@
-// Copyright 2009, 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package runtime
-#include "runtime.h"
-#include "arch.h"
-#include "malloc.h"
-#include "go-string.h"
-
-#define charntorune(pv, str, len) __go_get_rune(str, len, pv)
-
-const String runtime_emptystring;
-
-intgo
-runtime_findnull(const byte *s)
-{
- if(s == nil)
- return 0;
- return __builtin_strlen((const char*) s);
-}
-
-intgo
-runtime_findnullw(const uint16 *s)
-{
- intgo l;
-
- if(s == nil)
- return 0;
- for(l=0; s[l]!=0; l++)
- ;
- return l;
-}
-
-static String
-gostringsize(intgo l, byte** pmem)
-{
- String s;
- byte *mem;
-
- if(l == 0) {
- *pmem = nil;
- return runtime_emptystring;
- }
- mem = runtime_mallocgc(l, 0, FlagNoScan|FlagNoZero);
- s.str = mem;
- s.len = l;
- *pmem = mem;
- return s;
-}
-
-String
-runtime_gostring(const byte *str)
-{
- intgo l;
- String s;
- byte *mem;
-
- l = runtime_findnull(str);
- s = gostringsize(l, &mem);
- runtime_memmove(mem, str, l);
- return s;
-}
-
-String
-runtime_gostringnocopy(const byte *str)
-{
- String s;
-
- s.str = str;
- s.len = runtime_findnull(str);
- return s;
-}
-
-func cstringToGo(str *byte) (s String) {
- s = runtime_gostringnocopy(str);
-}
-
-enum
-{
- Runeself = 0x80,
-};
-
-func stringiter(s String, k int) (retk int) {
- int32 l;
-
- if(k >= s.len) {
- // retk=0 is end of iteration
- retk = 0;
- goto out;
- }
-
- l = s.str[k];
- if(l < Runeself) {
- retk = k+1;
- goto out;
- }
-
- // multi-char rune
- retk = k + charntorune(&l, s.str+k, s.len-k);
-
-out:
-}
-
-func stringiter2(s String, k int) (retk int, retv int32) {
- if(k >= s.len) {
- // retk=0 is end of iteration
- retk = 0;
- retv = 0;
- goto out;
- }
-
- retv = s.str[k];
- if(retv < Runeself) {
- retk = k+1;
- goto out;
- }
-
- // multi-char rune
- retk = k + charntorune(&retv, s.str+k, s.len-k);
-
-out:
-}
diff --git a/libgo/runtime/thread-linux.c b/libgo/runtime/thread-linux.c
index ae56261e6f..81ad0f9c90 100644
--- a/libgo/runtime/thread-linux.c
+++ b/libgo/runtime/thread-linux.c
@@ -4,72 +4,13 @@
#include "runtime.h"
#include "defs.h"
-#include "signal_unix.h"
// Linux futex.
-//
-// futexsleep(uint32 *addr, uint32 val)
-// futexwakeup(uint32 *addr)
-//
-// Futexsleep atomically checks if *addr == val and if so, sleeps on addr.
-// Futexwakeup wakes up threads sleeping on addr.
-// Futexsleep is allowed to wake up spuriously.
-#include <errno.h>
-#include <string.h>
-#include <time.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <fcntl.h>
#include <unistd.h>
#include <syscall.h>
#include <linux/futex.h>
-typedef struct timespec Timespec;
-
-// Atomically,
-// if(*addr == val) sleep
-// Might be woken up spuriously; that's allowed.
-// Don't sleep longer than ns; ns < 0 means forever.
-void
-runtime_futexsleep(uint32 *addr, uint32 val, int64 ns)
-{
- Timespec ts;
- int32 nsec;
-
- // Some Linux kernels have a bug where futex of
- // FUTEX_WAIT returns an internal error code
- // as an errno. Libpthread ignores the return value
- // here, and so can we: as it says a few lines up,
- // spurious wakeups are allowed.
-
- if(ns < 0) {
- syscall(__NR_futex, addr, FUTEX_WAIT, val, nil, nil, 0);
- return;
- }
- ts.tv_sec = runtime_timediv(ns, 1000000000LL, &nsec);
- ts.tv_nsec = nsec;
- syscall(__NR_futex, addr, FUTEX_WAIT, val, &ts, nil, 0);
-}
-
-// If any procs are sleeping on addr, wake up at most cnt.
-void
-runtime_futexwakeup(uint32 *addr, uint32 cnt)
-{
- int64 ret;
-
- ret = syscall(__NR_futex, addr, FUTEX_WAKE, cnt, nil, nil, 0);
-
- if(ret >= 0)
- return;
-
- // I don't know that futex wakeup can return
- // EAGAIN or EINTR, but if it does, it would be
- // safe to loop and call futex again.
- runtime_printf("futexwakeup addr=%p returned %D\n", addr, ret);
- *(int32*)0x1006 = 0x1006;
-}
-
void
runtime_osinit(void)
{
diff --git a/libgo/runtime/thread-sema.c b/libgo/runtime/thread-sema.c
index 18827b025d..b74b1dab11 100644
--- a/libgo/runtime/thread-sema.c
+++ b/libgo/runtime/thread-sema.c
@@ -10,131 +10,6 @@
#include <time.h>
#include <semaphore.h>
-/* If we don't have sem_timedwait, use pthread_cond_timedwait instead.
- We don't always use condition variables because on some systems
- pthread_mutex_lock and pthread_mutex_unlock must be called by the
- same thread. That is never true of semaphores. */
-
-struct go_sem
-{
- sem_t sem;
-
-#ifndef HAVE_SEM_TIMEDWAIT
- int timedwait;
- pthread_mutex_t mutex;
- pthread_cond_t cond;
-#endif
-};
-
-/* Create a semaphore. */
-
-uintptr
-runtime_semacreate(void)
-{
- struct go_sem *p;
-
- /* Call malloc rather than runtime_malloc. This will allocate space
- on the C heap. We can't call runtime_malloc here because it
- could cause a deadlock. */
- p = malloc (sizeof (struct go_sem));
- if (sem_init (&p->sem, 0, 0) != 0)
- runtime_throw ("sem_init");
-
-#ifndef HAVE_SEM_TIMEDWAIT
- if (pthread_mutex_init (&p->mutex, NULL) != 0)
- runtime_throw ("pthread_mutex_init");
- if (pthread_cond_init (&p->cond, NULL) != 0)
- runtime_throw ("pthread_cond_init");
-#endif
-
- return (uintptr) p;
-}
-
-/* Acquire m->waitsema. */
-
-int32
-runtime_semasleep (int64 ns)
-{
- M *m;
- struct go_sem *sem;
- int r;
-
- m = runtime_m ();
- sem = (struct go_sem *) m->waitsema;
- if (ns >= 0)
- {
- int64 abs;
- struct timespec ts;
- int err;
-
- abs = ns + runtime_nanotime ();
- ts.tv_sec = abs / 1000000000LL;
- ts.tv_nsec = abs % 1000000000LL;
-
- err = 0;
-
-#ifdef HAVE_SEM_TIMEDWAIT
- r = sem_timedwait (&sem->sem, &ts);
- if (r != 0)
- err = errno;
-#else
- if (pthread_mutex_lock (&sem->mutex) != 0)
- runtime_throw ("pthread_mutex_lock");
-
- while ((r = sem_trywait (&sem->sem)) != 0)
- {
- r = pthread_cond_timedwait (&sem->cond, &sem->mutex, &ts);
- if (r != 0)
- {
- err = r;
- break;
- }
- }
-
- if (pthread_mutex_unlock (&sem->mutex) != 0)
- runtime_throw ("pthread_mutex_unlock");
-#endif
-
- if (err != 0)
- {
- if (err == ETIMEDOUT || err == EAGAIN || err == EINTR)
- return -1;
- runtime_throw ("sema_timedwait");
- }
- return 0;
- }
-
- while (sem_wait (&sem->sem) != 0)
- {
- if (errno == EINTR)
- continue;
- runtime_throw ("sem_wait");
- }
-
- return 0;
-}
-
-/* Wake up mp->waitsema. */
-
-void
-runtime_semawakeup (M *mp)
-{
- struct go_sem *sem;
-
- sem = (struct go_sem *) mp->waitsema;
- if (sem_post (&sem->sem) != 0)
- runtime_throw ("sem_post");
-
-#ifndef HAVE_SEM_TIMEDWAIT
- if (pthread_mutex_lock (&sem->mutex) != 0)
- runtime_throw ("pthread_mutex_lock");
- if (pthread_cond_broadcast (&sem->cond) != 0)
- runtime_throw ("pthread_cond_broadcast");
- if (pthread_mutex_unlock (&sem->mutex) != 0)
- runtime_throw ("pthread_mutex_unlock");
-#endif
-}
-
void
runtime_osinit (void)
{
diff --git a/libgo/runtime/time.goc b/libgo/runtime/time.goc
deleted file mode 100644
index b77ad3333d..0000000000
--- a/libgo/runtime/time.goc
+++ /dev/null
@@ -1,353 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Time-related runtime and pieces of package time.
-
-package time
-
-#include <sys/time.h>
-
-#include "runtime.h"
-#include "defs.h"
-#include "arch.h"
-#include "malloc.h"
-
-enum {
- debug = 0,
-};
-
-static Timers timers;
-static void addtimer(Timer*);
-static void dumptimers(const char*);
-
-// nacl fake time support.
-int64 runtime_timens;
-
-// Package time APIs.
-// Godoc uses the comments in package time, not these.
-
-// time.now is implemented in assembly.
-
-// runtimeNano returns the current value of the runtime clock in nanoseconds.
-func runtimeNano() (ns int64) {
- ns = runtime_nanotime();
-}
-
-// Sleep puts the current goroutine to sleep for at least ns nanoseconds.
-func Sleep(ns int64) {
- runtime_tsleep(ns, "sleep");
-}
-
-// startTimer adds t to the timer heap.
-func startTimer(t *Timer) {
- runtime_addtimer(t);
-}
-
-// stopTimer removes t from the timer heap if it is there.
-// It returns true if t was removed, false if t wasn't even there.
-func stopTimer(t *Timer) (stopped bool) {
- stopped = runtime_deltimer(t);
-}
-
-// C runtime.
-
-int64 runtime_unixnanotime(void)
-{
- struct time_now_ret r;
-
- r = now();
- return r.sec*1000000000 + r.nsec;
-}
-
-static void timerproc(void*);
-static void siftup(int32);
-static void siftdown(int32);
-
-// Ready the goroutine e.data.
-static void
-ready(Eface e, uintptr seq)
-{
- USED(seq);
-
- runtime_ready(e.__object);
-}
-
-static FuncVal readyv = {(void(*)(void))ready};
-
-// Put the current goroutine to sleep for ns nanoseconds.
-void
-runtime_tsleep(int64 ns, const char *reason)
-{
- G* g;
- Timer t;
-
- g = runtime_g();
-
- if(ns <= 0)
- return;
-
- t.when = runtime_nanotime() + ns;
- t.period = 0;
- t.fv = &readyv;
- t.arg.__object = g;
- t.seq = 0;
- runtime_lock(&timers);
- addtimer(&t);
- runtime_parkunlock(&timers, reason);
-}
-
-void
-runtime_addtimer(Timer *t)
-{
- runtime_lock(&timers);
- addtimer(t);
- runtime_unlock(&timers);
-}
-
-// Add a timer to the heap and start or kick the timer proc
-// if the new timer is earlier than any of the others.
-static void
-addtimer(Timer *t)
-{
- int32 n;
- Timer **nt;
-
- // when must never be negative; otherwise timerproc will overflow
- // during its delta calculation and never expire other timers.
- if(t->when < 0)
- t->when = (int64)((1ULL<<63)-1);
-
- if(timers.len >= timers.cap) {
- // Grow slice.
- n = 16;
- if(n <= timers.cap)
- n = timers.cap*3 / 2;
- nt = runtime_malloc(n*sizeof nt[0]);
- runtime_memmove(nt, timers.t, timers.len*sizeof nt[0]);
- runtime_free(timers.t);
- timers.t = nt;
- timers.cap = n;
- }
- t->i = timers.len++;
- timers.t[t->i] = t;
- siftup(t->i);
- if(t->i == 0) {
- // siftup moved to top: new earliest deadline.
- if(timers.sleeping) {
- timers.sleeping = false;
- runtime_notewakeup(&timers.waitnote);
- }
- if(timers.rescheduling) {
- timers.rescheduling = false;
- runtime_ready(timers.timerproc);
- }
- }
- if(timers.timerproc == nil) {
- timers.timerproc = __go_go(timerproc, nil);
- timers.timerproc->issystem = true;
- }
- if(debug)
- dumptimers("addtimer");
-}
-
-// Used to force a dereference before the lock is acquired.
-static int32 gi;
-
-// Delete timer t from the heap.
-// Do not need to update the timerproc:
-// if it wakes up early, no big deal.
-bool
-runtime_deltimer(Timer *t)
-{
- int32 i;
-
- // Dereference t so that any panic happens before the lock is held.
- // Discard result, because t might be moving in the heap.
- i = t->i;
- gi = i;
-
- runtime_lock(&timers);
-
- // t may not be registered anymore and may have
- // a bogus i (typically 0, if generated by Go).
- // Verify it before proceeding.
- i = t->i;
- if(i < 0 || i >= timers.len || timers.t[i] != t) {
- runtime_unlock(&timers);
- return false;
- }
-
- timers.len--;
- if(i == timers.len) {
- timers.t[i] = nil;
- } else {
- timers.t[i] = timers.t[timers.len];
- timers.t[timers.len] = nil;
- timers.t[i]->i = i;
- siftup(i);
- siftdown(i);
- }
- if(debug)
- dumptimers("deltimer");
- runtime_unlock(&timers);
- return true;
-}
-
-// Timerproc runs the time-driven events.
-// It sleeps until the next event in the timers heap.
-// If addtimer inserts a new earlier event, addtimer
-// wakes timerproc early.
-static void
-timerproc(void* dummy __attribute__ ((unused)))
-{
- int64 delta, now;
- Timer *t;
- FuncVal *fv;
- void (*f)(Eface, uintptr);
- Eface arg;
- uintptr seq;
-
- for(;;) {
- runtime_lock(&timers);
- timers.sleeping = false;
- now = runtime_nanotime();
- for(;;) {
- if(timers.len == 0) {
- delta = -1;
- break;
- }
- t = timers.t[0];
- delta = t->when - now;
- if(delta > 0)
- break;
- if(t->period > 0) {
- // leave in heap but adjust next time to fire
- t->when += t->period * (1 + -delta/t->period);
- siftdown(0);
- } else {
- // remove from heap
- timers.t[0] = timers.t[--timers.len];
- timers.t[0]->i = 0;
- siftdown(0);
- t->i = -1; // mark as removed
- }
- fv = t->fv;
- f = (void*)t->fv->fn;
- arg = t->arg;
- seq = t->seq;
- runtime_unlock(&timers);
- __builtin_call_with_static_chain(f(arg, seq), fv);
-
- // clear f and arg to avoid leak while sleeping for next timer
- f = nil;
- USED(f);
- arg.__type_descriptor = nil;
- arg.__object = nil;
- USED(&arg);
-
- runtime_lock(&timers);
- }
- if(delta < 0) {
- // No timers left - put goroutine to sleep.
- timers.rescheduling = true;
- runtime_g()->isbackground = true;
- runtime_parkunlock(&timers, "timer goroutine (idle)");
- runtime_g()->isbackground = false;
- continue;
- }
- // At least one timer pending. Sleep until then.
- timers.sleeping = true;
- runtime_noteclear(&timers.waitnote);
- runtime_unlock(&timers);
- runtime_notetsleepg(&timers.waitnote, delta);
- }
-}
-
-// heap maintenance algorithms.
-
-static void
-siftup(int32 i)
-{
- int32 p;
- int64 when;
- Timer **t, *tmp;
-
- t = timers.t;
- when = t[i]->when;
- tmp = t[i];
- while(i > 0) {
- p = (i-1)/4; // parent
- if(when >= t[p]->when)
- break;
- t[i] = t[p];
- t[i]->i = i;
- t[p] = tmp;
- tmp->i = p;
- i = p;
- }
-}
-
-static void
-siftdown(int32 i)
-{
- int32 c, c3, len;
- int64 when, w, w3;
- Timer **t, *tmp;
-
- t = timers.t;
- len = timers.len;
- when = t[i]->when;
- tmp = t[i];
- for(;;) {
- c = i*4 + 1; // left child
- c3 = c + 2; // mid child
- if(c >= len) {
- break;
- }
- w = t[c]->when;
- if(c+1 < len && t[c+1]->when < w) {
- w = t[c+1]->when;
- c++;
- }
- if(c3 < len) {
- w3 = t[c3]->when;
- if(c3+1 < len && t[c3+1]->when < w3) {
- w3 = t[c3+1]->when;
- c3++;
- }
- if(w3 < w) {
- w = w3;
- c = c3;
- }
- }
- if(w >= when)
- break;
- t[i] = t[c];
- t[i]->i = i;
- t[c] = tmp;
- tmp->i = c;
- i = c;
- }
-}
-
-static void
-dumptimers(const char *msg)
-{
- Timer *t;
- int32 i;
-
- runtime_printf("timers: %s\n", msg);
- for(i = 0; i < timers.len; i++) {
- t = timers.t[i];
- runtime_printf("\t%d\t%p:\ti %d when %D period %D fn %p\n",
- i, t, t->i, t->when, t->period, t->fv->fn);
- }
- runtime_printf("\n");
-}
-
-void
-runtime_time_scan(struct Workbuf** wbufp, void (*enqueue1)(struct Workbuf**, Obj))
-{
- enqueue1(wbufp, (Obj){(byte*)&timers, sizeof timers, 0});
-}
diff --git a/libgo/runtime/yield.c b/libgo/runtime/yield.c
index 442d346db7..d0aed8d22e 100644
--- a/libgo/runtime/yield.c
+++ b/libgo/runtime/yield.c
@@ -37,6 +37,9 @@ runtime_procyield (uint32 cnt)
/* Ask the OS to reschedule this thread. */
+void runtime_osyield(void)
+ __attribute__ ((no_split_stack));
+
void
runtime_osyield (void)
{