Commit 92048217 authored by Russ Cox

[dev.cc] runtime: convert arch-specific .c and .h files to Go

The conversion was done with an automated tool and then
modified only as necessary to make it compile and run.

vlrt.c was only called from C. Pure delete.

[This CL is part of the removal of C code from package runtime.
See golang.org/s/dev.cc for an overview.]

LGTM=r
R=r, austin
CC=dvyukov, golang-codereviews, iant, khr
https://golang.org/cl/174860043
parent e785e3ac
@@ -2,24 +2,14 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
enum {
	thechar = '6',
	BigEndian = 0,
	CacheLineSize = 64,
#ifdef GOOS_solaris
	RuntimeGogoBytes = 80,
#else
#ifdef GOOS_windows
	RuntimeGogoBytes = 80,
#else
#ifdef GOOS_plan9
	RuntimeGogoBytes = 80,
#else
	RuntimeGogoBytes = 64,
#endif // Plan 9
#endif // Windows
#endif // Solaris
	PhysPageSize = 4096,
	PCQuantum = 1,
	Int64Align = 8
};
package runtime
const (
	thechar           = '8'
	_BigEndian        = 0
	_CacheLineSize    = 64
	_RuntimeGogoBytes = 64
	_PhysPageSize     = _NaCl*65536 + (1-_NaCl)*4096 // 4k normally; 64k on NaCl
	_PCQuantum        = 1
	_Int64Align       = 4
)
@@ -2,16 +2,14 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
enum {
	thechar = '5',
	BigEndian = 0,
	CacheLineSize = 32,
	RuntimeGogoBytes = 60,
#ifdef GOOS_nacl
	PhysPageSize = 65536,
#else
	PhysPageSize = 4096,
#endif
	PCQuantum = 4,
	Int64Align = 4
};
package runtime
const (
	thechar           = '6'
	_BigEndian        = 0
	_CacheLineSize    = 64
	_RuntimeGogoBytes = 64 + (_Plan9|_Solaris|_Windows)*16
	_PhysPageSize     = 4096
	_PCQuantum        = 1
	_Int64Align       = 8
)
@@ -2,16 +2,14 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
enum {
	thechar = '8',
	BigEndian = 0,
	CacheLineSize = 64,
	RuntimeGogoBytes = 64,
#ifdef GOOS_nacl
	PhysPageSize = 65536,
#else
	PhysPageSize = 4096,
#endif
	PCQuantum = 1,
	Int64Align = 4
};
package runtime
const (
	thechar           = '5'
	_BigEndian        = 0
	_CacheLineSize    = 32
	_RuntimeGogoBytes = 60
	_PhysPageSize     = 65536*_NaCl + 4096*(1-_NaCl)
	_PCQuantum        = 4
	_Int64Align       = 4
)
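
Aside (not part of this CL): the _PhysPageSize lines above replace the old #ifdef with a branch-free select — each alternative is multiplied by a 0/1 constant — because Go constant declarations cannot be switched per-OS the way C preprocessor blocks can. A minimal sketch of the idiom, using a hypothetical isNaCl flag:

package main

import "fmt"

const (
	isNaCl       = 0                              // hypothetical: 1 when targeting NaCl
	physPageSize = isNaCl*65536 + (1-isNaCl)*4096 // 65536 on NaCl, 4096 elsewhere
)

func main() {
	fmt.Println(physPageSize) // prints 4096
}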
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
#include "runtime.h"
#include "textflag.h"
#pragma textflag NOSPLIT
uint32
runtime·atomicload(uint32 volatile* addr)
{
	return *addr;
}

#pragma textflag NOSPLIT
void*
runtime·atomicloadp(void* volatile* addr)
{
	return *addr;
}

#pragma textflag NOSPLIT
uint64
runtime·xadd64(uint64 volatile* addr, int64 v)
{
	uint64 old;

	do
		old = *addr;
	while(!runtime·cas64(addr, old, old+v));

	return old+v;
}

#pragma textflag NOSPLIT
uint64
runtime·xchg64(uint64 volatile* addr, uint64 v)
{
	uint64 old;

	do
		old = *addr;
	while(!runtime·cas64(addr, old, v));

	return old;
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime
import "unsafe"
// The calls to nop are to keep these functions from being inlined.
// If they are inlined we have no guarantee that later rewrites of the
// code by optimizers will preserve the relative order of memory accesses.
//go:nosplit
func atomicload(ptr *uint32) uint32 {
	nop()
	return *ptr
}

//go:nosplit
func atomicloadp(ptr unsafe.Pointer) unsafe.Pointer {
	nop()
	return *(*unsafe.Pointer)(ptr)
}

//go:nosplit
func xadd64(ptr *uint64, delta int64) uint64 {
	for {
		old := *ptr
		if cas64(ptr, old, old+uint64(delta)) {
			return old + uint64(delta)
		}
	}
}

//go:nosplit
func xchg64(ptr *uint64, new uint64) uint64 {
	for {
		old := *ptr
		if cas64(ptr, old, new) {
			return old
		}
	}
}
//go:noescape
func xadd(ptr *uint32, delta int32) uint32
//go:noescape
func xchg(ptr *uint32, new uint32) uint32
// xchgp cannot have a go:noescape annotation, because
// while ptr does not escape, new does. If new is marked as
// not escaping, the compiler will make incorrect escape analysis
// decisions about the value being xchg'ed.
// Instead, make xchgp a wrapper around the actual atomic.
// When calling the wrapper we mark ptr as noescape explicitly.
//go:nosplit
func xchgp(ptr unsafe.Pointer, new unsafe.Pointer) unsafe.Pointer {
	return xchgp1(noescape(ptr), new)
}
func xchgp1(ptr unsafe.Pointer, new unsafe.Pointer) unsafe.Pointer
//go:noescape
func xchguintptr(ptr *uintptr, new uintptr) uintptr
//go:noescape
func atomicload64(ptr *uint64) uint64
//go:noescape
func atomicor8(ptr *uint8, val uint8)
//go:noescape
func cas64(ptr *uint64, old, new uint64) bool
//go:noescape
func atomicstore(ptr *uint32, val uint32)
//go:noescape
func atomicstore64(ptr *uint64, val uint64)
// atomicstorep cannot have a go:noescape annotation.
// See comment above for xchgp.
//go:nosplit
func atomicstorep(ptr unsafe.Pointer, new unsafe.Pointer) {
	atomicstorep1(noescape(ptr), new)
}
func atomicstorep1(ptr unsafe.Pointer, val unsafe.Pointer)
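
Aside (not part of this CL): xadd64 and xchg64 above build a 64-bit read-modify-write out of cas64 by retrying until the compare-and-swap succeeds. The same loop shape can be sketched outside the runtime with the public sync/atomic package (addDelta is a hypothetical name, not a runtime function):

package main

import (
	"fmt"
	"sync/atomic"
)

// addDelta emulates xadd64: reload and retry the compare-and-swap
// until the transition old -> old+delta lands, then return the new value.
func addDelta(addr *uint64, delta uint64) uint64 {
	for {
		old := atomic.LoadUint64(addr)
		if atomic.CompareAndSwapUint64(addr, old, old+delta) {
			return old + delta
		}
	}
}

func main() {
	var n uint64
	fmt.Println(addDelta(&n, 3)) // prints 3
}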
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build amd64 amd64p32
#include "runtime.h"
#include "textflag.h"
#pragma textflag NOSPLIT
uint32
runtime·atomicload(uint32 volatile* addr)
{
	return *addr;
}

#pragma textflag NOSPLIT
uint64
runtime·atomicload64(uint64 volatile* addr)
{
	return *addr;
}

#pragma textflag NOSPLIT
void*
runtime·atomicloadp(void* volatile* addr)
{
	return *addr;
}
// Copyright 2014 The Go Authors. All rights reserved.
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !arm
// +build amd64 amd64p32
package runtime
import "unsafe"
// The calls to nop are to keep these functions from being inlined.
// If they are inlined we have no guarantee that later rewrites of the
// code by optimizers will preserve the relative order of memory accesses.
//go:nosplit
func atomicload(ptr *uint32) uint32 {
	nop()
	return *ptr
}

//go:nosplit
func atomicloadp(ptr unsafe.Pointer) unsafe.Pointer {
	nop()
	return *(*unsafe.Pointer)(ptr)
}

//go:nosplit
func atomicload64(ptr *uint64) uint64 {
	nop()
	return *ptr
}
//go:noescape
func xadd(ptr *uint32, delta int32) uint32
@@ -20,20 +42,22 @@ func xchg(ptr *uint32, new uint32) uint32
//go:noescape
func xchg64(ptr *uint64, new uint64) uint64
//go:noescape
func xchgp(ptr unsafe.Pointer, new unsafe.Pointer) unsafe.Pointer
//go:noescape
func xchguintptr(ptr *uintptr, new uintptr) uintptr
// xchgp cannot have a go:noescape annotation, because
// while ptr does not escape, new does. If new is marked as
// not escaping, the compiler will make incorrect escape analysis
// decisions about the value being xchg'ed.
// Instead, make xchgp a wrapper around the actual atomic.
// When calling the wrapper we mark ptr as noescape explicitly.
//go:noescape
func atomicload(ptr *uint32) uint32
//go:nosplit
func xchgp(ptr unsafe.Pointer, new unsafe.Pointer) unsafe.Pointer {
	return xchgp1(noescape(ptr), new)
}
//go:noescape
func atomicload64(ptr *uint64) uint64
func xchgp1(ptr unsafe.Pointer, new unsafe.Pointer) unsafe.Pointer
//go:noescape
func atomicloadp(ptr unsafe.Pointer) unsafe.Pointer
func xchguintptr(ptr *uintptr, new uintptr) uintptr
//go:noescape
func atomicor8(ptr *uint8, val uint8)
@@ -47,5 +71,12 @@ func atomicstore(ptr *uint32, val uint32)
//go:noescape
func atomicstore64(ptr *uint64, val uint64)
//go:noescape
func atomicstorep(ptr unsafe.Pointer, val unsafe.Pointer)
// atomicstorep cannot have a go:noescape annotation.
// See comment above for xchgp.
//go:nosplit
func atomicstorep(ptr unsafe.Pointer, new unsafe.Pointer) {
	atomicstorep1(noescape(ptr), new)
}
func atomicstorep1(ptr unsafe.Pointer, val unsafe.Pointer)
@@ -23,12 +23,7 @@ func roundup(p unsafe.Pointer, n uintptr) unsafe.Pointer {
	return unsafe.Pointer(uintptr(p) + delta)
}
// in runtime.c
func getg() *g
func acquirem() *m
func releasem(mp *m)
func gomcache() *mcache
func readgstatus(*g) uint32 // proc.c
// mcall switches from the g to the g0 stack and invokes fn(g),
// where g is the goroutine that made the call.
@@ -95,33 +90,6 @@ func badonm() {
	gothrow("onM called from signal goroutine")
}
// C functions that run on the M stack.
// Call using mcall.
func gosched_m(*g)
func park_m(*g)
func recovery_m(*g)
// More C functions that run on the M stack.
// Call using onM.
func mcacheRefill_m()
func largeAlloc_m()
func gc_m()
func scavenge_m()
func setFinalizer_m()
func removeFinalizer_m()
func markallocated_m()
func unrollgcprog_m()
func unrollgcproginplace_m()
func setgcpercent_m()
func setmaxthreads_m()
func ready_m()
func deferproc_m()
func goexit_m()
func startpanic_m()
func dopanic_m()
func readmemstats_m()
func writeheapdump_m()
// memclr clears n bytes starting at ptr.
// in memclr_*.s
//go:noescape
@@ -132,12 +100,6 @@ func memclr(ptr unsafe.Pointer, n uintptr)
//go:noescape
func memmove(to unsafe.Pointer, from unsafe.Pointer, n uintptr)
func starttheworld()
func stoptheworld()
func newextram()
func lockOSThread()
func unlockOSThread()
// exported value for testing
var hashLoad = loadFactor
@@ -159,11 +121,6 @@ func noescape(p unsafe.Pointer) unsafe.Pointer {
	return unsafe.Pointer(x ^ 0)
}
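
Aside (not part of this CL): noescape, whose tail is shown in the hunk above, launders a pointer through a uintptr so escape analysis loses track of it; the x ^ 0 keeps the compiler from seeing straight through the round-trip. A standalone sketch of the idiom:

package main

import (
	"fmt"
	"unsafe"
)

// noescape returns its argument unchanged but breaks the data-flow
// edge that escape analysis would otherwise follow through p.
//go:nosplit
func noescape(p unsafe.Pointer) unsafe.Pointer {
	x := uintptr(p)
	return unsafe.Pointer(x ^ 0)
}

func main() {
	v := 42
	p := noescape(unsafe.Pointer(&v))
	fmt.Println(*(*int)(p)) // 42; inspect decisions with: go build -gcflags=-m
}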
func entersyscall()
func reentersyscall(pc uintptr, sp unsafe.Pointer)
func entersyscallblock()
func exitsyscall()
func cgocallback(fn, frame unsafe.Pointer, framesize uintptr)
func gogo(buf *gobuf)
func gosave(buf *gobuf)
@@ -181,20 +138,12 @@ func breakpoint()
func nanotime() int64
func usleep(usec uint32)
// careful: cputicks is not guaranteed to be monotonic! In particular, we have
// noticed drift between cpus on certain os/arch combinations. See issue 8976.
func cputicks() int64
func mmap(addr unsafe.Pointer, n uintptr, prot, flags, fd int32, off uint32) unsafe.Pointer
func munmap(addr unsafe.Pointer, n uintptr)
func madvise(addr unsafe.Pointer, n uintptr, flags int32)
func reflectcall(fn, arg unsafe.Pointer, n uint32, retoffset uint32)
func osyield()
func procyield(cycles uint32)
func cgocallback_gofunc(fv *funcval, frame unsafe.Pointer, framesize uintptr)
func readgogc() int32
func purgecachedstats(c *mcache)
func gostringnocopy(b *byte) string
func goexit()
//go:noescape
@@ -203,8 +152,21 @@ func write(fd uintptr, p unsafe.Pointer, n int32) int32
//go:noescape
func cas(ptr *uint32, old, new uint32) bool
//go:noescape
func casp(ptr *unsafe.Pointer, old, new unsafe.Pointer) bool
// casp cannot have a go:noescape annotation, because
// while ptr and old do not escape, new does. If new is marked as
// not escaping, the compiler will make incorrect escape analysis
// decisions about the value being xchg'ed.
// Instead, make casp a wrapper around the actual atomic.
// When calling the wrapper we mark ptr as noescape explicitly.
//go:nosplit
func casp(ptr *unsafe.Pointer, old, new unsafe.Pointer) bool {
	return casp1((*unsafe.Pointer)(noescape(unsafe.Pointer(ptr))), noescape(old), new)
}
func casp1(ptr *unsafe.Pointer, old, new unsafe.Pointer) bool
func nop() // call to prevent inlining of function body
//go:noescape
func casuintptr(ptr *uintptr, old, new uintptr) bool
@@ -264,15 +226,10 @@ func asmcgocall_errno(fn, arg unsafe.Pointer) int32
//go:noescape
func open(name *byte, mode, perm int32) int32
//go:noescape
func gotraceback(*bool) int32
// argp used in Defer structs when there is no argp.
const _NoArgs = ^uintptr(0)
func newstack()
func newproc()
func morestack()
func mstart()
func rt0_go()
// return0 is a stub used to return 0 from deferproc.
@@ -314,3 +271,5 @@ func call134217728(fn, arg unsafe.Pointer, n, retoffset uint32)
func call268435456(fn, arg unsafe.Pointer, n, retoffset uint32)
func call536870912(fn, arg unsafe.Pointer, n, retoffset uint32)
func call1073741824(fn, arg unsafe.Pointer, n, retoffset uint32)
func switchtoM()
@@ -2,34 +2,34 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
#include "runtime.h"
package runtime
import "unsafe"
// adjust Gobuf as if it executed a call to fn with context ctxt
// and then did an immediate Gosave.
void
runtime·gostartcall(Gobuf *gobuf, void (*fn)(void), void *ctxt)
{
	if(gobuf->lr != 0)
		runtime·throw("invalid use of gostartcall");
	gobuf->lr = gobuf->pc;
	gobuf->pc = (uintptr)fn;
	gobuf->ctxt = ctxt;
}

func gostartcall(buf *gobuf, fn, ctxt unsafe.Pointer) {
	if buf.lr != 0 {
		gothrow("invalid use of gostartcall")
	}
	buf.lr = buf.pc
	buf.pc = uintptr(fn)
	buf.ctxt = ctxt
}
// Called to rewind context saved during morestack back to beginning of function.
// To help us, the linker emits a jmp back to the beginning right after the
// call to morestack. We just have to decode and apply that jump.
void
runtime·rewindmorestack(Gobuf *gobuf)
{
	uint32 inst;

	inst = *(uint32*)gobuf->pc;
	if((gobuf->pc&3) == 0 && (inst>>24) == 0x9a) {
		//runtime·printf("runtime: rewind pc=%p to pc=%p\n", gobuf->pc, gobuf->pc + ((int32)(inst<<8)>>6) + 8);
		gobuf->pc += ((int32)(inst<<8)>>6) + 8;
		return;
	}
	runtime·printf("runtime: pc=%p %x\n", gobuf->pc, inst);
	runtime·throw("runtime: misuse of rewindmorestack");
}

func rewindmorestack(buf *gobuf) {
	var inst uint32
	if buf.pc&3 == 0 && buf.pc != 0 {
		inst = *(*uint32)(unsafe.Pointer(buf.pc))
		if inst>>24 == 0x9a {
			buf.pc += uintptr(int32(inst<<8)>>6) + 8
			return
		}
	}
	print("runtime: pc=", hex(buf.pc), " ", hex(inst), "\n")
	gothrow("runtime: misuse of rewindmorestack")
}
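
Aside (not part of this CL): in the ARM code above, int32(inst<<8)>>6 shifts the condition and opcode bits off the top, then the arithmetic right shift sign-extends the 24-bit word offset and scales it to bytes; the +8 accounts for ARM's PC reading two instructions ahead. A sketch of that decoding (branchTarget is a hypothetical helper, not runtime code):

package main

import "fmt"

// branchTarget computes the destination of an ARM B-class instruction
// using the same arithmetic as rewindmorestack.
func branchTarget(pc uintptr, inst uint32) uintptr {
	// inst<<8 discards cond+opcode; arithmetic >>6 sign-extends the
	// 24-bit word offset and multiplies it by 4 in one step.
	off := int32(inst<<8) >> 6
	return pc + uintptr(off) + 8 // +8: ARM PC is two instructions ahead
}

func main() {
	// 0x9afffffe: offset field 0xfffffe = -2 words = -8 bytes,
	// so the target is the branch's own address.
	fmt.Printf("%#x\n", branchTarget(0x1000, 0x9afffffe)) // 0x1000
}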
@@ -4,54 +4,51 @@
// +build amd64 amd64p32 386
#include "runtime.h"
package runtime
import "unsafe"
// adjust Gobuf as if it executed a call to fn with context ctxt
// and then did an immediate gosave.
void
runtime·gostartcall(Gobuf *gobuf, void (*fn)(void), void *ctxt)
{
	uintptr *sp;

	sp = (uintptr*)gobuf->sp;
	if(sizeof(uintreg) > sizeof(uintptr))
		*--sp = 0;
	*--sp = (uintptr)gobuf->pc;
	gobuf->sp = (uintptr)sp;
	gobuf->pc = (uintptr)fn;
	gobuf->ctxt = ctxt;
}

func gostartcall(buf *gobuf, fn, ctxt unsafe.Pointer) {
	sp := buf.sp
	if regSize > ptrSize {
		sp -= ptrSize
		*(*uintptr)(unsafe.Pointer(sp)) = 0
	}
	sp -= ptrSize
	*(*uintptr)(unsafe.Pointer(sp)) = buf.pc
	buf.sp = sp
	buf.pc = uintptr(fn)
	buf.ctxt = ctxt
}
// Called to rewind context saved during morestack back to beginning of function.
// To help us, the linker emits a jmp back to the beginning right after the
// call to morestack. We just have to decode and apply that jump.
void
runtime·rewindmorestack(Gobuf *gobuf)
{
	byte *pc;

	pc = (byte*)gobuf->pc;
	if(pc[0] == 0xe9) { // jmp 4-byte offset
		gobuf->pc = gobuf->pc + 5 + *(int32*)(pc+1);
		return;
	}
	if(pc[0] == 0xeb) { // jmp 1-byte offset
		gobuf->pc = gobuf->pc + 2 + *(int8*)(pc+1);
		return;
	}
	if(pc[0] == 0xcc) {
		// This is a breakpoint inserted by gdb. We could use
		// runtime·findfunc to find the function. But if we
		// do that, then we will continue execution at the
		// function entry point, and we will not hit the gdb
		// breakpoint. So for this case we don't change
		// gobuf->pc, so that when we return we will execute
		// the jump instruction and carry on. This means that
		// stack unwinding may not work entirely correctly
		// (http://golang.org/issue/5723) but the user is
		// running under gdb anyhow.
		return;
	}
	runtime·printf("runtime: pc=%p %x %x %x %x %x\n", pc, pc[0], pc[1], pc[2], pc[3], pc[4]);
	runtime·throw("runtime: misuse of rewindmorestack");
}

func rewindmorestack(buf *gobuf) {
	pc := (*[8]byte)(unsafe.Pointer(buf.pc))
	if pc[0] == 0xe9 { // jmp 4-byte offset
		buf.pc = buf.pc + 5 + uintptr(int64(*(*int32)(unsafe.Pointer(&pc[1]))))
		return
	}
	if pc[0] == 0xeb { // jmp 1-byte offset
		buf.pc = buf.pc + 2 + uintptr(int64(*(*int8)(unsafe.Pointer(&pc[1]))))
		return
	}
	if pc[0] == 0xcc {
		// This is a breakpoint inserted by gdb. We could use
		// runtime·findfunc to find the function. But if we
		// do that, then we will continue execution at the
		// function entry point, and we will not hit the gdb
		// breakpoint. So for this case we don't change
		// buf.pc, so that when we return we will execute
		// the jump instruction and carry on. This means that
		// stack unwinding may not work entirely correctly
		// (http://golang.org/issue/5723) but the user is
		// running under gdb anyhow.
		return
	}
	print("runtime: pc=", pc, " ", hex(pc[0]), " ", hex(pc[1]), " ", hex(pc[2]), " ", hex(pc[3]), " ", hex(pc[4]), "\n")
	gothrow("runtime: misuse of rewindmorestack")
}
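
Aside (not part of this CL): the x86 version recognizes two jmp encodings — 0xe9 with a signed 32-bit offset (5-byte instruction) and 0xeb with a signed 8-bit offset (2 bytes) — and in both cases the target is the address of the next instruction plus the offset. A sketch of that decoding over a byte slice (jumpTarget is a hypothetical helper):

package main

import (
	"encoding/binary"
	"fmt"
)

// jumpTarget decodes the destination of the two jmp forms that
// rewindmorestack handles; ok is false for any other opcode.
func jumpTarget(pc uintptr, code []byte) (target uintptr, ok bool) {
	switch code[0] {
	case 0xe9: // jmp rel32: 5-byte instruction, signed 32-bit offset
		off := int32(binary.LittleEndian.Uint32(code[1:5]))
		return pc + 5 + uintptr(int64(off)), true
	case 0xeb: // jmp rel8: 2-byte instruction, signed 8-bit offset
		return pc + 2 + uintptr(int64(int8(code[1]))), true
	}
	return 0, false
}

func main() {
	// 0xeb 0xfe is "jmp -2": it jumps to itself.
	target, ok := jumpTarget(0x400000, []byte{0xeb, 0xfe})
	fmt.Printf("%#x %v\n", target, ok) // 0x400000 true
}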