Commit 92048217 authored by Russ Cox

[dev.cc] runtime: convert arch-specific .c and .h files to Go

The conversion was done with an automated tool and then
modified only as necessary to make it compile and run.

vlrt.c was only called from C. Pure delete.

[This CL is part of the removal of C code from package runtime.
See golang.org/s/dev.cc for an overview.]

LGTM=r
R=r, austin
CC=dvyukov, golang-codereviews, iant, khr
https://golang.org/cl/174860043
parent e785e3ac
@@ -2,24 +2,14 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

Removed (C):

enum {
	thechar = '6',
	BigEndian = 0,
	CacheLineSize = 64,
#ifdef GOOS_solaris
	RuntimeGogoBytes = 80,
#else
#ifdef GOOS_windows
	RuntimeGogoBytes = 80,
#else
#ifdef GOOS_plan9
	RuntimeGogoBytes = 80,
#else
	RuntimeGogoBytes = 64,
#endif	// Plan 9
#endif	// Windows
#endif	// Solaris
	PhysPageSize = 4096,
	PCQuantum = 1,
	Int64Align = 8
};

Added (Go):

package runtime

const (
	thechar           = '8'
	_BigEndian        = 0
	_CacheLineSize    = 64
	_RuntimeGogoBytes = 64
	_PhysPageSize     = _NaCl*65536 + (1-_NaCl)*4096 // 4k normally; 64k on NaCl
	_PCQuantum        = 1
	_Int64Align       = 4
)
@@ -2,16 +2,14 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

Removed (C):

enum {
	thechar = '5',
	BigEndian = 0,
	CacheLineSize = 32,
	RuntimeGogoBytes = 60,
#ifdef GOOS_nacl
	PhysPageSize = 65536,
#else
	PhysPageSize = 4096,
#endif
	PCQuantum = 4,
	Int64Align = 4
};

Added (Go):

package runtime

const (
	thechar           = '6'
	_BigEndian        = 0
	_CacheLineSize    = 64
	_RuntimeGogoBytes = 64 + (_Plan9|_Solaris|_Windows)*16
	_PhysPageSize     = 4096
	_PCQuantum        = 1
	_Int64Align       = 8
)
@@ -2,16 +2,14 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

Removed (C):

enum {
	thechar = '8',
	BigEndian = 0,
	CacheLineSize = 64,
	RuntimeGogoBytes = 64,
#ifdef GOOS_nacl
	PhysPageSize = 65536,
#else
	PhysPageSize = 4096,
#endif
	PCQuantum = 1,
	Int64Align = 4
};

Added (Go):

package runtime

const (
	thechar           = '5'
	_BigEndian        = 0
	_CacheLineSize    = 32
	_RuntimeGogoBytes = 60
	_PhysPageSize     = 65536*_NaCl + 4096*(1-_NaCl)
	_PCQuantum        = 4
	_Int64Align       = 4
)
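The per-OS #ifdef chains in the removed C headers become plain constant arithmetic in the Go versions: each GOOS flag is assumed to be a constant that is 1 for the current target and 0 otherwise (the expressions above require this; the definitions of _Plan9, _Solaris, _Windows and _NaCl live elsewhere in the runtime). A minimal self-contained sketch of the pattern, pretending the target is Windows:

package main

import "fmt"

// Illustrative stand-ins for the runtime's per-GOOS constants.
const (
	_Plan9   = 0
	_Solaris = 0
	_Windows = 1
	_NaCl    = 0
)

const (
	// 64 normally, 80 on Plan 9, Solaris and Windows, matching the old #ifdef chain.
	_RuntimeGogoBytes = 64 + (_Plan9|_Solaris|_Windows)*16

	// 4k normally, 64k on NaCl.
	_PhysPageSize = _NaCl*65536 + (1-_NaCl)*4096
)

func main() {
	fmt.Println(_RuntimeGogoBytes, _PhysPageSize) // 80 4096
}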
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

#include "runtime.h"
#include "textflag.h"

#pragma textflag NOSPLIT
uint32
runtime·atomicload(uint32 volatile* addr)
{
	return *addr;
}

#pragma textflag NOSPLIT
void*
runtime·atomicloadp(void* volatile* addr)
{
	return *addr;
}

#pragma textflag NOSPLIT
uint64
runtime·xadd64(uint64 volatile* addr, int64 v)
{
	uint64 old;

	do
		old = *addr;
	while(!runtime·cas64(addr, old, old+v));
	return old+v;
}

#pragma textflag NOSPLIT
uint64
runtime·xchg64(uint64 volatile* addr, uint64 v)
{
	uint64 old;

	do
		old = *addr;
	while(!runtime·cas64(addr, old, v));
	return old;
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import "unsafe"

// The calls to nop are to keep these functions from being inlined.
// If they are inlined we have no guarantee that later rewrites of the
// code by optimizers will preserve the relative order of memory accesses.

//go:nosplit
func atomicload(ptr *uint32) uint32 {
	nop()
	return *ptr
}

//go:nosplit
func atomicloadp(ptr unsafe.Pointer) unsafe.Pointer {
	nop()
	return *(*unsafe.Pointer)(ptr)
}

//go:nosplit
func xadd64(ptr *uint64, delta int64) uint64 {
	for {
		old := *ptr
		if cas64(ptr, old, old+uint64(delta)) {
			return old + uint64(delta)
		}
	}
}

//go:nosplit
func xchg64(ptr *uint64, new uint64) uint64 {
	for {
		old := *ptr
		if cas64(ptr, old, new) {
			return old
		}
	}
}

//go:noescape
func xadd(ptr *uint32, delta int32) uint32

//go:noescape
func xchg(ptr *uint32, new uint32) uint32

// xchgp cannot have a go:noescape annotation, because
// while ptr does not escape, new does. If new is marked as
// not escaping, the compiler will make incorrect escape analysis
// decisions about the value being xchg'ed.
// Instead, make xchgp a wrapper around the actual atomic.
// When calling the wrapper we mark ptr as noescape explicitly.

//go:nosplit
func xchgp(ptr unsafe.Pointer, new unsafe.Pointer) unsafe.Pointer {
	return xchgp1(noescape(ptr), new)
}

func xchgp1(ptr unsafe.Pointer, new unsafe.Pointer) unsafe.Pointer

//go:noescape
func xchguintptr(ptr *uintptr, new uintptr) uintptr

//go:noescape
func atomicload64(ptr *uint64) uint64

//go:noescape
func atomicor8(ptr *uint8, val uint8)

//go:noescape
func cas64(ptr *uint64, old, new uint64) bool

//go:noescape
func atomicstore(ptr *uint32, val uint32)

//go:noescape
func atomicstore64(ptr *uint64, val uint64)

// atomicstorep cannot have a go:noescape annotation.
// See comment above for xchgp.

//go:nosplit
func atomicstorep(ptr unsafe.Pointer, new unsafe.Pointer) {
	atomicstorep1(noescape(ptr), new)
}

func atomicstorep1(ptr unsafe.Pointer, val unsafe.Pointer)
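To make the xchgp and atomicstorep comments above concrete, here is a stand-alone sketch of the wrapper pattern; storep and storep1 are made-up stand-ins (the real primitives live in the runtime and its assembly), and the point is that only ptr is laundered through noescape while new is left alone, so the stored value is still treated as escaping:

package main

import (
	"fmt"
	"sync/atomic"
	"unsafe"
)

// noescape hides p from escape analysis, the same trick the runtime uses:
// the xor with 0 is a no-op at run time but breaks the pointer dependency
// the compiler would otherwise track.
func noescape(p unsafe.Pointer) unsafe.Pointer {
	x := uintptr(p)
	return unsafe.Pointer(x ^ 0)
}

// storep1 stands in for the underlying atomic primitive.
func storep1(ptr unsafe.Pointer, val unsafe.Pointer) {
	atomic.StorePointer((*unsafe.Pointer)(ptr), val)
}

// storep mirrors the runtime's wrapper: ptr is explicitly marked as not
// escaping, new is not, so escape analysis still sees the stored value as
// reachable from elsewhere.
func storep(ptr unsafe.Pointer, new unsafe.Pointer) {
	storep1(noescape(ptr), new)
}

func main() {
	var slot unsafe.Pointer
	v := 42
	storep(unsafe.Pointer(&slot), unsafe.Pointer(&v))
	fmt.Println(*(*int)(slot)) // 42
}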
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build amd64 amd64p32

#include "runtime.h"
#include "textflag.h"

#pragma textflag NOSPLIT
uint32
runtime·atomicload(uint32 volatile* addr)
{
	return *addr;
}

#pragma textflag NOSPLIT
uint64
runtime·atomicload64(uint64 volatile* addr)
{
	return *addr;
}

#pragma textflag NOSPLIT
void*
runtime·atomicloadp(void* volatile* addr)
{
	return *addr;
}
-// Copyright 2014 The Go Authors. All rights reserved.
+// Copyright 2009 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-// +build !arm
+// +build amd64 amd64p32
 
 package runtime
 
 import "unsafe"
 
+// The calls to nop are to keep these functions from being inlined.
+// If they are inlined we have no guarantee that later rewrites of the
+// code by optimizers will preserve the relative order of memory accesses.
+
+//go:nosplit
+func atomicload(ptr *uint32) uint32 {
+	nop()
+	return *ptr
+}
+
+//go:nosplit
+func atomicloadp(ptr unsafe.Pointer) unsafe.Pointer {
+	nop()
+	return *(*unsafe.Pointer)(ptr)
+}
+
+//go:nosplit
+func atomicload64(ptr *uint64) uint64 {
+	nop()
+	return *ptr
+}
+
 //go:noescape
 func xadd(ptr *uint32, delta int32) uint32
@@ -20,20 +42,22 @@ func xchg(ptr *uint32, new uint32) uint32
 //go:noescape
 func xchg64(ptr *uint64, new uint64) uint64
 
-//go:noescape
-func xchgp(ptr unsafe.Pointer, new unsafe.Pointer) unsafe.Pointer
-
-//go:noescape
-func xchguintptr(ptr *uintptr, new uintptr) uintptr
-
-//go:noescape
-func atomicload(ptr *uint32) uint32
-
-//go:noescape
-func atomicload64(ptr *uint64) uint64
-
-//go:noescape
-func atomicloadp(ptr unsafe.Pointer) unsafe.Pointer
+// xchgp cannot have a go:noescape annotation, because
+// while ptr does not escape, new does. If new is marked as
+// not escaping, the compiler will make incorrect escape analysis
+// decisions about the value being xchg'ed.
+// Instead, make xchgp a wrapper around the actual atomic.
+// When calling the wrapper we mark ptr as noescape explicitly.
+//go:nosplit
+func xchgp(ptr unsafe.Pointer, new unsafe.Pointer) unsafe.Pointer {
+	return xchgp1(noescape(ptr), new)
+}
+
+func xchgp1(ptr unsafe.Pointer, new unsafe.Pointer) unsafe.Pointer
+
+//go:noescape
+func xchguintptr(ptr *uintptr, new uintptr) uintptr
 
 //go:noescape
 func atomicor8(ptr *uint8, val uint8)
@@ -47,5 +71,12 @@ func atomicstore(ptr *uint32, val uint32)
 //go:noescape
 func atomicstore64(ptr *uint64, val uint64)
 
-//go:noescape
-func atomicstorep(ptr unsafe.Pointer, val unsafe.Pointer)
+// atomicstorep cannot have a go:noescape annotation.
+// See comment above for xchgp.
+//go:nosplit
+func atomicstorep(ptr unsafe.Pointer, new unsafe.Pointer) {
+	atomicstorep1(noescape(ptr), new)
+}
+
+func atomicstorep1(ptr unsafe.Pointer, val unsafe.Pointer)
@@ -23,12 +23,7 @@ func roundup(p unsafe.Pointer, n uintptr) unsafe.Pointer {
 	return unsafe.Pointer(uintptr(p) + delta)
 }
 
-// in runtime.c
 func getg() *g
-func acquirem() *m
-func releasem(mp *m)
-func gomcache() *mcache
-func readgstatus(*g) uint32 // proc.c
 
 // mcall switches from the g to the g0 stack and invokes fn(g),
 // where g is the goroutine that made the call.
@@ -95,33 +90,6 @@ func badonm() {
 	gothrow("onM called from signal goroutine")
 }
 
-// C functions that run on the M stack.
-// Call using mcall.
-func gosched_m(*g)
-func park_m(*g)
-func recovery_m(*g)
-
-// More C functions that run on the M stack.
-// Call using onM.
-func mcacheRefill_m()
-func largeAlloc_m()
-func gc_m()
-func scavenge_m()
-func setFinalizer_m()
-func removeFinalizer_m()
-func markallocated_m()
-func unrollgcprog_m()
-func unrollgcproginplace_m()
-func setgcpercent_m()
-func setmaxthreads_m()
-func ready_m()
-func deferproc_m()
-func goexit_m()
-func startpanic_m()
-func dopanic_m()
-func readmemstats_m()
-func writeheapdump_m()
-
 // memclr clears n bytes starting at ptr.
 // in memclr_*.s
 //go:noescape
@@ -132,12 +100,6 @@ func memclr(ptr unsafe.Pointer, n uintptr)
 //go:noescape
 func memmove(to unsafe.Pointer, from unsafe.Pointer, n uintptr)
 
-func starttheworld()
-func stoptheworld()
-func newextram()
-func lockOSThread()
-func unlockOSThread()
-
 // exported value for testing
 var hashLoad = loadFactor
@@ -159,11 +121,6 @@ func noescape(p unsafe.Pointer) unsafe.Pointer {
 	return unsafe.Pointer(x ^ 0)
 }
 
-func entersyscall()
-func reentersyscall(pc uintptr, sp unsafe.Pointer)
-func entersyscallblock()
-func exitsyscall()
-
 func cgocallback(fn, frame unsafe.Pointer, framesize uintptr)
 func gogo(buf *gobuf)
 func gosave(buf *gobuf)
@@ -181,20 +138,12 @@ func breakpoint()
 func nanotime() int64
 func usleep(usec uint32)
 
-// careful: cputicks is not guaranteed to be monotonic! In particular, we have
-// noticed drift between cpus on certain os/arch combinations. See issue 8976.
-func cputicks() int64
-
 func mmap(addr unsafe.Pointer, n uintptr, prot, flags, fd int32, off uint32) unsafe.Pointer
 func munmap(addr unsafe.Pointer, n uintptr)
 func madvise(addr unsafe.Pointer, n uintptr, flags int32)
 func reflectcall(fn, arg unsafe.Pointer, n uint32, retoffset uint32)
-func osyield()
 func procyield(cycles uint32)
 func cgocallback_gofunc(fv *funcval, frame unsafe.Pointer, framesize uintptr)
-func readgogc() int32
-func purgecachedstats(c *mcache)
-func gostringnocopy(b *byte) string
 func goexit()
 
 //go:noescape
@@ -203,8 +152,21 @@ func write(fd uintptr, p unsafe.Pointer, n int32) int32
 //go:noescape
 func cas(ptr *uint32, old, new uint32) bool
 
-//go:noescape
-func casp(ptr *unsafe.Pointer, old, new unsafe.Pointer) bool
+// casp cannot have a go:noescape annotation, because
+// while ptr and old do not escape, new does. If new is marked as
+// not escaping, the compiler will make incorrect escape analysis
+// decisions about the value being xchg'ed.
+// Instead, make casp a wrapper around the actual atomic.
+// When calling the wrapper we mark ptr as noescape explicitly.
+//go:nosplit
+func casp(ptr *unsafe.Pointer, old, new unsafe.Pointer) bool {
+	return casp1((*unsafe.Pointer)(noescape(unsafe.Pointer(ptr))), noescape(old), new)
+}
+
+func casp1(ptr *unsafe.Pointer, old, new unsafe.Pointer) bool
+
+func nop() // call to prevent inlining of function body
 
 //go:noescape
 func casuintptr(ptr *uintptr, old, new uintptr) bool
@@ -264,15 +226,10 @@ func asmcgocall_errno(fn, arg unsafe.Pointer) int32
 //go:noescape
 func open(name *byte, mode, perm int32) int32
 
-//go:noescape
-func gotraceback(*bool) int32
-
+// argp used in Defer structs when there is no argp.
 const _NoArgs = ^uintptr(0)
 
-func newstack()
-func newproc()
 func morestack()
-func mstart()
 func rt0_go()
 
 // return0 is a stub used to return 0 from deferproc.
@@ -314,3 +271,5 @@ func call134217728(fn, arg unsafe.Pointer, n, retoffset uint32)
 func call268435456(fn, arg unsafe.Pointer, n, retoffset uint32)
 func call536870912(fn, arg unsafe.Pointer, n, retoffset uint32)
 func call1073741824(fn, arg unsafe.Pointer, n, retoffset uint32)
+
+func switchtoM()
@@ -2,34 +2,34 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-#include "runtime.h"
+package runtime
+
+import "unsafe"
 
 // adjust Gobuf as if it executed a call to fn with context ctxt
 // and then did an immediate Gosave.
-void
-runtime·gostartcall(Gobuf *gobuf, void (*fn)(void), void *ctxt)
-{
-	if(gobuf->lr != 0)
-		runtime·throw("invalid use of gostartcall");
-	gobuf->lr = gobuf->pc;
-	gobuf->pc = (uintptr)fn;
-	gobuf->ctxt = ctxt;
+func gostartcall(buf *gobuf, fn, ctxt unsafe.Pointer) {
+	if buf.lr != 0 {
+		gothrow("invalid use of gostartcall")
+	}
+	buf.lr = buf.pc
+	buf.pc = uintptr(fn)
+	buf.ctxt = ctxt
 }
 
 // Called to rewind context saved during morestack back to beginning of function.
 // To help us, the linker emits a jmp back to the beginning right after the
 // call to morestack. We just have to decode and apply that jump.
-void
-runtime·rewindmorestack(Gobuf *gobuf)
-{
-	uint32 inst;
-
-	inst = *(uint32*)gobuf->pc;
-	if((gobuf->pc&3) == 0 && (inst>>24) == 0x9a) {
-		//runtime·printf("runtime: rewind pc=%p to pc=%p\n", gobuf->pc, gobuf->pc + ((int32)(inst<<8)>>6) + 8);
-		gobuf->pc += ((int32)(inst<<8)>>6) + 8;
-		return;
-	}
-	runtime·printf("runtime: pc=%p %x\n", gobuf->pc, inst);
-	runtime·throw("runtime: misuse of rewindmorestack");
+func rewindmorestack(buf *gobuf) {
+	var inst uint32
+	if buf.pc&3 == 0 && buf.pc != 0 {
+		inst = *(*uint32)(unsafe.Pointer(buf.pc))
+		if inst>>24 == 0x9a {
+			buf.pc += uintptr(int32(inst<<8)>>6) + 8
+			return
+		}
+	}
+
+	print("runtime: pc=", hex(buf.pc), " ", hex(inst), "\n")
+	gothrow("runtime: misuse of rewindmorestack")
 }
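To make the ARM decode above concrete: inst<<8 moves the 24-bit branch offset field into the top bits, the arithmetic shift right by 6 sign-extends it and multiplies by 4 (instructions are 4 bytes), and the +8 accounts for the PC-relative base. A small worked example with an illustrative instruction word, not taken from the CL:

package main

import "fmt"

func main() {
	// Hypothetical instruction whose top byte is 0x9a (the pattern
	// rewindmorestack checks for) and whose 24-bit offset field is
	// 0xfffffd, i.e. -3 words relative to pc+8.
	inst := uint32(0x9afffffd)

	// Same arithmetic as the Go code above: -3*4 + 8 = -4.
	delta := uintptr(int32(inst<<8)>>6) + 8

	var pc uintptr = 0x1000
	fmt.Printf("new pc = %#x\n", pc+delta) // 0xffc
}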
@@ -4,54 +4,51 @@
 
 // +build amd64 amd64p32 386
 
-#include "runtime.h"
+package runtime
+
+import "unsafe"
 
 // adjust Gobuf as it if executed a call to fn with context ctxt
 // and then did an immediate gosave.
-void
-runtime·gostartcall(Gobuf *gobuf, void (*fn)(void), void *ctxt)
-{
-	uintptr *sp;
-
-	sp = (uintptr*)gobuf->sp;
-	if(sizeof(uintreg) > sizeof(uintptr))
-		*--sp = 0;
-	*--sp = (uintptr)gobuf->pc;
-	gobuf->sp = (uintptr)sp;
-	gobuf->pc = (uintptr)fn;
-	gobuf->ctxt = ctxt;
+func gostartcall(buf *gobuf, fn, ctxt unsafe.Pointer) {
+	sp := buf.sp
+	if regSize > ptrSize {
+		sp -= ptrSize
+		*(*uintptr)(unsafe.Pointer(sp)) = 0
+	}
+	sp -= ptrSize
+	*(*uintptr)(unsafe.Pointer(sp)) = buf.pc
+	buf.sp = sp
+	buf.pc = uintptr(fn)
+	buf.ctxt = ctxt
 }
 
 // Called to rewind context saved during morestack back to beginning of function.
 // To help us, the linker emits a jmp back to the beginning right after the
 // call to morestack. We just have to decode and apply that jump.
-void
-runtime·rewindmorestack(Gobuf *gobuf)
-{
-	byte *pc;
-
-	pc = (byte*)gobuf->pc;
-	if(pc[0] == 0xe9) { // jmp 4-byte offset
-		gobuf->pc = gobuf->pc + 5 + *(int32*)(pc+1);
-		return;
+func rewindmorestack(buf *gobuf) {
+	pc := (*[8]byte)(unsafe.Pointer(buf.pc))
+	if pc[0] == 0xe9 { // jmp 4-byte offset
+		buf.pc = buf.pc + 5 + uintptr(int64(*(*int32)(unsafe.Pointer(&pc[1]))))
+		return
 	}
-	if(pc[0] == 0xeb) { // jmp 1-byte offset
-		gobuf->pc = gobuf->pc + 2 + *(int8*)(pc+1);
-		return;
+	if pc[0] == 0xeb { // jmp 1-byte offset
+		buf.pc = buf.pc + 2 + uintptr(int64(*(*int8)(unsafe.Pointer(&pc[1]))))
+		return
 	}
-	if(pc[0] == 0xcc) {
+	if pc[0] == 0xcc {
 		// This is a breakpoint inserted by gdb. We could use
 		// runtime·findfunc to find the function. But if we
 		// do that, then we will continue execution at the
 		// function entry point, and we will not hit the gdb
 		// breakpoint. So for this case we don't change
-		// gobuf->pc, so that when we return we will execute
+		// buf.pc, so that when we return we will execute
 		// the jump instruction and carry on. This means that
 		// stack unwinding may not work entirely correctly
 		// (http://golang.org/issue/5723) but the user is
 		// running under gdb anyhow.
-		return;
+		return
 	}
-	runtime·printf("runtime: pc=%p %x %x %x %x %x\n", pc, pc[0], pc[1], pc[2], pc[3], pc[4]);
-	runtime·throw("runtime: misuse of rewindmorestack");
+	print("runtime: pc=", pc, " ", hex(pc[0]), " ", hex(pc[1]), " ", hex(pc[2]), " ", hex(pc[3]), " ", hex(pc[4]), "\n")
+	gothrow("runtime: misuse of rewindmorestack")
 }
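The x86 version reads the instruction bytes directly: 0xe9 is the 5-byte jmp with a little-endian 32-bit displacement measured from the end of the instruction, and 0xeb is the 2-byte short form. A worked decode of the 0xe9 case with illustrative bytes, not taken from the CL:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// Hypothetical bytes at buf.pc: e9 f6 ff ff ff, i.e. jmp -10.
	code := []byte{0xe9, 0xf6, 0xff, 0xff, 0xff}

	var pc uintptr = 0x2000
	if code[0] == 0xe9 { // jmp 4-byte offset
		rel := int32(binary.LittleEndian.Uint32(code[1:])) // -10
		pc = pc + 5 + uintptr(int64(rel))                  // same formula as above
	}
	fmt.Printf("new pc = %#x\n", pc) // 0x2000 + 5 - 10 = 0x1ffb
}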