Commit 76483877 authored by Austin Clements

debug/elf: transparently decompress compressed sections

This adds support for compressed ELF sections. This compression is
treated as a framing issue and hence the package APIs all
transparently decompress compressed sections. This requires some
subtlety for (*Section).Open, which returns an io.ReadSeeker: since
the decompressed data comes from an io.Reader, this commit introduces
a Reader-to-ReadSeeker adapter that is efficient for common uses of
Seek and does what it can otherwise.

Fixes #11773.

Change-Id: Ic0cb7255a85cadf4c1d15fb563d5a2e89dbd3c36
Reviewed-on: https://go-review.googlesource.com/17341
Reviewed-by: Russ Cox <rsc@golang.org>
Run-TryBot: Austin Clements <austin@google.com>
parent e1544d3b
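
A minimal usage sketch of the new behavior (hypothetical binary path and section name; not part of this change): Data now returns uncompressed bytes whether or not the section carries SHF_COMPRESSED, and the new FileSize field reports how many bytes the section occupies in the file.

package main

import (
	"debug/elf"
	"fmt"
	"log"
)

func main() {
	f, err := elf.Open("a.out") // hypothetical path
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	s := f.Section(".debug_info") // hypothetical section name
	if s == nil {
		log.Fatal("section not found")
	}
	data, err := s.Data() // decompressed transparently if SHF_COMPRESSED is set
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s: %d bytes uncompressed, %d bytes in the file\n",
		s.Name, len(data), s.FileSize)
}
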
@@ -411,6 +411,7 @@ const (
SHF_OS_NONCONFORMING SectionFlag = 0x100 /* OS-specific processing required. */
SHF_GROUP SectionFlag = 0x200 /* Member of section group. */
SHF_TLS SectionFlag = 0x400 /* Section contains TLS data. */
SHF_COMPRESSED SectionFlag = 0x800 /* Section is compressed. */
SHF_MASKOS SectionFlag = 0x0ff00000 /* OS-specific semantics. */
SHF_MASKPROC SectionFlag = 0xf0000000 /* Processor-specific semantics. */
)
@@ -426,11 +427,34 @@ var shfStrings = []intName{
{0x100, "SHF_OS_NONCONFORMING"},
{0x200, "SHF_GROUP"},
{0x400, "SHF_TLS"},
{0x800, "SHF_COMPRESSED"},
}
func (i SectionFlag) String() string { return flagName(uint32(i), shfStrings, false) }
func (i SectionFlag) GoString() string { return flagName(uint32(i), shfStrings, true) }
// Section compression type.
type CompressionType int
const (
COMPRESS_ZLIB CompressionType = 1 /* ZLIB compression. */
COMPRESS_LOOS CompressionType = 0x60000000 /* First OS-specific. */
COMPRESS_HIOS CompressionType = 0x6fffffff /* Last OS-specific. */
COMPRESS_LOPROC CompressionType = 0x70000000 /* First processor-specific type. */
COMPRESS_HIPROC CompressionType = 0x7fffffff /* Last processor-specific type. */
)
var compressionStrings = []intName{
	{1, "COMPRESS_ZLIB"},
{0x60000000, "COMPRESS_LOOS"},
{0x6fffffff, "COMPRESS_HIOS"},
{0x70000000, "COMPRESS_LOPROC"},
{0x7fffffff, "COMPRESS_HIPROC"},
}
func (i CompressionType) String() string { return stringName(uint32(i), compressionStrings, false) }
func (i CompressionType) GoString() string { return stringName(uint32(i), compressionStrings, true) }
// Prog.Type
type ProgType int
@@ -1878,6 +1902,13 @@ type Dyn32 struct {
Val uint32 /* Integer/Address value. */
}
// ELF32 Compression header.
type Chdr32 struct {
Type uint32
Size uint32
Addralign uint32
}
/*
* Relocation entries.
*/
@@ -1972,6 +2003,14 @@ type Dyn64 struct {
Val uint64 /* Integer/address value */
}
// ELF64 Compression header.
type Chdr64 struct {
Type uint32
Reserved uint32
Size uint64
Addralign uint64
}
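
For reference, a sketch of the framing these headers describe, assuming the standard gABI layout: a section with SHF_COMPRESSED set begins with a Chdr32 or Chdr64 in the file's byte order, followed immediately by the compressed stream. The package decodes this automatically in NewFile below; the hand-rolled decoder here is only an illustration, and the package and helper names are made up.

package example

import (
	"bytes"
	"compress/zlib"
	"debug/elf"
	"encoding/binary"
	"fmt"
	"io"
)

// decompress64 decodes the raw file contents of a compressed section in a
// 64-bit ELF file: a Chdr64, then a zlib stream of ch.Size uncompressed bytes.
func decompress64(raw []byte, order binary.ByteOrder) ([]byte, error) {
	r := bytes.NewReader(raw)
	var ch elf.Chdr64
	if err := binary.Read(r, order, &ch); err != nil {
		return nil, err
	}
	if elf.CompressionType(ch.Type) != elf.COMPRESS_ZLIB {
		return nil, fmt.Errorf("unsupported compression type %v", elf.CompressionType(ch.Type))
	}
	zr, err := zlib.NewReader(r) // r is now positioned just past the header
	if err != nil {
		return nil, err
	}
	defer zr.Close()
	out := make([]byte, ch.Size)
	_, err = io.ReadFull(zr, out)
	return out, err
}
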
/*
* Relocation entries.
*/
@@ -58,6 +58,12 @@ type SectionHeader struct {
Info uint32
Addralign uint64
Entsize uint64
// FileSize is the size of this section in the file in bytes.
// If a section is compressed, FileSize is the size of the
// compressed data, while Size (above) is the size of the
// uncompressed data.
FileSize uint64
}
// A Section represents a single section in an ELF file.
@@ -70,17 +76,23 @@ type Section struct {
// If a client wants Read and Seek it must use
// Open() to avoid fighting over the seek offset
// with other clients.
//
// ReaderAt may be nil if the section is not easily available
// in a random-access form. For example, a compressed section
// may have a nil ReaderAt.
io.ReaderAt
sr *io.SectionReader
compressionType CompressionType
compressionOffset int64
}
// Data reads and returns the contents of the ELF section.
// Even if the section is stored compressed in the ELF file,
// Data returns uncompressed data.
func (s *Section) Data() ([]byte, error) {
-	dat := make([]byte, s.sr.Size())
-	n, err := s.sr.ReadAt(dat, 0)
-	if n == len(dat) {
-		err = nil
-	}
dat := make([]byte, s.Size)
n, err := io.ReadFull(s.Open(), dat)
return dat[0:n], err
}
@@ -94,7 +106,24 @@ func (f *File) stringTable(link uint32) ([]byte, error) {
}
// Open returns a new ReadSeeker reading the ELF section.
-func (s *Section) Open() io.ReadSeeker { return io.NewSectionReader(s.sr, 0, 1<<63-1) }
// Even if the section is stored compressed in the ELF file,
// the ReadSeeker reads uncompressed data.
func (s *Section) Open() io.ReadSeeker {
if s.Flags&SHF_COMPRESSED == 0 {
return io.NewSectionReader(s.sr, 0, 1<<63-1)
}
if s.compressionType == COMPRESS_ZLIB {
return &readSeekerFromReader{
reset: func() (io.Reader, error) {
fr := io.NewSectionReader(s.sr, s.compressionOffset, int64(s.FileSize)-s.compressionOffset)
return zlib.NewReader(fr)
},
size: int64(s.Size),
}
}
err := &FormatError{int64(s.Offset), "unknown compression type", s.compressionType}
return errorReader{err}
}
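
A usage sketch for the new Open (hypothetical binary, section name, and offsets; not part of this change): the returned io.ReadSeeker can be seeked and read like any other, and for a compressed section the adapter introduced later in this commit decompresses and skips forward as needed, restarting the stream on a backward seek.

package main

import (
	"debug/elf"
	"fmt"
	"io"
	"log"
)

func main() {
	f, err := elf.Open("a.out") // hypothetical path
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	s := f.Section(".debug_info") // hypothetical section name
	if s == nil {
		log.Fatal("section not found")
	}

	r := s.Open()
	// Skip a (hypothetical) 16-byte header, then read the next 32 bytes
	// of uncompressed data.
	if _, err := r.Seek(16, io.SeekStart); err != nil {
		log.Fatal(err)
	}
	buf := make([]byte, 32)
	if _, err := io.ReadFull(r, buf); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("% x\n", buf)
}
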
// A ProgHeader represents a single ELF program header.
type ProgHeader struct {
@@ -344,7 +373,7 @@ func NewFile(r io.ReaderAt) (*File, error) {
Flags: SectionFlag(sh.Flags),
Addr: uint64(sh.Addr),
Offset: uint64(sh.Off),
-			Size:      uint64(sh.Size),
FileSize: uint64(sh.Size),
Link: uint32(sh.Link),
Info: uint32(sh.Info),
Addralign: uint64(sh.Addralign),
@@ -360,7 +389,7 @@ func NewFile(r io.ReaderAt) (*File, error) {
Type: SectionType(sh.Type),
Flags: SectionFlag(sh.Flags),
Offset: uint64(sh.Off),
-			Size:      uint64(sh.Size),
FileSize: uint64(sh.Size),
Addr: uint64(sh.Addr),
Link: uint32(sh.Link),
Info: uint32(sh.Info),
@@ -368,8 +397,35 @@ func NewFile(r io.ReaderAt) (*File, error) {
Entsize: uint64(sh.Entsize),
}
}
-		s.sr = io.NewSectionReader(r, int64(s.Offset), int64(s.Size))
-		s.ReaderAt = s.sr
s.sr = io.NewSectionReader(r, int64(s.Offset), int64(s.FileSize))
if s.Flags&SHF_COMPRESSED == 0 {
s.ReaderAt = s.sr
s.Size = s.FileSize
} else {
// Read the compression header.
switch f.Class {
case ELFCLASS32:
ch := new(Chdr32)
if err := binary.Read(s.sr, f.ByteOrder, ch); err != nil {
return nil, err
}
s.compressionType = CompressionType(ch.Type)
s.Size = uint64(ch.Size)
s.Addralign = uint64(ch.Addralign)
s.compressionOffset = int64(binary.Size(ch))
case ELFCLASS64:
ch := new(Chdr64)
if err := binary.Read(s.sr, f.ByteOrder, ch); err != nil {
return nil, err
}
s.compressionType = CompressionType(ch.Type)
s.Size = ch.Size
s.Addralign = ch.Addralign
s.compressionOffset = int64(binary.Size(ch))
}
}
f.Sections[i] = s
}
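
A sketch that builds on the fields populated above (hypothetical binary path; not part of this change): after NewFile runs, the SHF_COMPRESSED flag plus the Size/FileSize pair is enough for a caller to see which sections were stored compressed and how much they shrank.

package main

import (
	"debug/elf"
	"fmt"
	"log"
)

func main() {
	f, err := elf.Open("a.out") // hypothetical path
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	for _, s := range f.Sections {
		if s.Flags&elf.SHF_COMPRESSED == 0 {
			continue
		}
		// FileSize is the compressed size on disk; Size is the
		// uncompressed size taken from the compression header.
		fmt.Printf("%-24s %8d bytes on disk, %8d bytes uncompressed\n",
			s.Name, s.FileSize, s.Size)
	}
}
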
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package elf
import (
"io"
"os"
)
// errorReader returns error from all operations.
type errorReader struct {
error
}
func (r errorReader) Read(p []byte) (n int, err error) {
return 0, r.error
}
func (r errorReader) ReadAt(p []byte, off int64) (n int, err error) {
return 0, r.error
}
func (r errorReader) Seek(offset int64, whence int) (int64, error) {
return 0, r.error
}
func (r errorReader) Close() error {
return r.error
}
// readSeekerFromReader converts an io.Reader into an io.ReadSeeker.
// In general Seek may not be efficient, but it is optimized for
// common cases such as seeking to the end to find the length of the
// data.
type readSeekerFromReader struct {
reset func() (io.Reader, error)
r io.Reader
size int64
offset int64
}
func (r *readSeekerFromReader) start() {
x, err := r.reset()
if err != nil {
r.r = errorReader{err}
} else {
r.r = x
}
r.offset = 0
}
func (r *readSeekerFromReader) Read(p []byte) (n int, err error) {
if r.r == nil {
r.start()
}
n, err = r.r.Read(p)
r.offset += int64(n)
return n, err
}
func (r *readSeekerFromReader) Seek(offset int64, whence int) (int64, error) {
var newOffset int64
switch whence {
case 0:
newOffset = offset
case 1:
newOffset = r.offset + offset
case 2:
newOffset = r.size + offset
default:
return 0, os.ErrInvalid
}
switch {
case newOffset == r.offset:
return newOffset, nil
case newOffset < 0, newOffset > r.size:
return 0, os.ErrInvalid
case newOffset == 0:
r.r = nil
case newOffset == r.size:
r.r = errorReader{io.EOF}
default:
if newOffset < r.offset {
// Restart at the beginning.
r.start()
}
// Read until we reach offset.
var buf [512]byte
for r.offset < newOffset {
b := buf[:]
if newOffset-r.offset < int64(len(buf)) {
b = buf[:newOffset-r.offset]
}
if _, err := r.Read(b); err != nil {
return 0, err
}
}
}
r.offset = newOffset
return r.offset, nil
}
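
A hypothetical in-package test sketch (not part of this change) exercising the adapter the same way (*Section).Open does, with a zlib stream over an in-memory buffer: seeking to the end is answered from the declared size without decompressing anything, and a backward seek discards the current reader so the next Read calls reset and starts over.

package elf

import (
	"bytes"
	"compress/zlib"
	"io"
	"io/ioutil"
	"testing"
)

func TestReadSeekerFromReaderSketch(t *testing.T) {
	payload := []byte("hello, compressed world")

	var buf bytes.Buffer
	zw := zlib.NewWriter(&buf)
	zw.Write(payload)
	zw.Close()

	rs := &readSeekerFromReader{
		reset: func() (io.Reader, error) {
			return zlib.NewReader(bytes.NewReader(buf.Bytes()))
		},
		size: int64(len(payload)),
	}

	// Seeking to the end is served from size; nothing is decompressed.
	if end, err := rs.Seek(0, 2); err != nil || end != int64(len(payload)) {
		t.Fatalf("Seek(0, 2) = %d, %v; want %d, nil", end, err, len(payload))
	}

	// Seeking back to the start discards the current reader; the next
	// Read invokes reset and decompresses from the beginning.
	if _, err := rs.Seek(0, 0); err != nil {
		t.Fatal(err)
	}
	got, err := ioutil.ReadAll(rs)
	if err != nil || !bytes.Equal(got, payload) {
		t.Fatalf("ReadAll = %q, %v; want %q, nil", got, err, payload)
	}
}
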