Commit f86fd782 authored by brian@zim.(none)

Merge baker@bk-internal.mysql.com:/home/bk/mysql-5.1-new

into  zim.(none):/home/brian/mysql/archive-5.1
parents 68076249 3ee76a3e
@@ -2438,7 +2438,7 @@ MYSQL_STORAGE_ENGINE(berkeley,,berkeley-db,,,,storage/bdb,,,[
MYSQL_STORAGE_ENGINE(example,,,,,no,storage/example,,,[
AC_CONFIG_FILES(storage/example/Makefile)
])
MYSQL_STORAGE_ENGINE(archive)
MYSQL_STORAGE_ENGINE(archive,,,,,,storage/archive)
dnl MYSQL_STORAGE_ENGINE(csv,,,,,tina_hton,,ha_tina.o)
MYSQL_STORAGE_ENGINE(csv,,,,,no,storage/csv,,,[
AC_CONFIG_FILES(storage/csv/Makefile)
@@ -2564,6 +2564,7 @@ AC_SUBST(MAKE_BINARY_DISTRIBUTION_OPTIONS)
AC_CONFIG_FILES(Makefile extra/Makefile mysys/Makefile dnl
strings/Makefile regex/Makefile storage/Makefile storage/heap/Makefile dnl
storage/myisam/Makefile storage/myisammrg/Makefile dnl
storage/archive/Makefile dnl
os2/Makefile os2/include/Makefile os2/include/sys/Makefile dnl
man/Makefile BUILD/Makefile vio/Makefile dnl
libmysql/Makefile client/Makefile dnl
......
@@ -83,6 +83,7 @@ sql_yacc.cc sql_yacc.h: $(top_srcdir)/sql/sql_yacc.yy
INC_LIB= $(top_builddir)/regex/libregex.a \
$(top_builddir)/storage/myisam/libmyisam.a \
$(top_builddir)/storage/myisammrg/libmyisammrg.a \
$(top_builddir)/storage/archive/libarchive.a \
$(top_builddir)/storage/heap/libheap.a \
$(top_builddir)/mysys/libmysys.a \
$(top_builddir)/strings/libmystrings.a \
......
@@ -32,6 +32,7 @@ bin_PROGRAMS = mysql_tzinfo_to_sql
gen_lex_hash_LDFLAGS = @NOINST_LDFLAGS@
LDADD = $(top_builddir)/storage/myisam/libmyisam.a \
$(top_builddir)/storage/myisammrg/libmyisammrg.a \
$(top_builddir)/storage/archive/libarchive.a \
$(top_builddir)/storage/heap/libheap.a \
$(top_builddir)/vio/libvio.a \
$(top_builddir)/mysys/libmysys.a \
......
@@ -30,13 +30,13 @@
a storage engine without indexes that could compress data very well.
So, welcome to a completely compressed storage engine. This storage
engine only does inserts. No replace, deletes, or updates. All reads are
complete table scans. Compression is done through gzip (bzip compresses
complete table scans. Compression is done through azip (bzip compresses
better, but only marginally, if someone asks I could add support for
it too, but be aware that it costs a lot more in CPU time than gzip).
it too, but be aware that it costs a lot more in CPU time than azip).
We keep a file pointer open for each instance of ha_archive for each read
but for writes we keep one open file handle just for that. We flush it
only when a read occurs. gzip handles compressing lots of records
only when a read occurs. azip handles compressing lots of records
at once much better than doing lots of little records between writes.
It is possible to not lock on writes but this would then mean we couldn't
handle bulk inserts as well (that is if someone was trying to read at
@@ -84,7 +84,7 @@
Add truncate table command.
Implement versioning, should be easy.
Allow for errors, find a way to mark bad rows.
Talk to the gzip guys, come up with a writable format so that updates are doable
Talk to the azip guys, come up with a writable format so that updates are doable
without switching to a block method.
Add optional feature so that rows can be flushed at interval (which will cause less
compression but may speed up ordered searches).
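The gist of this change set is replacing zlib's opaque gzFile handle with a caller-allocated azio_stream that is passed by address and reports success as a non-zero int. A minimal sketch of the new calling convention, using only functions declared in azlib.h further below; the helper name, path, and row buffer are illustrative, and O_BINARY is assumed to come from MySQL's portability headers (it is 0 on Unix):

#include <fcntl.h>
#include "azlib.h"

static int append_row_sketch(const char *path, const void *row, unsigned len)
{
  azio_stream s;                         /* lives on the caller's stack, no gzFile */

  if (!azopen(&s, path, O_WRONLY|O_APPEND|O_BINARY))
    return -1;                           /* azopen() returns 0 on failure */
  if (azwrite(&s, row, len) != (int)len)
  {
    (void)azclose(&s);
    return -1;
  }
  azflush(&s, Z_SYNC_FLUSH);             /* the same flush the handler issues for readers */
  return azclose(&s);                    /* 0 (Z_OK) on success; writes the gzip trailer */
}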
@@ -254,15 +254,15 @@ ha_archive::ha_archive(TABLE_SHARE *table_arg)
/*
This method reads the header of a datafile and returns whether or not it was successful.
*/
int ha_archive::read_data_header(gzFile file_to_read)
int ha_archive::read_data_header(azio_stream *file_to_read)
{
uchar data_buffer[DATA_BUFFER_SIZE];
DBUG_ENTER("ha_archive::read_data_header");
if (gzrewind(file_to_read) == -1)
if (azrewind(file_to_read) == -1)
DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
if (gzread(file_to_read, data_buffer, DATA_BUFFER_SIZE) != DATA_BUFFER_SIZE)
if (azread(file_to_read, data_buffer, DATA_BUFFER_SIZE) != DATA_BUFFER_SIZE)
DBUG_RETURN(errno ? errno : -1);
DBUG_PRINT("ha_archive::read_data_header", ("Check %u", data_buffer[0]));
@@ -278,7 +278,7 @@ int ha_archive::read_data_header(gzFile file_to_read)
/*
This method writes out the header of a datafile and returns whether or not it was successful.
*/
int ha_archive::write_data_header(gzFile file_to_write)
int ha_archive::write_data_header(azio_stream *file_to_write)
{
uchar data_buffer[DATA_BUFFER_SIZE];
DBUG_ENTER("ha_archive::write_data_header");
@@ -286,7 +286,7 @@ int ha_archive::write_data_header(gzFile file_to_write)
data_buffer[0]= (uchar)ARCHIVE_CHECK_HEADER;
data_buffer[1]= (uchar)ARCHIVE_VERSION;
if (gzwrite(file_to_write, &data_buffer, DATA_BUFFER_SIZE) !=
if (azwrite(file_to_write, &data_buffer, DATA_BUFFER_SIZE) !=
DATA_BUFFER_SIZE)
goto error;
DBUG_PRINT("ha_archive::write_data_header", ("Check %u", (uint)data_buffer[0]));
@@ -425,8 +425,11 @@ ARCHIVE_SHARE *ha_archive::get_share(const char *table_name, TABLE *table)
a gzip file that can be both read and written we keep a writer open
that is shared among all open tables.
*/
if ((share->archive_write= gzopen(share->data_file_name, "ab")) == NULL)
if (!(azopen(&(share->archive_write), share->data_file_name, O_WRONLY|O_APPEND|O_BINARY)))
{
DBUG_PRINT("info", ("Could not open archive write file"));
share->crashed= TRUE;
}
VOID(my_hash_insert(&archive_open_tables, (byte*) share));
thr_lock_init(&share->lock);
}
@@ -451,7 +454,7 @@ int ha_archive::free_share(ARCHIVE_SHARE *share)
thr_lock_delete(&share->lock);
VOID(pthread_mutex_destroy(&share->mutex));
(void)write_meta_file(share->meta_file, share->rows_recorded, FALSE);
if (gzclose(share->archive_write) == Z_ERRNO)
if (azclose(&(share->archive_write)))
rc= 1;
if (my_close(share->meta_file, MYF(0)))
rc= 1;
@@ -492,7 +495,7 @@ int ha_archive::open(const char *name, int mode, uint test_if_locked)
DBUG_RETURN(HA_ERR_OUT_OF_MEM); // Not handled well by calling code!
thr_lock_data_init(&share->lock,&lock,NULL);
if ((archive= gzopen(share->data_file_name, "rb")) == NULL)
if (!(azopen(&archive, share->data_file_name, O_RDONLY|O_BINARY)))
{
if (errno == EROFS || errno == EACCES)
DBUG_RETURN(my_errno= errno);
@@ -526,7 +529,7 @@ int ha_archive::close(void)
DBUG_ENTER("ha_archive::close");
/* First close stream */
if (gzclose(archive) == Z_ERRNO)
if (azclose(&archive))
rc= 1;
/* then also close share */
rc|= free_share(share);
@@ -572,18 +575,18 @@ int ha_archive::create(const char *name, TABLE *table_arg,
error= my_errno;
goto error;
}
if ((archive= gzdopen(create_file, "wb")) == NULL)
if (!azdopen(&archive, create_file, O_WRONLY|O_BINARY))
{
error= errno;
goto error2;
}
if (write_data_header(archive))
if (write_data_header(&archive))
{
error= errno;
goto error3;
}
if (gzclose(archive))
if (azclose(&archive))
{
error= errno;
goto error2;
@@ -594,8 +597,8 @@ int ha_archive::create(const char *name, TABLE *table_arg,
DBUG_RETURN(0);
error3:
/* We already have an error, so ignore results of gzclose. */
(void)gzclose(archive);
/* We already have an error, so ignore results of azclose. */
(void)azclose(&archive);
error2:
my_close(create_file, MYF(0));
delete_table(name);
@@ -607,13 +610,13 @@ error:
/*
This is where the actual row is written out.
*/
int ha_archive::real_write_row(byte *buf, gzFile writer)
int ha_archive::real_write_row(byte *buf, azio_stream *writer)
{
z_off_t written;
uint *ptr, *end;
DBUG_ENTER("ha_archive::real_write_row");
written= gzwrite(writer, buf, table->s->reclength);
written= azwrite(writer, buf, table->s->reclength);
DBUG_PRINT("ha_archive::real_write_row", ("Wrote %d bytes expected %d", written, table->s->reclength));
if (!delayed_insert || !bulk_insert)
share->dirty= TRUE;
@@ -634,7 +637,7 @@ int ha_archive::real_write_row(byte *buf, gzFile writer)
if (size)
{
((Field_blob*) table->field[*ptr])->get_ptr(&data_ptr);
written= gzwrite(writer, data_ptr, (unsigned)size);
written= azwrite(writer, data_ptr, (unsigned)size);
if (written != (z_off_t)size)
DBUG_RETURN(errno ? errno : -1);
}
@@ -665,7 +668,7 @@ int ha_archive::write_row(byte *buf)
table->timestamp_field->set_time();
pthread_mutex_lock(&share->mutex);
share->rows_recorded++;
rc= real_write_row(buf, share->archive_write);
rc= real_write_row(buf, &(share->archive_write));
pthread_mutex_unlock(&share->mutex);
DBUG_RETURN(rc);
@@ -692,20 +695,20 @@ int ha_archive::rnd_init(bool scan)
/*
If dirty, we lock, and then reset/flush the data.
I found that just calling gzflush() doesn't always work.
I found that just calling azflush() doesn't always work.
*/
if (share->dirty == TRUE)
{
pthread_mutex_lock(&share->mutex);
if (share->dirty == TRUE)
{
gzflush(share->archive_write, Z_SYNC_FLUSH);
azflush(&(share->archive_write), Z_SYNC_FLUSH);
share->dirty= FALSE;
}
pthread_mutex_unlock(&share->mutex);
}
if (read_data_header(archive))
if (read_data_header(&archive))
DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
}
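The rnd_init() hunk above uses a double-checked dirty flag: peek without the mutex, then re-check under share->mutex before flushing, so scans that find the writer clean never take the lock. Condensed into a standalone helper (the helper name is hypothetical; the ARCHIVE_SHARE fields are the ones declared in ha_archive.h below):

#include "ha_archive.h"                  /* ARCHIVE_SHARE, azio_stream */

static void flush_pending_rows_sketch(ARCHIVE_SHARE *share)
{
  if (share->dirty == TRUE)                      /* cheap unlocked peek */
  {
    pthread_mutex_lock(&share->mutex);
    if (share->dirty == TRUE)                    /* re-check while holding the lock */
    {
      azflush(&(share->archive_write), Z_SYNC_FLUSH);
      share->dirty= FALSE;
    }
    pthread_mutex_unlock(&share->mutex);
  }
}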
@@ -717,15 +720,15 @@ int ha_archive::rnd_init(bool scan)
This is the method that is used to read a row. It assumes that the row is
positioned where you want it.
*/
int ha_archive::get_row(gzFile file_to_read, byte *buf)
int ha_archive::get_row(azio_stream *file_to_read, byte *buf)
{
int read; // Bytes read, gzread() returns int
int read; // Bytes read, azread() returns int
uint *ptr, *end;
char *last;
size_t total_blob_length= 0;
DBUG_ENTER("ha_archive::get_row");
read= gzread(file_to_read, buf, table->s->reclength);
read= azread(file_to_read, buf, table->s->reclength);
DBUG_PRINT("ha_archive::get_row", ("Read %d bytes expected %d", read, table->s->reclength));
if (read == Z_STREAM_ERROR)
@@ -760,7 +763,7 @@ int ha_archive::get_row(gzFile file_to_read, byte *buf)
size_t size= ((Field_blob*) table->field[*ptr])->get_length();
if (size)
{
read= gzread(file_to_read, last, size);
read= azread(file_to_read, last, size);
if ((size_t) read != size)
DBUG_RETURN(HA_ERR_END_OF_FILE);
((Field_blob*) table->field[*ptr])->set_ptr(size, last);
@@ -790,8 +793,8 @@ int ha_archive::rnd_next(byte *buf)
statistic_increment(table->in_use->status_var.ha_read_rnd_next_count,
&LOCK_status);
current_position= gztell(archive);
rc= get_row(archive, buf);
current_position= aztell(&archive);
rc= get_row(&archive, buf);
if (rc != HA_ERR_END_OF_FILE)
@@ -828,9 +831,9 @@ int ha_archive::rnd_pos(byte * buf, byte *pos)
statistic_increment(table->in_use->status_var.ha_read_rnd_next_count,
&LOCK_status);
current_position= (z_off_t)my_get_ptr(pos, ref_length);
(void)gzseek(archive, current_position, SEEK_SET);
(void)azseek(&archive, current_position, SEEK_SET);
DBUG_RETURN(get_row(archive, buf));
DBUG_RETURN(get_row(&archive, buf));
}
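Row bookmarks in this engine are just uncompressed-stream offsets: rnd_next() records aztell() before reading, and rnd_pos() replays that offset with azseek(..., SEEK_SET). A minimal round trip over the azio API alone, as a sketch only (the helper name and reclength parameter are illustrative):

#include <stdio.h>                       /* SEEK_SET */
#include "azlib.h"                       /* azio_stream, azread, azseek, aztell */

static int reread_row_sketch(azio_stream *s, byte *buf, unsigned reclength)
{
  z_off_t bookmark= aztell(s);           /* uncompressed offset of the next row */

  if (azread(s, buf, reclength) != (int)reclength)
    return -1;
  /* ... later: seek back to the bookmark and fetch the same row again */
  if (azseek(s, bookmark, SEEK_SET) == -1)
    return -1;
  return azread(s, buf, reclength) == (int)reclength ? 0 : -1;
}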
/*
@@ -859,17 +862,17 @@ int ha_archive::optimize(THD* thd, HA_CHECK_OPT* check_opt)
{
DBUG_ENTER("ha_archive::optimize");
int rc;
gzFile writer;
azio_stream writer;
char writer_filename[FN_REFLEN];
/* Flush any waiting data */
gzflush(share->archive_write, Z_SYNC_FLUSH);
azflush(&(share->archive_write), Z_SYNC_FLUSH);
/* Lets create a file to contain the new data */
fn_format(writer_filename, share->table_name, "", ARN,
MY_REPLACE_EXT|MY_UNPACK_FILENAME);
if ((writer= gzopen(writer_filename, "wb")) == NULL)
if (!(azopen(&writer, writer_filename, O_CREAT|O_WRONLY|O_TRUNC|O_BINARY)))
DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
/*
@@ -879,6 +882,7 @@ int ha_archive::optimize(THD* thd, HA_CHECK_OPT* check_opt)
if (check_opt->flags == T_EXTEND)
{
DBUG_PRINT("info", ("archive extended rebuild"));
byte *buf;
/*
@@ -895,14 +899,14 @@ int ha_archive::optimize(THD* thd, HA_CHECK_OPT* check_opt)
Now we will rewind the archive file so that we are positioned at the
start of the file.
*/
rc= read_data_header(archive);
rc= read_data_header(&archive);
/*
Assuming no error from rewinding the archive file, we now write out the
new header for our data file.
*/
if (!rc)
rc= write_data_header(writer);
rc= write_data_header(&writer);
/*
On success of writing out the new header, we now fetch each row and
@@ -911,9 +915,9 @@ int ha_archive::optimize(THD* thd, HA_CHECK_OPT* check_opt)
if (!rc)
{
share->rows_recorded= 0;
while (!(rc= get_row(archive, buf)))
while (!(rc= get_row(&archive, buf)))
{
real_write_row(buf, writer);
real_write_row(buf, &writer);
share->rows_recorded++;
}
}
@@ -924,31 +928,31 @@ int ha_archive::optimize(THD* thd, HA_CHECK_OPT* check_opt)
}
else
{
DBUG_PRINT("info", ("archive quick rebuild"));
/*
The quick method is to just read the data raw, and then compress it directly.
*/
int read; // Bytes read, gzread() returns int
int read; // Bytes read, azread() returns int
char block[IO_SIZE];
if (gzrewind(archive) == -1)
if (azrewind(&archive) == -1)
{
rc= HA_ERR_CRASHED_ON_USAGE;
DBUG_PRINT("info", ("archive HA_ERR_CRASHED_ON_USAGE"));
goto error;
}
while ((read= gzread(archive, block, IO_SIZE)))
gzwrite(writer, block, read);
while ((read= azread(&archive, block, IO_SIZE)))
azwrite(&writer, block, read);
}
gzflush(writer, Z_SYNC_FLUSH);
gzclose(share->archive_write);
share->archive_write= writer;
azclose(&writer);
my_rename(writer_filename,share->data_file_name,MYF(0));
DBUG_RETURN(0);
error:
gzclose(writer);
azclose(&writer);
DBUG_RETURN(rc);
}
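The quick (non-T_EXTEND) branch of optimize() above is a raw recompression pass: rewind the reader, stream IO_SIZE blocks through azread()/azwrite() into the new .ARN file, then my_rename() it over the data file. The copy loop, reduced to a standalone sketch (the name and the early-return error handling are illustrative; IO_SIZE is MySQL's usual block-size constant):

static int recompress_raw_sketch(azio_stream *from, azio_stream *to)
{
  char block[IO_SIZE];
  int nread;

  if (azrewind(from) == -1)              /* mirrors the HA_ERR_CRASHED_ON_USAGE path above */
    return -1;
  while ((nread= azread(from, block, IO_SIZE)) > 0)
    if (azwrite(to, block, nread) != nread)
      return -1;                         /* short write: abandon the rebuild */
  return nread < 0 ? -1 : 0;             /* negative azread() indicates a stream error */
}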
@@ -1090,7 +1094,7 @@ int ha_archive::check(THD* thd, HA_CHECK_OPT* check_opt)
thd->proc_info= "Checking table";
/* Flush any waiting data */
gzflush(share->archive_write, Z_SYNC_FLUSH);
azflush(&(share->archive_write), Z_SYNC_FLUSH);
/*
First we create a buffer that we can use for reading rows, and can pass
@@ -1104,10 +1108,10 @@ int ha_archive::check(THD* thd, HA_CHECK_OPT* check_opt)
start of the file.
*/
if (!rc)
read_data_header(archive);
read_data_header(&archive);
if (!rc)
while (!(rc= get_row(archive, buf)))
while (!(rc= get_row(&archive, buf)))
count--;
my_free((char*)buf, MYF(0));
......
@@ -19,6 +19,7 @@
#endif
#include <zlib.h>
#include "../storage/archive/azlib.h"
/*
Please read ha_archive.cc first. If you are looking for more general
@@ -33,7 +34,7 @@ typedef struct st_archive_share {
pthread_mutex_t mutex;
THR_LOCK lock;
File meta_file; /* Meta file we use */
gzFile archive_write; /* Archive file we are working with */
azio_stream archive_write; /* Archive file we are working with */
bool dirty; /* Flag for if a flush should occur */
bool crashed; /* Meta file is crashed */
ha_rows rows_recorded; /* Number of rows in tables */
@@ -49,7 +50,7 @@ class ha_archive: public handler
{
THR_LOCK_DATA lock; /* MySQL lock */
ARCHIVE_SHARE *share; /* Shared lock info */
gzFile archive; /* Archive file we are working with */
azio_stream archive; /* Archive file we are working with */
z_off_t current_position; /* The position of the row we just read */
byte byte_buffer[IO_SIZE]; /* Initial buffer for our string */
String buffer; /* Buffer used for blob storage */
@@ -77,19 +78,19 @@ public:
int open(const char *name, int mode, uint test_if_locked);
int close(void);
int write_row(byte * buf);
int real_write_row(byte *buf, gzFile writer);
int real_write_row(byte *buf, azio_stream *writer);
int delete_all_rows();
int rnd_init(bool scan=1);
int rnd_next(byte *buf);
int rnd_pos(byte * buf, byte *pos);
int get_row(gzFile file_to_read, byte *buf);
int get_row(azio_stream *file_to_read, byte *buf);
int read_meta_file(File meta_file, ha_rows *rows);
int write_meta_file(File meta_file, ha_rows rows, bool dirty);
ARCHIVE_SHARE *get_share(const char *table_name, TABLE *table);
int free_share(ARCHIVE_SHARE *share);
bool auto_repair() const { return 1; } // For the moment we just do this
int read_data_header(gzFile file_to_read);
int write_data_header(gzFile file_to_write);
int read_data_header(azio_stream *file_to_read);
int write_data_header(azio_stream *file_to_write);
void position(const byte *record);
void info(uint);
int create(const char *name, TABLE *form, HA_CREATE_INFO *create_info);
......
@@ -21,7 +21,7 @@ AUTOMAKE_OPTIONS = foreign
# These are built from source in the Docs directory
EXTRA_DIST =
SUBDIRS =
DIST_SUBDIRS = . csv example bdb heap innobase myisam myisammrg ndb
DIST_SUBDIRS = . csv example bdb heap innobase myisam myisammrg ndb archive
# Don't update the files from bitkeeper
%::SCCS/s.%
# Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
INCLUDES = -I$(top_builddir)/include -I$(top_srcdir)/include \
@ZLIB_INCLUDES@
LDADD = libarchive.a $(top_srcdir)/mysys/libmysys.a \
$(top_srcdir)/dbug/libdbug.a \
$(top_srcdir)/strings/libmystrings.a \
@ZLIB_LIBS@
pkglib_LIBRARIES = libarchive.a
noinst_PROGRAMS = archive_test
archive_test_LDFLAGS = @NOINST_LDFLAGS@
noinst_HEADERS = azlib.h
libarchive_a_SOURCES = azio.c
# Don't update the files from bitkeeper
%::SCCS/s.%
#include <stdio.h>
#include <azlib.h>
#define TEST_STRING "This is a test"
#define BUFFER_LEN 1024
int main(int argc, char *argv[])
{
int ret;
azio_stream foo, foo1;
char buffer[BUFFER_LEN];
MY_INIT(argv[0]);
if (!(ret= azopen(&foo, "test", O_CREAT|O_WRONLY|O_TRUNC|O_BINARY)))
{
printf("Could not create test file\n");
return 0;
}
azwrite(&foo, TEST_STRING, sizeof(TEST_STRING));
azflush(&foo, Z_FINISH);
if (!(ret= azopen(&foo1, "test", O_RDONLY|O_BINARY)))
{
printf("Could not open test file\n");
return 0;
}
ret= azread(&foo1, buffer, BUFFER_LEN);
printf("Read %d bytes\n", ret);
printf("%s\n", buffer);
azrewind(&foo1);
azclose(&foo);
if (!(ret= azopen(&foo, "test", O_APPEND|O_WRONLY|O_BINARY)))
{
printf("Could not create test file\n");
return 0;
}
azwrite(&foo, TEST_STRING, sizeof(TEST_STRING));
azflush(&foo, Z_FINISH);
ret= azread(&foo1, buffer, BUFFER_LEN);
printf("Read %d bytes\n", ret);
printf("%s\n", buffer);
azclose(&foo);
azclose(&foo1);
//unlink("test");
return 0;
}
/*
azio is a modified version of gzio. It makes use of mysys and removes mallocs.
*/
/* gzio.c -- IO on .gz files
* Copyright (C) 1995-2005 Jean-loup Gailly.
* For conditions of distribution and use, see copyright notice in zlib.h
*
* Compile this file with -DNO_GZCOMPRESS to avoid the compression code.
*/
/* @(#) $Id$ */
#include <stdio.h>
#include "zutil.h"
#include "azlib.h"
static int const gz_magic[2] = {0x1f, 0x8b}; /* gzip magic header */
/* gzip flag byte */
#define ASCII_FLAG 0x01 /* bit 0 set: file probably ascii text */
#define HEAD_CRC 0x02 /* bit 1 set: header CRC present */
#define EXTRA_FIELD 0x04 /* bit 2 set: extra field present */
#define ORIG_NAME 0x08 /* bit 3 set: original file name present */
#define COMMENT 0x10 /* bit 4 set: file comment present */
#define RESERVED 0xE0 /* bits 5..7: reserved */
local int az_open(azio_stream *s, const char *path, int Flags, File fd);
local int do_flush(azio_stream *file, int flush);
local int get_byte(azio_stream *s);
local void check_header(azio_stream *s);
local int destroy(azio_stream *s);
local void putLong(File file, uLong x);
local uLong getLong(azio_stream *s);
/* ===========================================================================
Opens a gzip (.gz) file for reading or writing. The open(2) style Flags
select the direction (O_WRONLY or O_APPEND put the stream in write mode,
anything else means read). The file is given either by file descriptor
or path name (if fd == -1).
az_open returns NULL if the file could not be opened or if there was
insufficient memory to allocate the (de)compression state; errno
can be checked to distinguish the two cases (if errno is zero, the
zlib error is Z_MEM_ERROR).
*/
local int az_open (azio_stream *s, const char *path, int Flags, File fd)
{
int err;
int level = Z_DEFAULT_COMPRESSION; /* compression level */
int strategy = Z_DEFAULT_STRATEGY; /* compression strategy */
s->stream.zalloc = (alloc_func)0;
s->stream.zfree = (free_func)0;
s->stream.opaque = (voidpf)0;
zmemzero(s->inbuf, Z_BUFSIZE);
zmemzero(s->outbuf, Z_BUFSIZE);
s->stream.next_in = s->inbuf;
s->stream.next_out = s->outbuf;
s->stream.avail_in = s->stream.avail_out = 0;
s->z_err = Z_OK;
s->z_eof = 0;
s->in = 0;
s->out = 0;
s->back = EOF;
s->crc = crc32(0L, Z_NULL, 0);
s->transparent = 0;
s->mode = 'r';
if (Flags & O_WRONLY || Flags & O_APPEND)
s->mode = 'w';
if (s->mode == 'w') {
#ifdef NO_GZCOMPRESS
err = Z_STREAM_ERROR;
#else
err = deflateInit2(&(s->stream), level,
Z_DEFLATED, -MAX_WBITS, DEF_MEM_LEVEL, strategy);
/* windowBits is passed < 0 to suppress zlib header */
s->stream.next_out = s->outbuf;
#endif
if (err != Z_OK)
{
destroy(s);
return Z_NULL;
}
} else {
s->stream.next_in = s->inbuf;
err = inflateInit2(&(s->stream), -MAX_WBITS);
/* windowBits is passed < 0 to tell that there is no zlib header.
* Note that in this case inflate *requires* an extra "dummy" byte
* after the compressed stream in order to complete decompression and
* return Z_STREAM_END. Here the gzip CRC32 ensures that 4 bytes are
* present after the compressed stream.
*/
if (err != Z_OK)
{
destroy(s);
return Z_NULL;
}
}
s->stream.avail_out = Z_BUFSIZE;
errno = 0;
s->file = fd < 0 ? my_open(path, Flags, MYF(0)) : fd;
if (s->file < 0 )
{
destroy(s);
return Z_NULL;
}
if (s->mode == 'w') {
char buffer[10];
/* Write a very simple .gz header:
*/
snprintf(buffer, 10, "%c%c%c%c%c%c%c%c%c%c", gz_magic[0], gz_magic[1],
Z_DEFLATED, 0 /*flags*/, 0,0,0,0 /*time*/, 0 /*xflags*/, OS_CODE);
s->start = 10L;
my_write(s->file, buffer, s->start, MYF(0));
/* We use 10L instead of ftell(s->file) because ftell causes an
* fflush on some systems. This version of the library doesn't use
* start anyway in write mode, so this initialization is not
* necessary.
*/
} else {
check_header(s); /* skip the .gz header */
s->start = my_tell(s->file, MYF(0)) - s->stream.avail_in;
}
return 1;
}
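In write mode az_open() hand-writes a fixed 10-byte gzip member header and then emits a raw deflate stream (windowBits passed as -MAX_WBITS, so zlib adds no wrapper of its own). The header bytes it produces, spelled out as an illustrative array; OS_CODE is shown here as 0x03, its usual value on Unix builds, so treat that byte as platform dependent:

static const unsigned char az_header_layout_sketch[10]=
{
  0x1f, 0x8b,                  /* gz_magic[0], gz_magic[1]                   */
  0x08,                        /* CM   = Z_DEFLATED                          */
  0x00,                        /* FLG  = 0: no extra field/name/comment/CRC  */
  0x00, 0x00, 0x00, 0x00,      /* MTIME = 0                                  */
  0x00,                        /* XFL  = 0                                   */
  0x03                         /* OS_CODE (0x03 on Unix; platform dependent) */
};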
/* ===========================================================================
Opens a gzip (.gz) file for reading or writing.
*/
int azopen(azio_stream *s, const char *path, int Flags)
{
return az_open(s, path, Flags, -1);
}
/* ===========================================================================
Associate an azio_stream with the file descriptor fd. fd is not dup'ed here
to mimic the behavio(u)r of fdopen.
*/
int azdopen(azio_stream *s, File fd, int Flags)
{
if (fd < 0) return 0;
return az_open (s, NULL, Flags, fd);
}
/* ===========================================================================
Read a byte from an azio_stream; update next_in and avail_in. Return EOF
for end of file.
IN assertion: the stream s has been successfully opened for reading.
*/
local int get_byte(s)
azio_stream *s;
{
if (s->z_eof) return EOF;
if (s->stream.avail_in == 0)
{
errno = 0;
s->stream.avail_in = my_read(s->file, (byte *)s->inbuf, Z_BUFSIZE, MYF(0));
if (s->stream.avail_in == 0)
{
s->z_eof = 1;
// if (ferror(s->file)) s->z_err = Z_ERRNO;
return EOF;
}
s->stream.next_in = s->inbuf;
}
s->stream.avail_in--;
return *(s->stream.next_in)++;
}
/* ===========================================================================
Check the gzip header of an azio_stream opened for reading. Set the stream
mode to transparent if the gzip magic header is not present; set s->err
to Z_DATA_ERROR if the magic header is present but the rest of the header
is incorrect.
IN assertion: the stream s has already been created successfully;
s->stream.avail_in is zero for the first time, but may be non-zero
for concatenated .gz files.
*/
local void check_header(azio_stream *s)
{
int method; /* method byte */
int flags; /* flags byte */
uInt len;
int c;
/* Assure two bytes in the buffer so we can peek ahead -- handle case
where first byte of header is at the end of the buffer after the last
gzip segment */
len = s->stream.avail_in;
if (len < 2) {
if (len) s->inbuf[0] = s->stream.next_in[0];
errno = 0;
len = (uInt)my_read(s->file, (byte *)s->inbuf + len, Z_BUFSIZE >> len, MYF(0));
if (len == 0) s->z_err = Z_ERRNO;
s->stream.avail_in += len;
s->stream.next_in = s->inbuf;
if (s->stream.avail_in < 2) {
s->transparent = s->stream.avail_in;
return;
}
}
/* Peek ahead to check the gzip magic header */
if (s->stream.next_in[0] != gz_magic[0] ||
s->stream.next_in[1] != gz_magic[1]) {
s->transparent = 1;
return;
}
s->stream.avail_in -= 2;
s->stream.next_in += 2;
/* Check the rest of the gzip header */
method = get_byte(s);
flags = get_byte(s);
if (method != Z_DEFLATED || (flags & RESERVED) != 0) {
s->z_err = Z_DATA_ERROR;
return;
}
/* Discard time, xflags and OS code: */
for (len = 0; len < 6; len++) (void)get_byte(s);
if ((flags & EXTRA_FIELD) != 0) { /* skip the extra field */
len = (uInt)get_byte(s);
len += ((uInt)get_byte(s))<<8;
/* len is garbage if EOF but the loop below will quit anyway */
while (len-- != 0 && get_byte(s) != EOF) ;
}
if ((flags & ORIG_NAME) != 0) { /* skip the original file name */
while ((c = get_byte(s)) != 0 && c != EOF) ;
}
if ((flags & COMMENT) != 0) { /* skip the .gz file comment */
while ((c = get_byte(s)) != 0 && c != EOF) ;
}
if ((flags & HEAD_CRC) != 0) { /* skip the header crc */
for (len = 0; len < 2; len++) (void)get_byte(s);
}
s->z_err = s->z_eof ? Z_DATA_ERROR : Z_OK;
}
/* ===========================================================================
* Cleanup then free the given azio_stream. Return a zlib error code.
Try freeing in the reverse order of allocations.
*/
local int destroy (s)
azio_stream *s;
{
int err = Z_OK;
if (s->stream.state != NULL) {
if (s->mode == 'w') {
#ifdef NO_GZCOMPRESS
err = Z_STREAM_ERROR;
#else
err = deflateEnd(&(s->stream));
#endif
}
else if (s->mode == 'r')
{
err = inflateEnd(&(s->stream));
}
}
if (s->file >= 0 && my_close(s->file, MYF(0))) /* only close a valid descriptor */
{
#ifdef ESPIPE
if (errno != ESPIPE) /* fclose is broken for pipes in HP/UX */
#endif
err = Z_ERRNO;
}
if (s->z_err < 0) err = s->z_err;
return err;
}
/* ===========================================================================
Reads the given number of uncompressed bytes from the compressed file.
azread returns the number of bytes actually read (0 for end of file).
*/
int ZEXPORT azread ( azio_stream *s, voidp buf, unsigned len)
{
Bytef *start = (Bytef*)buf; /* starting point for crc computation */
Byte *next_out; /* == stream.next_out but not forced far (for MSDOS) */
if (s->mode != 'r') return Z_STREAM_ERROR;
if (s->z_err == Z_DATA_ERROR || s->z_err == Z_ERRNO) return -1;
if (s->z_err == Z_STREAM_END) return 0; /* EOF */
next_out = (Byte*)buf;
s->stream.next_out = (Bytef*)buf;
s->stream.avail_out = len;
if (s->stream.avail_out && s->back != EOF) {
*next_out++ = s->back;
s->stream.next_out++;
s->stream.avail_out--;
s->back = EOF;
s->out++;
start++;
if (s->last) {
s->z_err = Z_STREAM_END;
return 1;
}
}
while (s->stream.avail_out != 0) {
if (s->transparent) {
/* Copy first the lookahead bytes: */
uInt n = s->stream.avail_in;
if (n > s->stream.avail_out) n = s->stream.avail_out;
if (n > 0) {
zmemcpy(s->stream.next_out, s->stream.next_in, n);
next_out += n;
s->stream.next_out = (Bytef *)next_out;
s->stream.next_in += n;
s->stream.avail_out -= n;
s->stream.avail_in -= n;
}
if (s->stream.avail_out > 0)
{
s->stream.avail_out -=
(uInt)my_read(s->file, (byte *)next_out, s->stream.avail_out, MYF(0));
}
len -= s->stream.avail_out;
s->in += len;
s->out += len;
if (len == 0) s->z_eof = 1;
return (int)len;
}
if (s->stream.avail_in == 0 && !s->z_eof) {
errno = 0;
s->stream.avail_in = (uInt)my_read(s->file, (byte *)s->inbuf, Z_BUFSIZE, MYF(0));
if (s->stream.avail_in == 0)
{
s->z_eof = 1;
}
s->stream.next_in = (Bytef *)s->inbuf;
}
s->in += s->stream.avail_in;
s->out += s->stream.avail_out;
s->z_err = inflate(&(s->stream), Z_NO_FLUSH);
s->in -= s->stream.avail_in;
s->out -= s->stream.avail_out;
if (s->z_err == Z_STREAM_END) {
/* Check CRC and original size */
s->crc = crc32(s->crc, start, (uInt)(s->stream.next_out - start));
start = s->stream.next_out;
if (getLong(s) != s->crc) {
s->z_err = Z_DATA_ERROR;
} else {
(void)getLong(s);
/* The uncompressed length returned by the above getLong() may be
* different from s->out in case of concatenated .gz files.
* Check for such files:
*/
check_header(s);
if (s->z_err == Z_OK) {
inflateReset(&(s->stream));
s->crc = crc32(0L, Z_NULL, 0);
}
}
}
if (s->z_err != Z_OK || s->z_eof) break;
}
s->crc = crc32(s->crc, start, (uInt)(s->stream.next_out - start));
if (len == s->stream.avail_out &&
(s->z_err == Z_DATA_ERROR || s->z_err == Z_ERRNO))
return -1;
return (int)(len - s->stream.avail_out);
}
#ifndef NO_GZCOMPRESS
/* ===========================================================================
Writes the given number of uncompressed bytes into the compressed file.
azwrite returns the number of bytes actually written (0 in case of error).
*/
int azwrite (azio_stream *s, voidpc buf, unsigned len)
{
s->stream.next_in = (Bytef*)buf;
s->stream.avail_in = len;
while (s->stream.avail_in != 0)
{
if (s->stream.avail_out == 0)
{
s->stream.next_out = s->outbuf;
if (my_write(s->file, (byte *)s->outbuf, Z_BUFSIZE, MYF(0)) != Z_BUFSIZE)
{
s->z_err = Z_ERRNO;
break;
}
s->stream.avail_out = Z_BUFSIZE;
}
s->in += s->stream.avail_in;
s->out += s->stream.avail_out;
s->z_err = deflate(&(s->stream), Z_NO_FLUSH);
s->in -= s->stream.avail_in;
s->out -= s->stream.avail_out;
if (s->z_err != Z_OK) break;
}
s->crc = crc32(s->crc, (const Bytef *)buf, len);
return (int)(len - s->stream.avail_in);
}
#endif
/* ===========================================================================
Flushes all pending output into the compressed file. The parameter
flush is as in the deflate() function.
*/
local int do_flush (s, flush)
azio_stream *s;
int flush;
{
uInt len;
int done = 0;
if (s == NULL || s->mode != 'w') return Z_STREAM_ERROR;
s->stream.avail_in = 0; /* should be zero already anyway */
for (;;) {
len = Z_BUFSIZE - s->stream.avail_out;
if (len != 0) {
if ((uInt)my_write(s->file, (byte *)s->outbuf, len, MYF(0)) != len)
{
s->z_err = Z_ERRNO;
return Z_ERRNO;
}
s->stream.next_out = s->outbuf;
s->stream.avail_out = Z_BUFSIZE;
}
if (done) break;
s->out += s->stream.avail_out;
s->z_err = deflate(&(s->stream), flush);
s->out -= s->stream.avail_out;
/* Ignore the second of two consecutive flushes: */
if (len == 0 && s->z_err == Z_BUF_ERROR) s->z_err = Z_OK;
/* deflate has finished flushing only when it hasn't used up
* all the available space in the output buffer:
*/
done = (s->stream.avail_out != 0 || s->z_err == Z_STREAM_END);
if (s->z_err != Z_OK && s->z_err != Z_STREAM_END) break;
}
return s->z_err == Z_STREAM_END ? Z_OK : s->z_err;
}
int ZEXPORT azflush (s, flush)
azio_stream *s;
int flush;
{
int err = do_flush (s, flush);
if (err) return err;
my_sync(s->file, MYF(0));
return s->z_err == Z_STREAM_END ? Z_OK : s->z_err;
}
/* ===========================================================================
Rewinds input file.
*/
int azrewind (s)
azio_stream *s;
{
if (s == NULL || s->mode != 'r') return -1;
s->z_err = Z_OK;
s->z_eof = 0;
s->back = EOF;
s->stream.avail_in = 0;
s->stream.next_in = (Bytef *)s->inbuf;
s->crc = crc32(0L, Z_NULL, 0);
if (!s->transparent) (void)inflateReset(&s->stream);
s->in = 0;
s->out = 0;
return my_seek(s->file, s->start, MY_SEEK_SET, MYF(0));
}
/* ===========================================================================
Sets the starting position for the next azread or azwrite on the given
compressed file. The offset represents a number of bytes in the
uncompressed data stream.
azseek returns the resulting offset location as measured in bytes from
the beginning of the uncompressed stream, or -1 in case of error.
SEEK_END is not implemented, returns error.
In this version of the library, azseek can be extremely slow.
*/
z_off_t azseek (s, offset, whence)
azio_stream *s;
z_off_t offset;
int whence;
{
if (s == NULL || whence == SEEK_END ||
s->z_err == Z_ERRNO || s->z_err == Z_DATA_ERROR) {
return -1L;
}
if (s->mode == 'w') {
#ifdef NO_GZCOMPRESS
return -1L;
#else
if (whence == SEEK_SET) {
offset -= s->in;
}
if (offset < 0) return -1L;
/* At this point, offset is the number of zero bytes to write. */
/* There was a zmemzero here if inbuf was null -Brian */
while (offset > 0) {
uInt size = Z_BUFSIZE;
if (offset < Z_BUFSIZE) size = (uInt)offset;
size = azwrite(s, s->inbuf, size);
if (size == 0) return -1L;
offset -= size;
}
return s->in;
#endif
}
/* Rest of function is for reading only */
/* compute absolute position */
if (whence == SEEK_CUR) {
offset += s->out;
}
if (offset < 0) return -1L;
if (s->transparent) {
/* map to my_seek */
s->back = EOF;
s->stream.avail_in = 0;
s->stream.next_in = (Bytef *)s->inbuf;
if (my_seek(s->file, offset, MY_SEEK_SET, MYF(0)) == MY_FILEPOS_ERROR) return -1L;
s->in = s->out = offset;
return offset;
}
/* For a negative seek, rewind and use positive seek */
if (offset >= s->out) {
offset -= s->out;
} else if (azrewind(s) < 0) {
return -1L;
}
/* offset is now the number of bytes to skip. */
if (offset && s->back != EOF) {
s->back = EOF;
s->out++;
offset--;
if (s->last) s->z_err = Z_STREAM_END;
}
while (offset > 0) {
int size = Z_BUFSIZE;
if (offset < Z_BUFSIZE) size = (int)offset;
size = azread(s, s->outbuf, (uInt)size);
if (size <= 0) return -1L;
offset -= size;
}
return s->out;
}
/* ===========================================================================
Returns the starting position for the next azread or azwrite on the
given compressed file. This position represents a number of bytes in the
uncompressed data stream.
*/
z_off_t ZEXPORT aztell (file)
azio_stream *file;
{
return azseek(file, 0L, SEEK_CUR);
}
/* ===========================================================================
Outputs a long in LSB order to the given file
*/
local void putLong (File file, uLong x)
{
int n;
byte buffer[1];
for (n = 0; n < 4; n++)
{
buffer[0]= (int)(x & 0xff);
my_write(file, buffer, 1, MYF(0));
x >>= 8;
}
}
/* ===========================================================================
Reads a long in LSB order from the given azio_stream. Sets z_err in case
of error.
*/
local uLong getLong (azio_stream *s)
{
uLong x = (uLong)get_byte(s);
int c;
x += ((uLong)get_byte(s))<<8;
x += ((uLong)get_byte(s))<<16;
c = get_byte(s);
if (c == EOF) s->z_err = Z_DATA_ERROR;
x += ((uLong)c)<<24;
return x;
}
/* ===========================================================================
Flushes all pending output if necessary, closes the compressed file
and deallocates all the (de)compression state.
*/
int azclose (azio_stream *s)
{
if (s == NULL) return Z_STREAM_ERROR;
if (s->mode == 'w') {
#ifdef NO_GZCOMPRESS
return Z_STREAM_ERROR;
#else
if (do_flush (s, Z_FINISH) != Z_OK)
return destroy(s);
putLong(s->file, s->crc);
putLong(s->file, (uLong)(s->in & 0xffffffff));
#endif
}
return destroy(s);
}
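Together with the header written by az_open(), the two putLong() calls above complete a standard RFC 1952 gzip member, which is what lets azread() validate the data on the way back in. A summary of the resulting on-disk layout (informational comment only):

/*
  [10-byte header][raw deflate data][CRC32, LSB first][ISIZE = in mod 2^32, LSB first]

  On the read side, azread() compares the stored CRC32 with its running crc
  when inflate() reports Z_STREAM_END, reads ISIZE with a second getLong(),
  and then calls check_header() again so concatenated members keep working.
*/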
/*
This library has been modified for use by the MySQL Archive Engine.
*/
/* zlib.h -- interface of the 'zlib' general purpose compression library
version 1.2.3, July 18th, 2005
Copyright (C) 1995-2005 Jean-loup Gailly and Mark Adler
This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages
arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not
claim that you wrote the original software. If you use this software
in a product, an acknowledgment in the product documentation would be
appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be
misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
Jean-loup Gailly Mark Adler
jloup@gzip.org madler@alumni.caltech.edu
The data format used by the zlib library is described by RFCs (Request for
Comments) 1950 to 1952 in the files http://www.ietf.org/rfc/rfc1950.txt
(zlib format), rfc1951.txt (deflate format) and rfc1952.txt (gzip format).
*/
#include <zlib.h>
#include "../../mysys/mysys_priv.h"
#ifdef __cplusplus
extern "C" {
#endif
/*
The 'zlib' compression library provides in-memory compression and
decompression functions, including integrity checks of the uncompressed
data. This version of the library supports only one compression method
(deflation) but other algorithms will be added later and will have the same
stream interface.
Compression can be done in a single step if the buffers are large
enough (for example if an input file is mmap'ed), or can be done by
repeated calls of the compression function. In the latter case, the
application must provide more input and/or consume the output
(providing more output space) before each call.
The compressed data format used by default by the in-memory functions is
the zlib format, which is a zlib wrapper documented in RFC 1950, wrapped
around a deflate stream, which is itself documented in RFC 1951.
The library also supports reading and writing files in gzip (.gz) format
with an interface similar to that of stdio using the functions that start
with "gz". The gzip format is different from the zlib format. gzip is a
gzip wrapper, documented in RFC 1952, wrapped around a deflate stream.
This library can optionally read and write gzip streams in memory as well.
The zlib format was designed to be compact and fast for use in memory
and on communications channels. The gzip format was designed for single-
file compression on file systems, has a larger header than zlib to maintain
directory information, and uses a different, slower check method than zlib.
The library does not install any signal handler. The decoder checks
the consistency of the compressed data, so the library should never
crash even in case of corrupted input.
*/
/*
The application must update next_in and avail_in when avail_in has
dropped to zero. It must update next_out and avail_out when avail_out
has dropped to zero. The application must initialize zalloc, zfree and
opaque before calling the init function. All other fields are set by the
compression library and must not be updated by the application.
The opaque value provided by the application will be passed as the first
parameter for calls of zalloc and zfree. This can be useful for custom
memory management. The compression library attaches no meaning to the
opaque value.
zalloc must return Z_NULL if there is not enough memory for the object.
If zlib is used in a multi-threaded application, zalloc and zfree must be
thread safe.
On 16-bit systems, the functions zalloc and zfree must be able to allocate
exactly 65536 bytes, but will not be required to allocate more than this
if the symbol MAXSEG_64K is defined (see zconf.h). WARNING: On MSDOS,
pointers returned by zalloc for objects of exactly 65536 bytes *must*
have their offset normalized to zero. The default allocation function
provided by this library ensures this (see zutil.c). To reduce memory
requirements and avoid any allocation of 64K objects, at the expense of
compression ratio, compile the library with -DMAX_WBITS=14 (see zconf.h).
The fields total_in and total_out can be used for statistics or
progress reports. After compression, total_in holds the total size of
the uncompressed data and may be saved for use in the decompressor
(particularly if the decompressor wants to decompress everything in
a single step).
*/
/* constants */
#define Z_NO_FLUSH 0
#define Z_PARTIAL_FLUSH 1 /* will be removed, use Z_SYNC_FLUSH instead */
#define Z_SYNC_FLUSH 2
#define Z_FULL_FLUSH 3
#define Z_FINISH 4
#define Z_BLOCK 5
/* Allowed flush values; see deflate() and inflate() below for details */
#define Z_OK 0
#define Z_STREAM_END 1
#define Z_NEED_DICT 2
#define Z_ERRNO (-1)
#define Z_STREAM_ERROR (-2)
#define Z_DATA_ERROR (-3)
#define Z_MEM_ERROR (-4)
#define Z_BUF_ERROR (-5)
#define Z_VERSION_ERROR (-6)
/* Return codes for the compression/decompression functions. Negative
* values are errors, positive values are used for special but normal events.
*/
#define Z_NO_COMPRESSION 0
#define Z_BEST_SPEED 1
#define Z_BEST_COMPRESSION 9
#define Z_DEFAULT_COMPRESSION (-1)
/* compression levels */
#define Z_FILTERED 1
#define Z_HUFFMAN_ONLY 2
#define Z_RLE 3
#define Z_FIXED 4
#define Z_DEFAULT_STRATEGY 0
/* compression strategy; see deflateInit2() below for details */
#define Z_BINARY 0
#define Z_TEXT 1
#define Z_ASCII Z_TEXT /* for compatibility with 1.2.2 and earlier */
#define Z_UNKNOWN 2
/* Possible values of the data_type field (though see inflate()) */
#define Z_DEFLATED 8
/* The deflate compression method (the only one supported in this version) */
#define Z_NULL 0 /* for initializing zalloc, zfree, opaque */
#define Z_BUFSIZE 16384
typedef struct azio_stream {
z_stream stream;
int z_err; /* error code for last stream operation */
int z_eof; /* set if end of input file */
File file; /* .gz file */
Byte inbuf[Z_BUFSIZE]; /* input buffer */
Byte outbuf[Z_BUFSIZE]; /* output buffer */
uLong crc; /* crc32 of uncompressed data */
char *msg; /* error message */
int transparent; /* 1 if input file is not a .gz file */
char mode; /* 'w' or 'r' */
z_off_t start; /* start of compressed data in file (header skipped) */
z_off_t in; /* bytes into deflate or inflate */
z_off_t out; /* bytes out of deflate or inflate */
int back; /* one character push-back */
int last; /* true if push-back is last character */
} azio_stream;
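Unlike gzio, which allocates its buffers on the heap, azio_stream embeds both Z_BUFSIZE buffers directly (the "removes mallocs" goal stated at the top of azio.c), so every open stream costs the caller a little over 32 KB of storage, for example inside ARCHIVE_SHARE and ha_archive above. A tiny illustrative check, assuming the header can be included on its own:

#include <stdio.h>
#include "azlib.h"

int main(void)
{
  /* Two Z_BUFSIZE (16384 byte) buffers plus the z_stream and bookkeeping
     fields; the exact figure depends on padding and platform type sizes. */
  printf("sizeof(azio_stream) = %lu bytes\n",
         (unsigned long) sizeof(azio_stream));
  return 0;
}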
/* basic functions */
extern int azopen(azio_stream *s, const char *path, int Flags);
/*
Opens a gzip (.gz) file for reading or writing. Flags takes open(2)
style bits rather than an fopen mode string: O_RDONLY for reading, or
O_WRONLY (optionally combined with O_CREAT, O_APPEND, O_TRUNC and
O_BINARY) for writing. The compression level and strategy are currently
fixed in az_open() at Z_DEFAULT_COMPRESSION and Z_DEFAULT_STRATEGY; see
the description of deflateInit2 for more information about strategy.
azopen can be used to read a file which is not in gzip format; in this
case azread will directly read from the file without decompression.
azopen returns NULL if the file could not be opened or if there was
insufficient memory to allocate the (de)compression state; errno
can be checked to distinguish the two cases (if errno is zero, the
zlib error is Z_MEM_ERROR). */
int azdopen(azio_stream *s,File fd, int Flags);
/*
azdopen() associates an azio_stream with the file descriptor fd. File
descriptors are obtained from calls like open, dup, creat, pipe or
fileno (if the file has been previously opened with fopen).
The mode parameter is as in azopen.
The next call of azclose on the returned azio_stream will also close the
file descriptor fd, just like fclose(fdopen(fd), mode) closes the file
descriptor fd. If you want to keep fd open, use azdopen(dup(fd), mode).
azdopen returns NULL if there was insufficient memory to allocate
the (de)compression state.
*/
extern int azread(azio_stream *file, voidp buf, unsigned len);
/*
Reads the given number of uncompressed bytes from the compressed file.
If the input file was not in gzip format, azread copies the given number
of bytes into the buffer.
azread returns the number of uncompressed bytes actually read (0 for
end of file, -1 for error). */
extern int azwrite (azio_stream *file, voidpc buf, unsigned len);
/*
Writes the given number of uncompressed bytes into the compressed file.
azwrite returns the number of uncompressed bytes actually written
(0 in case of error).
*/
extern int azflush(azio_stream *file, int flush);
/*
Flushes all pending output into the compressed file. The parameter
flush is as in the deflate() function. The return value is the zlib
error number (see function gzerror below). gzflush returns Z_OK if
the flush parameter is Z_FINISH and all output could be flushed.
gzflush should be called only when strictly necessary because it can
degrade compression.
*/
extern z_off_t azseek (azio_stream *file,
z_off_t offset, int whence);
/*
Sets the starting position for the next azread or azwrite on the
given compressed file. The offset represents a number of bytes in the
uncompressed data stream. The whence parameter is defined as in lseek(2);
the value SEEK_END is not supported.
If the file is opened for reading, this function is emulated but can be
extremely slow. If the file is opened for writing, only forward seeks are
supported; azseek then compresses a sequence of zeroes up to the new
starting position.
azseek returns the resulting offset location as measured in bytes from
the beginning of the uncompressed stream, or -1 in case of error, in
particular if the file is opened for writing and the new starting position
would be before the current position.
*/
extern int azrewind(azio_stream *file);
/*
Rewinds the given file. This function is supported only for reading.
azrewind(file) is equivalent to (int)azseek(file, 0L, SEEK_SET)
*/
extern z_off_t aztell(azio_stream *file);
/*
Returns the starting position for the next azread or azwrite on the
given compressed file. This position represents a number of bytes in the
uncompressed data stream.
aztell(file) is equivalent to azseek(file, 0L, SEEK_CUR)
*/
extern int azclose(azio_stream *file);
/*
Flushes all pending output if necessary, closes the compressed file
and deallocates all the (de)compression state. The return value is the zlib
error number.
*/
#ifdef __cplusplus
}
#endif