Rewrite loading code to try to satisfy everyone:

- Support all three formats (ggml, ggmf, ggjt).  (However, I didn't
  include the hack needed to support GPT4All files without conversion.
  Those can still be used after converting them with convert.py from my
  other PR.)
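
A simplified sketch of what the magic-number dispatch looks like (illustrative enum and field handling, not the exact loader code); the multi-character constants are also why `-Wno-multichar` shows up in the build changes below:

```cpp
// Sketch only: pick a file format from the leading magic bytes.
#include <cstdint>
#include <cstdio>

enum class file_version { ggml_unversioned, ggmf_v1, ggjt_v1, unknown };

static file_version detect_format(FILE * fp) {
    uint32_t magic = 0;
    if (fread(&magic, sizeof(magic), 1, fp) != 1) return file_version::unknown;

    if (magic == 'ggml') return file_version::ggml_unversioned;  // oldest format, no version field

    uint32_t version = 0;
    if (fread(&version, sizeof(version), 1, fp) != 1) return file_version::unknown;

    if (magic == 'ggmf' && version == 1) return file_version::ggmf_v1;  // adds version + vocab scores
    if (magic == 'ggjt' && version == 1) return file_version::ggjt_v1;  // adds alignment suitable for mmap
    return file_version::unknown;
}
```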

- Support both mmap and read (mmap is used by default, but can be
  disabled with `--no-mmap`, and is automatically disabled for pre-ggjt
  files or on platforms where mmap is not supported).
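
As a rough illustration of that choice (POSIX-only, hypothetical helper names, not the actual loader), mmap is tried first and a plain read is the fallback:

```cpp
// Sketch only: prefer mmap, fall back to reading the file into a malloc'd buffer.
#include <cstdlib>
#include <fcntl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>

struct model_buffer {
    void * addr   = nullptr;
    size_t size   = 0;
    bool   mapped = false;   // true if addr came from mmap
};

static bool load_model_data(const char * path, bool use_mmap, model_buffer & out) {
    int fd = open(path, O_RDONLY);
    if (fd < 0) return false;

    struct stat st;
    if (fstat(fd, &st) != 0) { close(fd); return false; }
    out.size = (size_t) st.st_size;

    if (use_mmap) {
        void * p = mmap(nullptr, out.size, PROT_READ, MAP_SHARED, fd, 0);
        if (p != MAP_FAILED) {
            out.addr   = p;
            out.mapped = true;
            close(fd);           // the mapping stays valid after close
            return true;
        }
        // fall through to a plain read if mmap failed
    }

    out.addr   = malloc(out.size);
    out.mapped = false;
    // Simplified: a real loader would read in a loop and handle short reads.
    bool ok = out.addr && read(fd, out.addr, out.size) == (ssize_t) out.size;
    close(fd);
    return ok;
}
```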

- Support multi-file models like before, but automatically determine the
  number of parts rather than requiring `--n_parts`.
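
One way to infer the part count, sketched here with POSIX stat() and an illustrative helper name, is to probe for numbered sibling files until one is missing:

```cpp
// Sketch only: count "<model>", "<model>.1", "<model>.2", ... on disk.
#include <string>
#include <sys/stat.h>

static int count_model_parts(const std::string & base_path) {
    int n_parts = 1;                       // the base file itself is the first part
    struct stat st;
    while (true) {
        std::string next = base_path + "." + std::to_string(n_parts);
        if (stat(next.c_str(), &st) != 0) {
            break;                         // no more numbered parts
        }
        n_parts++;
    }
    return n_parts;
}
```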

- Improve validation and error checking.

- Stop using the per-file type field (f16) entirely in favor of just
  relying on the per-tensor type/size fields.  This has no immediate
  benefit, but makes it easier to experiment with different formats, and
  should make it easier to support the new GPTQ-for-LLaMa models in the
  future (I have some work in progress on that front).
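
To illustrate, with a hand-written size table rather than ggml's actual helpers: each tensor's byte size follows from its own type and element count, so no file-wide f16 flag is needed:

```cpp
// Sketch only: per-tensor sizing; the constants are illustrative and assume
// element counts that are a multiple of the 32-element quantization block.
#include <cstddef>
#include <cstdint>

enum tensor_type { TYPE_F32, TYPE_F16, TYPE_Q4_0, TYPE_Q4_1 };

static size_t tensor_nbytes(tensor_type type, int64_t n_elements) {
    switch (type) {
        case TYPE_F32:  return (size_t) n_elements * 4;
        case TYPE_F16:  return (size_t) n_elements * 2;
        // 4-bit blocks carry per-block scale factors, so size is not simply n/2.
        case TYPE_Q4_0: return (size_t) (n_elements / 32) * 20;  // 16 bytes of nibbles + fp32 scale
        case TYPE_Q4_1: return (size_t) (n_elements / 32) * 24;  // plus an fp32 minimum as well
    }
    return 0;
}
```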

- Support VirtualLock on Windows (using the same `--mlock` option as on
  Unix).

    - Indicate loading progress when using mmap + mlock.  (Which led me
      to the interesting observation that on my Linux machine, with a
      warm file cache, mlock actually takes some time, whereas mmap
      without mlock starts almost instantly...)

      - To help implement this, move mlock support from ggml to the
        loading code.
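
A rough sketch of what locking with progress might look like on both platforms (illustrative chunk size and reporting; the real loader wires this into its own progress handling):

```cpp
// Sketch only: lock a buffer into RAM with mlock or VirtualLock, a chunk at a
// time, so that coarse progress can be reported while the pages are touched.
#include <cstddef>
#include <cstdio>

#ifdef _WIN32
#include <windows.h>
#else
#include <sys/mman.h>
#endif

static bool lock_in_ram(void * addr, size_t size) {
    const size_t chunk = 64u * 1024 * 1024;   // 64 MiB per step, purely for progress output
    char * p = (char *) addr;
    for (size_t done = 0; done < size; ) {
        size_t len = size - done < chunk ? size - done : chunk;
#ifdef _WIN32
        if (!VirtualLock(p + done, len)) {
            return false;   // a real implementation would also grow the working set size
        }
#else
        if (mlock(p + done, len) != 0) {
            return false;   // typically RLIMIT_MLOCK is too low
        }
#endif
        done += len;
        fprintf(stderr, "\rlocking model in RAM: %3.0f%%", 100.0 * done / size);
    }
    fprintf(stderr, "\n");
    return true;
}
```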

- madvise/PrefetchVirtualMemory support (based on #740)
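
Sketched as a best-effort hint (the Windows call needs Windows 8 or newer; how it is located at runtime is not shown here):

```cpp
// Sketch only: ask the OS to start paging the mapped model in ahead of use.
#include <cstddef>

#ifdef _WIN32
#include <windows.h>   // PrefetchVirtualMemory requires _WIN32_WINNT >= 0x0602
#else
#include <sys/mman.h>
#endif

static void prefetch_mapping(void * addr, size_t size) {
#ifdef _WIN32
    WIN32_MEMORY_RANGE_ENTRY range;
    range.VirtualAddress = addr;
    range.NumberOfBytes  = (SIZE_T) size;
    // Best effort: ignore failures, it is only a hint.
    PrefetchVirtualMemory(GetCurrentProcess(), 1, &range, 0);
#else
    // Best effort: ignore failures, it is only a hint.
    madvise(addr, size, MADV_WILLNEED);
#endif
}
```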

- Switch from ifstream to the `fopen` family of functions to avoid
  unnecessary copying and, when mmap is enabled, allow reusing the same
  file descriptor for both metadata reads and mmap (whereas the existing
  implementation opens the file a second time to mmap).
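
A small sketch of the pattern (POSIX-only, hypothetical file name): metadata is read through the FILE handle, and the same descriptor is then handed to mmap:

```cpp
// Sketch only: one open() serves both the stdio metadata reads and the mapping.
#include <cstdint>
#include <cstdio>
#include <sys/mman.h>
#include <unistd.h>

int main() {
    FILE * fp = fopen("model.bin", "rb");
    if (!fp) return 1;

    uint32_t magic = 0;
    fread(&magic, sizeof(magic), 1, fp);     // header fields read via stdio, no extra copies

    fseek(fp, 0, SEEK_END);
    size_t file_size = (size_t) ftell(fp);

    // Reuse the underlying descriptor for the mapping instead of reopening the file.
    void * data = mmap(nullptr, file_size, PROT_READ, MAP_SHARED, fileno(fp), 0);
    fclose(fp);                              // the mapping survives fclose

    if (data == MAP_FAILED) return 1;
    // ... use the mapped tensor data ...
    munmap(data, file_size);
    return 0;
}
```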

- Quantization now produces a single-file output even with multi-file
  inputs (not really a feature as much as 'it was easier this way').

Implementation notes:

I tried to factor the code into more discrete pieces than before.

Regarding code style: I tried to follow the code style, but I'm naughty
and used a few advanced C++ features repeatedly:

- Destructors to make it easier to ensure everything gets cleaned up.

- Exceptions.  I don't even usually use exceptions when writing C++, and
  I can remove them if desired... but here they make the loading code
  much more succinct while still properly handling a variety of errors,
  ranging from API calls failing to integer overflow and allocation
  failure.  (The exceptions are converted to error codes at the
  API boundary.)
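
As a sketch of how these two points combine (hypothetical names, not the actual llama API): a destructor-based wrapper guarantees cleanup while an exception unwinds, and a try/catch at the exported entry point converts any failure into an error return:

```cpp
// Sketch only: throwing internals wrapped behind a C-style API.
#include <cstdio>
#include <stdexcept>
#include <string>

struct raii_file {
    FILE * fp;
    explicit raii_file(const char * path) : fp(fopen(path, "rb")) {
        if (!fp) throw std::runtime_error(std::string("failed to open ") + path);
    }
    ~raii_file() { if (fp) fclose(fp); }    // runs even when an exception unwinds
};

struct model { /* ... tensors ... */ };

static model * load_model_internal(const char * path) {
    raii_file file(path);                    // throws on failure
    // ... parse the header, validate sizes; throw std::runtime_error on any problem ...
    return new model();
}

// API boundary: exceptions never escape to the (possibly C) caller.
extern "C" model * model_load(const char * path) {
    try {
        return load_model_internal(path);
    } catch (const std::exception & err) {
        fprintf(stderr, "error loading model: %s\n", err.what());
        return nullptr;
    }
}
```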

Co-authored-by: Pavol Rusnak <[email protected]> (for the bit I copied from #740)
comex authored and blackhole89 committed Apr 9, 2023
1 parent aaf3b23 commit f963b63
Showing 14 changed files with 1,209 additions and 829 deletions.
9 changes: 8 additions & 1 deletion CMakeLists.txt
@@ -140,6 +140,7 @@ if (LLAMA_ALL_WARNINGS)
-Wpedantic
-Wcast-qual
-Wno-unused-function
-Wno-multichar
)
else()
# todo : msvc
@@ -152,6 +153,10 @@ if (LLAMA_ALL_WARNINGS)

endif()

if (MSVC)
add_compile_definitions(_CRT_SECURE_NO_WARNINGS)
endif()

if (LLAMA_LTO)
include(CheckIPOSupported)
check_ipo_supported(RESULT result OUTPUT output)
@@ -241,7 +246,9 @@ endif()

add_library(llama
llama.cpp
llama.h)
llama.h
llama_internal.h
llama_util.h)

target_include_directories(llama PUBLIC .)
target_compile_features(llama PUBLIC cxx_std_11) # don't bump
4 changes: 2 additions & 2 deletions Makefile
@@ -37,7 +37,7 @@ LDFLAGS =

# warnings
CFLAGS += -Wall -Wextra -Wpedantic -Wcast-qual -Wdouble-promotion -Wshadow -Wstrict-prototypes -Wpointer-arith -Wno-unused-function
CXXFLAGS += -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function
CXXFLAGS += -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wno-multichar

# OS specific
# TODO: support Windows
@@ -142,7 +142,7 @@ default: main quantize perplexity embedding
ggml.o: ggml.c ggml.h
$(CC) $(CFLAGS) -c ggml.c -o ggml.o

llama.o: llama.cpp llama.h
llama.o: llama.cpp llama.h llama_util.h llama_internal.h
$(CXX) $(CXXFLAGS) -c llama.cpp -o llama.o

common.o: examples/common.cpp examples/common.h
9 changes: 6 additions & 3 deletions examples/common.cpp
@@ -1,7 +1,5 @@
#include "common.h"

#include "ggml.h"

#include <cassert>
#include <cstring>
#include <fstream>
@@ -161,6 +159,8 @@ bool gpt_params_parse(int argc, char ** argv, gpt_params & params) {
params.use_color = true;
} else if (arg == "--mlock") {
params.use_mlock = true;
} else if (arg == "--no-mmap") {
params.use_mmap = false;
} else if (arg == "--mtest") {
params.mem_test = true;
} else if (arg == "--verbose-prompt") {
@@ -240,9 +240,12 @@ void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) {
fprintf(stderr, " -b N, --batch_size N batch size for prompt processing (default: %d)\n", params.n_batch);
fprintf(stderr, " --perplexity compute perplexity over the prompt\n");
fprintf(stderr, " --keep number of tokens to keep from the initial prompt (default: %d, -1 = all)\n", params.n_keep);
if (ggml_mlock_supported()) {
if (llama_mlock_supported()) {
fprintf(stderr, " --mlock force system to keep model in RAM rather than swapping or compressing\n");
}
if (llama_mmap_supported()) {
fprintf(stderr, " --no-mmap do not memory-map model (slower load but may reduce pageouts if not using mlock)\n");
}
fprintf(stderr, " --mtest compute maximum memory usage\n");
fprintf(stderr, " --verbose-prompt print prompt before generation\n");
fprintf(stderr, " -m FNAME, --model FNAME\n");
1 change: 1 addition & 0 deletions examples/common.h
@@ -47,6 +47,7 @@ struct gpt_params {
bool instruct = false; // instruction mode (used for Alpaca models)
bool ignore_eos = false; // do not stop generating after eos
bool perplexity = false; // compute perplexity over the prompt
bool use_mmap = true; // use mmap for faster loads
bool use_mlock = false; // use mlock to keep model in memory
bool mem_test = false; // compute maximum memory usage
bool verbose_prompt = false; // print prompt tokens before generation
1 change: 1 addition & 0 deletions examples/embedding/embedding.cpp
@@ -38,6 +38,7 @@ int main(int argc, char ** argv) {
lparams.seed = params.seed;
lparams.f16_kv = params.memory_f16;
lparams.logits_all = params.perplexity;
lparams.use_mmap = params.use_mmap;
lparams.use_mlock = params.use_mlock;
lparams.embedding = params.embedding;

1 change: 1 addition & 0 deletions examples/main/main.cpp
@@ -97,6 +97,7 @@ int main(int argc, char ** argv) {
lparams.n_parts = params.n_parts;
lparams.seed = params.seed;
lparams.f16_kv = params.memory_f16;
lparams.use_mmap = params.use_mmap;
lparams.use_mlock = params.use_mlock;

ctx = llama_init_from_file(params.model.c_str(), lparams);
1 change: 1 addition & 0 deletions examples/perplexity/perplexity.cpp
@@ -115,6 +115,7 @@ int main(int argc, char ** argv) {
lparams.seed = params.seed;
lparams.f16_kv = params.memory_f16;
lparams.logits_all = params.perplexity;
lparams.use_mmap = params.use_mmap;
lparams.use_mlock = params.use_mlock;
lparams.embedding = params.embedding;

9 changes: 4 additions & 5 deletions examples/quantize-stats/quantize-stats.cpp
@@ -1,5 +1,6 @@
#include "ggml.h"
#include "llama.h"
#include "llama_internal.h"

#include <algorithm>
#include <cassert>
@@ -266,15 +267,13 @@ int main(int argc, char ** argv) {
}
}

// Sort tensors for consistent output
const auto tensors = llama_internal_get_tensor_map(ctx);
std::map<std::string, struct ggml_tensor *> tensors_sorted { tensors.begin(), tensors.end() };
const auto &tensors = llama_internal_get_tensor_map(ctx);

// check layer tensors
int included_layers = 0;
int64_t max_nelements = 0;
bool is_f16 = false;
for (const auto& kv_tensor : tensors_sorted) {
for (const auto& kv_tensor : tensors) {
if (!layer_included(params, kv_tensor.first)) {
continue;
}
@@ -315,7 +314,7 @@

error_stats global_stats {};

for (const auto& kv_tensor : tensors_sorted) {
for (const auto& kv_tensor : tensors) {
if (!layer_included(params, kv_tensor.first)) {
continue;
}
78 changes: 0 additions & 78 deletions ggml.c
@@ -97,17 +97,6 @@ typedef void* thread_ret_t;
#define static_assert(cond, msg) _Static_assert(cond, msg)
#endif

#define GGML_MLOCK_SUPPORT 0

#ifdef __has_include
#if __has_include(<sys/mman.h>)
#undef GGML_MLOCK_SUPPORT
#define GGML_MLOCK_SUPPORT 1
#include <sys/mman.h>
#endif
#endif


/*#define GGML_PERF*/
#define GGML_DEBUG 0
#define GGML_GELU_FP16
@@ -2690,21 +2679,6 @@ static const char * GGML_OP_SYMBOL[GGML_OP_COUNT] = {

static_assert(GGML_OP_COUNT == 35, "GGML_OP_COUNT != 35");

//
// ggml object
//

struct ggml_object {
size_t offs;
size_t size;

struct ggml_object * next;

char padding[8];
};

static const size_t GGML_OBJECT_SIZE = sizeof(struct ggml_object);

static_assert(sizeof(struct ggml_object)%GGML_MEM_ALIGN == 0, "ggml_object size must be a multiple of GGML_MEM_ALIGN");
static_assert(sizeof(struct ggml_tensor)%GGML_MEM_ALIGN == 0, "ggml_tensor size must be a multiple of GGML_MEM_ALIGN");

@@ -2716,7 +2690,6 @@ struct ggml_context {
size_t mem_size;
void * mem_buffer;
bool mem_buffer_owned;
bool mem_buffer_mlocked;
bool no_alloc;

int n_objects;
@@ -3003,7 +2976,6 @@ struct ggml_context * ggml_init(struct ggml_init_params params) {
/*.mem_size =*/ params.mem_size,
/*.mem_buffer =*/ params.mem_buffer ? params.mem_buffer : malloc(params.mem_size),
/*.mem_buffer_owned =*/ params.mem_buffer ? false : true,
/*.mem_buffer_mlocked =*/ false,
/*.no_alloc =*/ params.no_alloc,
/*.n_objects =*/ 0,
/*.objects_begin =*/ NULL,
@@ -3036,14 +3008,6 @@ void ggml_free(struct ggml_context * ctx) {
GGML_PRINT_DEBUG("%s: context %d with %d objects has been freed. memory used = %zu\n",
__func__, i, ctx->n_objects, ctx->objects_end->offs + ctx->objects_end->size);

#if GGML_MLOCK_SUPPORT
if (ctx->mem_buffer_mlocked) {
if (munlock(ctx->mem_buffer, ctx->mem_size)) {
fprintf(stderr, "%s: failed to munlock buffer: %s\n", __func__, strerror(errno));
}
}
#endif

if (ctx->mem_buffer_owned) {
free(ctx->mem_buffer);
}
@@ -3072,48 +3036,6 @@ size_t ggml_set_scratch(struct ggml_context * ctx, struct ggml_scratch scratch)
return result;
}

#ifdef __APPLE__
#define MLOCK_SUGGESTION \
"Try increasing the sysctl values 'vm.user_wire_limit' and 'vm.global_user_wire_limit' and/or " \
"decreasing 'vm.global_no_user_wire_amount'. Also try increasing RLIMIT_MLOCK (ulimit -l).\n"
#else
#define MLOCK_SUGGESTION \
"Try increasing RLIMIT_MLOCK ('ulimit -l' as root).\n"
#endif

bool ggml_mlock_supported(void) {
return GGML_MLOCK_SUPPORT;
}

bool ggml_mlock(
struct ggml_context * ctx,
const void *opt_extra_addr,
size_t opt_extra_len,
char **err_p) {
// TODO: Use SetProcessWorkingSetSize() + VirtualLock() on WIN32
#if GGML_MLOCK_SUPPORT
if (ctx->mem_buffer_mlocked) {
return true;
}
if (mlock(ctx->mem_buffer, ctx->mem_size) ||
(opt_extra_len &&
mlock(opt_extra_addr, opt_extra_len))) {
if ((*err_p = malloc(1024))) {
snprintf(*err_p, 1024,
"failed to mlock %zu-byte buffer: %s\n" MLOCK_SUGGESTION,
ctx->mem_size + opt_extra_len,
strerror(errno));
}
return false;
}
ctx->mem_buffer_mlocked = true;
return true;
#else // GGML_MLOCK_SUPPORT
*err_p = strdup("can't mlock because it's not supported on this system");
return false;
#endif // GGML_MLOCK_SUPPORT
}

////////////////////////////////////////////////////////////////////////////////

struct ggml_tensor * ggml_new_tensor_impl(
20 changes: 13 additions & 7 deletions ggml.h
@@ -253,6 +253,19 @@ enum ggml_op {
GGML_OP_COUNT,
};


// ggml object
struct ggml_object {
size_t offs;
size_t size;

struct ggml_object * next;

char padding[8];
};

static const size_t GGML_OBJECT_SIZE = sizeof(struct ggml_object);

// n-dimensional tensor
struct ggml_tensor {
enum ggml_type type;
@@ -344,13 +357,6 @@ size_t ggml_used_mem(const struct ggml_context * ctx);

size_t ggml_set_scratch(struct ggml_context * ctx, struct ggml_scratch scratch);

bool ggml_mlock_supported(void);
bool ggml_mlock(
struct ggml_context * ctx,
const void *opt_extra_addr,
size_t opt_extra_len,
char **err_p);

struct ggml_tensor * ggml_new_tensor(
struct ggml_context * ctx,
enum ggml_type type,