22 #include "default_engine.h"
27 static int do_slabs_newslab(
struct default_engine *engine,
const unsigned int id);
30 #ifndef DONT_PREALLOC_SLABS
37 static void slabs_preallocate (
const unsigned int maxslabs);
49 int res = POWER_SMALLEST;
53 while (size > engine->slabs.slabclass[res].size)
54 if (res++ == engine->slabs.power_largest)
66 const bool prealloc) {
67 int i = POWER_SMALLEST - 1;
68 unsigned int size =
sizeof(
hash_item) + engine->config.chunk_size;
70 engine->slabs.mem_limit = limit;
74 engine->slabs.mem_base = malloc(engine->slabs.mem_limit);
75 if (engine->slabs.mem_base != NULL) {
76 engine->slabs.mem_current = engine->slabs.mem_base;
77 engine->slabs.mem_avail = engine->slabs.mem_limit;
83 memset(engine->slabs.slabclass, 0,
sizeof(engine->slabs.slabclass));
85 while (++i < POWER_LARGEST && size <= engine->
config.item_size_max / factor) {
87 if (size % CHUNK_ALIGN_BYTES)
88 size += CHUNK_ALIGN_BYTES - (size % CHUNK_ALIGN_BYTES);
90 engine->slabs.slabclass[
i].size =
size;
91 engine->slabs.slabclass[
i].perslab = engine->config.item_size_max / engine->slabs.slabclass[
i].size;
93 if (engine->config.verbose > 1) {
94 fprintf(stderr,
"slab class %3d: chunk size %9u perslab %7u\n",
95 i, engine->slabs.slabclass[i].size, engine->slabs.slabclass[i].perslab);
99 engine->slabs.power_largest =
i;
100 engine->slabs.slabclass[engine->slabs.power_largest].size = engine->config.item_size_max;
101 engine->slabs.slabclass[engine->slabs.power_largest].perslab = 1;
102 if (engine->config.verbose > 1) {
103 fprintf(stderr,
"slab class %3d: chunk size %9u perslab %7u\n",
104 i, engine->slabs.slabclass[i].size, engine->slabs.slabclass[i].perslab);
109 char *t_initial_malloc = getenv(
"T_MEMD_INITIAL_MALLOC");
110 if (t_initial_malloc) {
111 engine->slabs.mem_malloced = (size_t)atol(t_initial_malloc);
116 #ifndef DONT_PREALLOC_SLABS
118 char *pre_alloc = getenv(
"T_MEMD_SLABS_ALLOC");
120 if (pre_alloc == NULL || atoi(pre_alloc) != 0) {
121 slabs_preallocate(power_largest);
126 return ENGINE_SUCCESS;
/*
 * Pre-allocate one page ("slab") in every size class, up to maxslabs
 * classes, so users are not surprised by early out-of-memory errors.
 *
 * NOTE(review): this extract is truncated — the declaration of the loop
 * index `i`, the early return, and the per-class page-allocation call
 * appear to be missing; confirm against the full file.  The whole body is
 * compiled out when DONT_PREALLOC_SLABS is defined.
 */
129 #ifndef DONT_PREALLOC_SLABS
130 static void slabs_preallocate (
const unsigned int maxslabs) {
132 unsigned int prealloc = 0;
/* Stop once maxslabs classes have been primed. */
140 for (i = POWER_SMALLEST; i <= POWER_LARGEST; i++) {
141 if (++prealloc > maxslabs)
149 static int grow_slab_list (
struct default_engine *engine,
const unsigned int id) {
151 if (p->slabs == p->list_size) {
152 size_t new_size = (p->list_size != 0) ? p->list_size * 2 : 16;
153 void *new_list = realloc(p->slab_list, new_size *
sizeof(
void *));
154 if (new_list == 0)
return 0;
155 p->list_size = new_size;
156 p->slab_list = new_list;
161 static int do_slabs_newslab(
struct default_engine *engine,
const unsigned int id) {
163 int len = p->size * p->perslab;
166 if ((engine->slabs.mem_limit && engine->slabs.mem_malloced + len > engine->slabs.mem_limit && p->slabs > 0) ||
167 (grow_slab_list(engine,
id) == 0) ||
168 ((ptr = memory_allocate(engine, (
size_t)len)) == 0)) {
170 MEMCACHED_SLABS_SLABCLASS_ALLOCATE_FAILED(
id);
174 memset(ptr, 0, (
size_t)len);
175 p->end_page_ptr = ptr;
176 p->end_page_free = p->perslab;
178 p->slab_list[p->slabs++] = ptr;
179 engine->slabs.mem_malloced += len;
180 MEMCACHED_SLABS_SLABCLASS_ALLOCATE(
id);
186 static void *do_slabs_alloc(
struct default_engine *engine,
const size_t size,
unsigned int id) {
190 if (id < POWER_SMALLEST || id > engine->slabs.power_largest) {
191 MEMCACHED_SLABS_ALLOCATE_FAILED(size, 0);
195 p = &engine->slabs.slabclass[
id];
197 #ifdef USE_SYSTEM_MALLOC
198 if (engine->slabs.mem_limit && engine->slabs.mem_malloced + size > engine->slabs.mem_limit) {
199 MEMCACHED_SLABS_ALLOCATE_FAILED(size,
id);
202 engine->slabs.mem_malloced +=
size;
204 MEMCACHED_SLABS_ALLOCATE(size,
id, 0, ret);
210 if (! (p->end_page_ptr != 0 || p->sl_curr != 0 ||
211 do_slabs_newslab(engine,
id) != 0)) {
214 }
else if (p->sl_curr != 0) {
216 ret = p->slots[--p->sl_curr];
219 assert(p->end_page_ptr != NULL);
220 ret = p->end_page_ptr;
221 if (--p->end_page_free != 0) {
222 p->end_page_ptr = ((caddr_t)p->end_page_ptr) + p->size;
229 p->requested +=
size;
230 MEMCACHED_SLABS_ALLOCATE(size,
id, p->size, ret);
232 MEMCACHED_SLABS_ALLOCATE_FAILED(size,
id);
238 static void do_slabs_free(
struct default_engine *engine,
void *ptr,
const size_t size,
unsigned int id) {
241 if (id < POWER_SMALLEST || id > engine->slabs.power_largest)
244 MEMCACHED_SLABS_FREE(size,
id, ptr);
245 p = &engine->slabs.slabclass[
id];
247 #ifdef USE_SYSTEM_MALLOC
248 engine->slabs.mem_malloced -=
size;
253 if (p->sl_curr == p->sl_total) {
254 int new_size = (p->sl_total != 0) ? p->sl_total * 2 : 16;
255 void **new_slots = realloc(p->slots, new_size *
sizeof(
void *));
258 p->slots = new_slots;
259 p->sl_total = new_size;
261 p->slots[p->sl_curr++] = ptr;
262 p->requested -=
size;
266 void add_statistics(
const void *cookie,
ADD_STAT add_stats,
267 const char* prefix,
int num,
const char *key,
268 const char *
fmt, ...) {
269 char name[80], val[80];
278 vlen = vsnprintf(val,
sizeof(val) - 1, fmt, ap);
281 if (prefix != NULL) {
282 klen = snprintf(name,
sizeof(name),
"%s:", prefix);
286 klen += snprintf(name + klen,
sizeof(name) - klen,
"%d:", num);
289 klen += snprintf(name + klen,
sizeof(name) - klen,
"%s", key);
291 add_stats(name, klen, val, vlen, cookie);
/*
 * Body fragment of do_slabs_stats(): emits per-class and engine-wide slab
 * statistics through add_statistics().
 *
 * NOTE(review): this extract is heavily truncated — the function signature,
 * the declarations of `i`/`total`/`p`, and most of the value arguments to
 * the add_statistics() calls are missing; confirm against the full file
 * before editing.  Presumably `p` points at slabclass[i] inside the loop.
 */
/* cookie is presumably the client connection handle — TODO confirm. */
299 struct conn *
conn = (
struct conn*)cookie;
/* One group of statistics per populated slab class. */
305 for(i = POWER_SMALLEST; i <= engine->slabs.power_largest; i++) {
308 uint32_t perslab,
slabs;
310 perslab = p->perslab;
/* Geometry of this size class. */
312 add_statistics(cookie, add_stats, NULL, i,
"chunk_size",
"%u",
314 add_statistics(cookie, add_stats, NULL, i,
"chunks_per_page",
"%u",
316 add_statistics(cookie, add_stats, NULL, i,
"total_pages",
"%u",
318 add_statistics(cookie, add_stats, NULL, i,
"total_chunks",
"%u",
/* used = all chunks minus the free list minus the untouched page tail. */
320 add_statistics(cookie, add_stats, NULL, i,
"used_chunks",
"%u",
321 slabs*perslab - p->sl_curr - p->end_page_free);
322 add_statistics(cookie, add_stats, NULL, i,
"free_chunks",
"%u",
324 add_statistics(cookie, add_stats, NULL, i,
"free_chunks_end",
"%u",
326 add_statistics(cookie, add_stats, NULL, i,
"mem_requested",
"%zu",
/* Per-class operation counters (64-bit). */
329 add_statistics(cookie, add_stats, NULL, i,
"get_hits",
"%"PRIu64,
331 add_statistics(cookie, add_stats, NULL, i,
"cmd_set",
"%"PRIu64,
333 add_statistics(cookie, add_stats, NULL, i,
"delete_hits",
"%"PRIu64,
335 add_statistics(cookie, add_stats, NULL, i,
"cas_hits",
"%"PRIu64,
337 add_statistics(cookie, add_stats, NULL, i,
"cas_badval",
"%"PRIu64,
/* Engine-wide totals: num == -1 suppresses the class-id prefix. */
346 add_statistics(cookie, add_stats, NULL, -1,
"active_slabs",
"%d", total);
347 add_statistics(cookie, add_stats, NULL, -1,
"total_malloced",
"%zu",
348 engine->slabs.mem_malloced);
351 static void *memory_allocate(
struct default_engine *engine,
size_t size) {
354 if (engine->slabs.mem_base == NULL) {
358 ret = engine->slabs.mem_current;
360 if (size > engine->slabs.mem_avail) {
365 if (size % CHUNK_ALIGN_BYTES) {
366 size += CHUNK_ALIGN_BYTES - (size % CHUNK_ALIGN_BYTES);
369 engine->slabs.mem_current = ((
char*)engine->slabs.mem_current) +
size;
370 if (size < engine->
slabs.mem_avail) {
371 engine->slabs.mem_avail -=
size;
373 engine->slabs.mem_avail = 0;
380 void *slabs_alloc(
struct default_engine *engine,
size_t size,
unsigned int id) {
383 pthread_mutex_lock(&engine->slabs.
lock);
384 ret = do_slabs_alloc(engine, size,
id);
385 pthread_mutex_unlock(&engine->slabs.
lock);
389 void slabs_free(
struct default_engine *engine,
void *ptr,
size_t size,
unsigned int id) {
390 pthread_mutex_lock(&engine->slabs.
lock);
391 do_slabs_free(engine, ptr, size,
id);
392 pthread_mutex_unlock(&engine->slabs.
lock);
396 pthread_mutex_lock(&engine->slabs.
lock);
397 do_slabs_stats(engine, add_stats, c);
398 pthread_mutex_unlock(&engine->slabs.
lock);