22 #include "default_engine.h"
27 static int do_slabs_newslab(
struct default_engine *engine,
const unsigned int id);
30 #ifndef DONT_PREALLOC_SLABS
37 static void slabs_preallocate (
const unsigned int maxslabs);
/*
 * NOTE(review): fragment of slabs_clsid() — the signature and several
 * surrounding lines are missing from this extraction; code left untouched.
 * Scans slab classes from the smallest upward until one whose chunk size
 * can hold `size` is found; when `res` walks past power_largest no class
 * fits — presumably returns 0 in that case (confirm against full source).
 */
49 int res = POWER_SMALLEST;
53 while (size > engine->slabs.slabclass[res].size)
54 if (res++ == engine->slabs.power_largest)
/*
 * NOTE(review): slabs_init() — the opening of the signature (return type,
 * name, `limit`/`factor` parameters) and a number of interior lines
 * (closing braces, the non-prealloc branch) are missing from this
 * extraction; all visible code is byte-identical.
 *
 * Determines chunk sizes for every slab class: starting from
 * sizeof(hash_item) + chunk_size, each class grows by `factor` until
 * item_size_max is reached; the largest class is pinned to exactly
 * item_size_max with one chunk per page. Optionally grabs the whole
 * memory limit up front so pages are later carved from one region.
 */
66 const bool prealloc) {
67 int i = POWER_SMALLEST - 1;
/* Smallest possible chunk: the item header plus the configured minimum payload. */
68 unsigned int size =
sizeof(
hash_item) + engine->config.chunk_size;
70 engine->slabs.mem_limit = limit;
/* Preallocation path: one big malloc; pages are sliced from mem_base
 * by memory_allocate() instead of individual mallocs. */
74 engine->slabs.mem_base = malloc(engine->slabs.mem_limit);
75 if (engine->slabs.mem_base != NULL) {
76 engine->slabs.mem_current = engine->slabs.mem_base;
77 engine->slabs.mem_avail = engine->slabs.mem_limit;
83 memset(engine->slabs.slabclass, 0,
sizeof(engine->slabs.slabclass));
/* Grow chunk size geometrically; stop one class early so the final
 * class can be dedicated to item_size_max below. */
85 while (++i < POWER_LARGEST && size <= engine->
config.item_size_max / factor) {
/* Round each chunk size up so chunks stay CHUNK_ALIGN_BYTES-aligned. */
87 if (size % CHUNK_ALIGN_BYTES)
88 size += CHUNK_ALIGN_BYTES - (size % CHUNK_ALIGN_BYTES);
90 engine->slabs.slabclass[
i].size =
size;
/* perslab = how many chunks of this class fit in one page. */
91 engine->slabs.slabclass[
i].perslab = engine->config.item_size_max / engine->slabs.slabclass[
i].size;
93 if (engine->config.verbose > 1) {
96 logger->
log(EXTENSION_LOG_INFO, NULL,
97 "slab class %3d: chunk size %9u perslab %7u\n",
98 i, engine->slabs.slabclass[i].size,
99 engine->slabs.slabclass[i].perslab);
/* The last class holds exactly one maximum-sized item per page. */
103 engine->slabs.power_largest =
i;
104 engine->slabs.slabclass[engine->slabs.power_largest].size = engine->config.item_size_max;
105 engine->slabs.slabclass[engine->slabs.power_largest].perslab = 1;
106 if (engine->config.verbose > 1) {
109 logger->
log(EXTENSION_LOG_INFO, NULL,
110 "slab class %3d: chunk size %9u perslab %7u\n",
111 i, engine->slabs.slabclass[i].size,
112 engine->slabs.slabclass[i].perslab);
/* Test hook: pretend this much memory was already malloc'ed, to
 * exercise the mem_limit accounting in test runs. */
117 char *t_initial_malloc = getenv(
"T_MEMD_INITIAL_MALLOC");
118 if (t_initial_malloc) {
119 engine->slabs.mem_malloced = (size_t)atol(t_initial_malloc);
/* Test hook: preallocate one page per class unless the env var is
 * explicitly set to "0". */
124 #ifndef DONT_PREALLOC_SLABS
126 char *pre_alloc = getenv(
"T_MEMD_SLABS_ALLOC");
128 if (pre_alloc == NULL || atoi(pre_alloc) != 0) {
129 slabs_preallocate(power_largest);
134 return ENGINE_SUCCESS;
137 #ifndef DONT_PREALLOC_SLABS
/*
 * NOTE(review): slabs_preallocate() — most of the body (the declaration of
 * `i`, the per-class allocation call, closing braces) is missing from this
 * extraction; visible code left untouched.
 * Allocates one page for each of the first `maxslabs` slab classes so a
 * heavily-used class cannot starve the others of pages at startup.
 */
138 static void slabs_preallocate (
const unsigned int maxslabs) {
140 unsigned int prealloc = 0;
148 for (i = POWER_SMALLEST; i <= POWER_LARGEST; i++) {
/* Stop once the requested number of classes has been pre-seeded. */
149 if (++prealloc > maxslabs)
/*
 * Ensure slab class `id` has room in its page-pointer array for one more
 * page. Returns 0 on allocation failure, nonzero otherwise.
 * NOTE(review): the declaration of `p` (presumably
 * &engine->slabs.slabclass[id]), the success return and the closing brace
 * are missing from this extraction; visible code left untouched.
 */
157 static int grow_slab_list (
struct default_engine *engine,
const unsigned int id) {
159 if (p->slabs == p->list_size) {
/* Double the capacity (start at 16). realloc result goes into a
 * temporary so the old list survives a failed realloc. */
160 size_t new_size = (p->list_size != 0) ? p->list_size * 2 : 16;
161 void *new_list = realloc(p->slab_list, new_size *
sizeof(
void *));
162 if (new_list == 0)
return 0;
163 p->list_size = new_size;
164 p->slab_list = new_list;
/*
 * Allocate one fresh page for slab class `id` and make it the class's
 * "end page" from which new chunks are carved.
 * NOTE(review): the declarations of `p` and `ptr`, the failure/success
 * returns and closing braces are missing from this extraction; visible
 * code left untouched. Caller is expected to hold the slabs lock
 * (do_slabs_alloc calls this under engine->slabs.lock) — confirm.
 */
169 static int do_slabs_newslab(
struct default_engine *engine,
const unsigned int id) {
/* Page length: one full run of this class's chunks. */
171 int len = p->size * p->perslab;
/* Fail if: the memory limit would be exceeded (but always allow the
 * first page of a class), the page-list can't grow, or the page
 * allocation itself fails. */
174 if ((engine->slabs.mem_limit && engine->slabs.mem_malloced + len > engine->slabs.mem_limit && p->slabs > 0) ||
175 (grow_slab_list(engine,
id) == 0) ||
176 ((ptr = memory_allocate(engine, (
size_t)len)) == 0)) {
178 MEMCACHED_SLABS_SLABCLASS_ALLOCATE_FAILED(
id);
/* Zero the page and publish it as the current end page: all of its
 * perslab chunks are initially free. */
182 memset(ptr, 0, (
size_t)len);
183 p->end_page_ptr = ptr;
184 p->end_page_free = p->perslab;
186 p->slab_list[p->slabs++] = ptr;
187 engine->slabs.mem_malloced += len;
188 MEMCACHED_SLABS_SLABCLASS_ALLOCATE(
id);
/*
 * Allocate one chunk from slab class `id` (caller holds the slabs lock —
 * see slabs_alloc()). Returns the chunk pointer or NULL/0 on failure.
 * NOTE(review): the declarations of `ret`/`p`, several returns, the
 * #else branch of USE_SYSTEM_MALLOC, and closing braces are missing from
 * this extraction; visible code left untouched.
 */
194 static void *do_slabs_alloc(
struct default_engine *engine,
const size_t size,
unsigned int id) {
/* Reject ids outside the configured class range. */
198 if (id < POWER_SMALLEST || id > engine->slabs.power_largest) {
199 MEMCACHED_SLABS_ALLOCATE_FAILED(size, 0);
203 p = &engine->slabs.slabclass[
id];
/* System-malloc build: bypass the slab pages entirely; just enforce
 * the memory limit and account the raw size. */
205 #ifdef USE_SYSTEM_MALLOC
206 if (engine->slabs.mem_limit && engine->slabs.mem_malloced + size > engine->slabs.mem_limit) {
207 MEMCACHED_SLABS_ALLOCATE_FAILED(size,
id);
210 engine->slabs.mem_malloced +=
size;
212 MEMCACHED_SLABS_ALLOCATE(size,
id, 0, ret);
/* Fail unless the class has an open end page, a non-empty freelist,
 * or a new page can be allocated right now. */
218 if (! (p->end_page_ptr != 0 || p->sl_curr != 0 ||
219 do_slabs_newslab(engine,
id) != 0)) {
222 }
else if (p->sl_curr != 0) {
/* Prefer recycling a previously-freed chunk from the freelist. */
224 ret = p->slots[--p->sl_curr];
/* Otherwise carve the next chunk off the current end page, advancing
 * the page cursor; the page is retired when its last chunk is used. */
227 assert(p->end_page_ptr != NULL);
228 ret = p->end_page_ptr;
229 if (--p->end_page_free != 0) {
230 p->end_page_ptr = ((caddr_t)p->end_page_ptr) + p->size;
/* `requested` tracks bytes asked for (not chunk-rounded) for stats. */
237 p->requested +=
size;
238 MEMCACHED_SLABS_ALLOCATE(size,
id, p->size, ret);
240 MEMCACHED_SLABS_ALLOCATE_FAILED(size,
id);
/*
 * Return chunk `ptr` to slab class `id`'s freelist (caller holds the
 * slabs lock — see slabs_free()).
 * NOTE(review): the declaration of `p`, early returns, the #else branch
 * of USE_SYSTEM_MALLOC, and the realloc-failure guard on `new_slots`
 * appear to be among the lines missing from this extraction; visible
 * code left untouched.
 */
246 static void do_slabs_free(
struct default_engine *engine,
void *ptr,
const size_t size,
unsigned int id) {
/* Ignore frees for ids outside the configured class range. */
249 if (id < POWER_SMALLEST || id > engine->slabs.power_largest)
252 MEMCACHED_SLABS_FREE(size,
id, ptr);
253 p = &engine->slabs.slabclass[
id];
/* System-malloc build: hand the chunk back to libc and un-account it. */
255 #ifdef USE_SYSTEM_MALLOC
256 engine->slabs.mem_malloced -=
size;
/* Grow the freelist array when full (double, starting at 16). */
261 if (p->sl_curr == p->sl_total) {
262 int new_size = (p->sl_total != 0) ? p->sl_total * 2 : 16;
263 void **new_slots = realloc(p->slots, new_size *
sizeof(
void *));
266 p->slots = new_slots;
267 p->sl_total = new_size;
/* Push the chunk and shrink the requested-bytes accounting. */
269 p->slots[p->sl_curr++] = ptr;
270 p->requested -=
size;
/*
 * Emit one formatted statistic through the engine's ADD_STAT callback.
 * Builds the stat name as "[prefix:][num:]key" (prefix skipped when NULL,
 * num skipped when negative — confirm: the guard lines are missing from
 * this extraction) and formats the value from `fmt` + varargs.
 * NOTE(review): the declarations of klen/vlen/ap, va_start/va_end and
 * several guards are missing here; visible code left untouched. The
 * snprintf return values are accumulated unchecked — on truncation klen
 * could exceed sizeof(name); presumably name/key sizes make this safe,
 * but verify against callers.
 */
274 void add_statistics(
const void *cookie,
ADD_STAT add_stats,
275 const char* prefix,
int num,
const char *key,
276 const char *
fmt, ...) {
277 char name[80], val[80];
/* Format the value first; vlen is what add_stats will report. */
286 vlen = vsnprintf(val,
sizeof(val) - 1, fmt, ap);
289 if (prefix != NULL) {
290 klen = snprintf(name,
sizeof(name),
"%s:", prefix);
294 klen += snprintf(name + klen,
sizeof(name) - klen,
"%d:", num);
297 klen += snprintf(name + klen,
sizeof(name) - klen,
"%s", key);
299 add_stats(name, klen, val, vlen, cookie);
/*
 * NOTE(review): body of do_slabs_stats() — its signature, the per-class
 * declarations (`p`, `total`, the stat value arguments cut off at line
 * ends) and closing braces are missing from this extraction; visible
 * code left untouched.
 * Walks every active slab class and reports per-class chunk/page usage
 * and per-class command counters, then two global totals.
 */
307 struct conn *
conn = (
struct conn*)cookie;
313 for(i = POWER_SMALLEST; i <= engine->slabs.power_largest; i++) {
316 uint32_t perslab,
slabs;
318 perslab = p->perslab;
/* Per-class geometry and usage counters. */
320 add_statistics(cookie, add_stats, NULL, i,
"chunk_size",
"%u",
322 add_statistics(cookie, add_stats, NULL, i,
"chunks_per_page",
"%u",
324 add_statistics(cookie, add_stats, NULL, i,
"total_pages",
"%u",
326 add_statistics(cookie, add_stats, NULL, i,
"total_chunks",
"%u",
/* used = all chunks minus the freelist minus the untouched tail of
 * the current end page. */
328 add_statistics(cookie, add_stats, NULL, i,
"used_chunks",
"%u",
329 slabs*perslab - p->sl_curr - p->end_page_free);
330 add_statistics(cookie, add_stats, NULL, i,
"free_chunks",
"%u",
332 add_statistics(cookie, add_stats, NULL, i,
"free_chunks_end",
"%u",
334 add_statistics(cookie, add_stats, NULL, i,
"mem_requested",
"%zu",
/* Per-class command counters (values presumably from thread stats —
 * the argument lines are missing from this extraction). */
337 add_statistics(cookie, add_stats, NULL, i,
"get_hits",
"%"PRIu64,
339 add_statistics(cookie, add_stats, NULL, i,
"cmd_set",
"%"PRIu64,
341 add_statistics(cookie, add_stats, NULL, i,
"delete_hits",
"%"PRIu64,
343 add_statistics(cookie, add_stats, NULL, i,
"cas_hits",
"%"PRIu64,
345 add_statistics(cookie, add_stats, NULL, i,
"cas_badval",
"%"PRIu64,
/* Global totals: num=-1 suppresses the per-class prefix. */
354 add_statistics(cookie, add_stats, NULL, -1,
"active_slabs",
"%d", total);
355 add_statistics(cookie, add_stats, NULL, -1,
"total_malloced",
"%zu",
356 engine->slabs.mem_malloced);
/*
 * Hand out `size` bytes for a new slab page. When no region was
 * preallocated (mem_base == NULL) it presumably falls back to malloc —
 * that branch is missing from this extraction. Otherwise it carves the
 * page from the preallocated pool, failing when the pool is exhausted.
 * NOTE(review): the declaration of `ret`, the NULL-return on exhaustion
 * and closing braces are missing; visible code left untouched.
 */
359 static void *memory_allocate(
struct default_engine *engine,
size_t size) {
362 if (engine->slabs.mem_base == NULL) {
/* Pool path: the returned page starts at the current cursor. */
366 ret = engine->slabs.mem_current;
368 if (size > engine->slabs.mem_avail) {
/* Advance the cursor by the CHUNK_ALIGN_BYTES-rounded size so the
 * next page stays aligned; only the rounded amount is consumed. */
373 if (size % CHUNK_ALIGN_BYTES) {
374 size += CHUNK_ALIGN_BYTES - (size % CHUNK_ALIGN_BYTES);
377 engine->slabs.mem_current = ((
char*)engine->slabs.mem_current) +
size;
378 if (size < engine->
slabs.mem_avail) {
379 engine->slabs.mem_avail -=
size;
/* Rounding overshot the pool: clamp instead of underflowing. */
381 engine->slabs.mem_avail = 0;
388 void *slabs_alloc(
struct default_engine *engine,
size_t size,
unsigned int id) {
391 pthread_mutex_lock(&engine->slabs.
lock);
392 ret = do_slabs_alloc(engine, size,
id);
393 pthread_mutex_unlock(&engine->slabs.
lock);
/*
 * Public free entry point: serializes do_slabs_free() behind the
 * engine-wide slabs lock. (Closing brace is missing from this
 * extraction; visible code left untouched.)
 */
397 void slabs_free(
struct default_engine *engine,
void *ptr,
size_t size,
unsigned int id) {
398 pthread_mutex_lock(&engine->slabs.
lock);
399 do_slabs_free(engine, ptr, size,
id);
400 pthread_mutex_unlock(&engine->slabs.
lock);
/*
 * NOTE(review): body of the public slabs_stats() wrapper — its signature
 * (presumably (engine, add_stats, c)) is missing from this extraction;
 * visible code left untouched. Serializes do_slabs_stats() behind the
 * engine-wide slabs lock.
 */
404 pthread_mutex_lock(&engine->slabs.
lock);
405 do_slabs_stats(engine, add_stats, c);
406 pthread_mutex_unlock(&engine->slabs.
lock);
/*
 * Re-account the requested-bytes counter of item's slab class after an
 * in-place item resize: subtract the old size, add the new total.
 * Takes the slabs lock for the whole operation.
 * NOTE(review): the declaration of `p`, the early return/unlock on the
 * invalid-id path and the closing brace are missing from this
 * extraction; visible code left untouched.
 */
409 void slabs_adjust_mem_requested(
struct default_engine *engine,
unsigned int id,
size_t old,
size_t ntotal)
411 pthread_mutex_lock(&engine->slabs.
lock);
/* An out-of-range class id here indicates internal corruption. */
413 if (id < POWER_SMALLEST || id > engine->slabs.power_largest) {
416 logger->
log(EXTENSION_LOG_WARNING, NULL,
417 "Internal error! Invalid slab class\n");
421 p = &engine->slabs.slabclass[
id];
422 p->requested = p->requested - old + ntotal;
423 pthread_mutex_unlock(&engine->slabs.
lock);