Make pids.max limit work for apache processes.
Migrated cgroup processes are counted against pids.current, but the pids.max
limit is not enforced on them (for example by killing processes).

Thus we have to check the limit manually before the migration, and if it is
reached we return 429 / HTTP_TOO_MANY_REQUESTS.
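For context, the check this commit relies on is simply a read of the cgroup v2
pids controller files followed by a comparison. The standalone sketch below
shows the same idea outside of Apache; it uses plain stdio rather than the APR
calls in the module, and the cgroup name "apache/vhost1" as well as both helper
names are invented for the example.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Read the first line of a pids controller file into buf (NUL-terminated). */
static int read_cgroup_file(const char *group, const char *name,
                            char *buf, size_t len)
{
    char path[512];
    FILE *f;

    snprintf(path, sizeof(path), "/sys/fs/cgroup/%s/%s", group, name);
    f = fopen(path, "r");
    if (!f)
        return -1;
    if (!fgets(buf, (int)len, f)) {
        fclose(f);
        return -1;
    }
    fclose(f);
    return 0;
}

/* Returns 1 if the pids limit is already reached, 0 if not, -1 on error. */
static int pids_limit_reached(const char *group)
{
    char max_buf[64], cur_buf[64];
    unsigned long cur, max;

    if (read_cgroup_file(group, "pids.max", max_buf, sizeof(max_buf)) != 0)
        return -1;
    if (strncmp(max_buf, "max", 3) == 0)    /* "max" means unlimited */
        return 0;
    if (read_cgroup_file(group, "pids.current", cur_buf, sizeof(cur_buf)) != 0)
        return -1;

    max = strtoul(max_buf, NULL, 10);
    cur = strtoul(cur_buf, NULL, 10);
    return cur >= max ? 1 : 0;
}

int main(int argc, char **argv)
{
    /* "apache/vhost1" is a hypothetical cgroup name used for illustration. */
    const char *group = argc > 1 ? argv[1] : "apache/vhost1";
    int r = pids_limit_reached(group);

    if (r < 0)
        fprintf(stderr, "could not read pids controller for %s\n", group);
    else
        printf("%s: limit %s\n", group, r ? "reached" : "not reached");
    return 0;
}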
arekm committed Nov 23, 2018
1 parent 4b69143 commit 1e50c3b
mod_cgroupmin.c: 96 additions & 38 deletions
@@ -1,14 +1,18 @@
/*
** mod_cgroup.c -- Apache sample cgroup module
** [Autogenerated via ``apxs -n cgroup -g'']
**
** Stripped down version of https://github.com/MatthewIfe/mod_cgroup.git without libcgroup dependency
** Build with: apxs -c mod_cgroupmin.c -o mod_cgroupmin.la
*/

#define ACTIVE_ON 1
#define ACTIVE_OFF 0

#define CGROUP_OK 0
#define CGROUP_ERROR -1
#define CGROUP_MIGRATION_NOTALLOWED -2

#include "httpd.h"
#include "http_config.h"
#include "http_log.h"
@@ -34,40 +38,85 @@ struct cgroup_config {

module AP_MODULE_DECLARE_DATA cgroupmin_module;

// check if cgroup process limit is reached
// returns 0 if the limit is not reached
static int cgroup_procs_not_allowed(apr_pool_t *pool, server_rec *r, char *cgroup) {
apr_file_t *cg;
apr_status_t rc;
const char *cg_filename;
char buf[1024];
unsigned long pid_cur, pid_max;

cg_filename = apr_pstrcat(pool, "/sys/fs/cgroup/", cgroup, "/pids.max", NULL);
if ((rc = apr_file_open(&cg, cg_filename, APR_FOPEN_READ, APR_FPROT_OS_DEFAULT, pool)) != APR_SUCCESS) {
return CGROUP_ERROR;
}
rc = apr_file_read_full(cg, buf, sizeof(buf), NULL);
if (rc != APR_SUCCESS && rc != APR_EOF) {
apr_file_close(cg);
return CGROUP_ERROR;
}
apr_file_close(cg);
if (!strncmp("max", buf, 3)) {
return CGROUP_OK;
}
pid_max = apr_atoi64(buf);

cg_filename = apr_pstrcat(pool, "/sys/fs/cgroup/", cgroup, "/pids.current", NULL);
if ((rc = apr_file_open(&cg, cg_filename, APR_FOPEN_READ, APR_FPROT_OS_DEFAULT, pool)) != APR_SUCCESS) {
return CGROUP_ERROR;
}
rc = apr_file_read_full(cg, buf, sizeof(buf), NULL);
if (rc != APR_SUCCESS && rc != APR_EOF) {
apr_file_close(cg);
return CGROUP_ERROR;
}
apr_file_close(cg);
pid_cur = apr_atoi64(buf);

ap_log_error(APLOG_MARK, APLOG_DEBUG, errno, r, "Migration to cgroup: %s limit check: procs.cur (%lu) >= procs.max (%lu).", cgroup, pid_cur, pid_max);
if (pid_cur >= pid_max) {
ap_log_error(APLOG_MARK, APLOG_ERR, errno, r, "Migration to cgroup: %s not allowed. procs.cur (%lu) >= procs.max (%lu). Limit reached.", cgroup, pid_cur, pid_max);
return CGROUP_MIGRATION_NOTALLOWED;
}

return CGROUP_OK;
}


static int cgroup_attach_task(apr_pool_t *pool, server_rec *r, char *cgroup) {
apr_file_t *cg;
apr_status_t rc;
const char *cg_filename;
const char *pid;


#if 0
{
apr_file_t *hf;
const char *hat_filename;
hat_filename = apr_psprintf(pool, "/proc/%d/attr/current", getpid());

if ((rc = apr_file_open(&hf, hat_filename, APR_FOPEN_READ, APR_FPROT_OS_DEFAULT, pool)) == APR_SUCCESS) {
char buf[1024];
apr_file_read_full(hf, buf, sizeof(buf), NULL);
}
}
#endif

cg_filename = apr_pstrcat(pool, "/sys/fs/cgroup/", cgroup, "/cgroup.procs", NULL);
if ((rc = apr_file_open(&cg, cg_filename, APR_FOPEN_WRITE, APR_FPROT_OS_DEFAULT, pool)) != APR_SUCCESS) {
ap_log_error(APLOG_MARK, APLOG_ERR, errno, r, "Could not migrate to cgroup: %s. Opening cgroup.procs file failed.", cgroup);
return rc;
}

pid = apr_psprintf(pool, "%d", getpid());
if (!pid) {
ap_log_error(APLOG_MARK, APLOG_ERR, errno, r, "Could not migrate to cgroup: %s. pid buffer allocation failed.", cgroup);
return FALSE;
}
if ((rc = apr_file_write_full(cg, pid, strlen(pid), NULL)) != APR_SUCCESS) {
ap_log_error(APLOG_MARK, APLOG_ERR, errno, r, "Could not migrate to cgroup: %s. Write to cgroup.procs failed.", cgroup);
return rc;
}
return apr_file_close(cg);
}
@@ -94,8 +143,8 @@ static const char* cgroup_relinquish(cmd_parms *cmd, void *mconfig, int arg) {
cgconf->relinquish = ACTIVE_ON;
dirconfig->relinquish = ACTIVE_ON;
if (arg == 0) {
cgconf->relinquish = ACTIVE_OFF;
dirconfig->relinquish = ACTIVE_OFF;
}
return NULL;
}
@@ -105,21 +154,28 @@ static void cgroup_child_init(apr_pool_t *pool, server_rec *server)
cgroup_config *cgconf = ap_get_module_config(server->module_config, &cgroupmin_module);
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, NULL, "Using default cgroup: %s (handler)", cgconf->default_cgroup);
if (cgroup_attach_task(pool, server, cgconf->default_cgroup) == 0)
inside_default_cgroup = 1;

}

static int cgroup_enter(request_rec *r)
{
/* We only change cgroup for the main request, not subrequests */
if (r->main)
return OK;

if (inside_default_cgroup) {
cgroup_config *cgconf = ap_get_module_config(r->per_dir_config, &cgroupmin_module);


// check if cgroup process limit is reached
if (cgroup_procs_not_allowed(r->pool, r->server, cgconf->cgroup) == CGROUP_MIGRATION_NOTALLOWED) {
return HTTP_TOO_MANY_REQUESTS;
}

ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, NULL, "Using cgroup: %s (handler)", cgconf->cgroup);
cgroup_attach_task(r->pool, r->server, cgconf->cgroup);
inside_default_cgroup = 0;
}

return OK;
@@ -130,12 +186,12 @@ static int cgroup_exit(request_rec *r) {
cgroup_config *cgconf = ap_get_module_config(r->per_dir_config, &cgroupmin_module);

if (cgconf->relinquish == ACTIVE_OFF) {
return DECLINED;
}

ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, NULL, "Moving back to cgroup %s", cgconf->default_cgroup);
if (cgroup_attach_task(r->pool, r->server, cgconf->default_cgroup) == 0)
inside_default_cgroup = 0;

return OK;
}
@@ -194,14 +250,14 @@ static void *cgroup_merge_dir(apr_pool_t *pool, void *parent_conf, void *child_c

static const command_rec cgroup_cmds[] = {
AP_INIT_TAKE1("cgroup",
cgroup_vhost, NULL, RSRC_CONF|ACCESS_CONF,
"The cgroup you want to allocate the vhost to"),
AP_INIT_TAKE1("defaultcgroup",
cgroup_default, NULL, RSRC_CONF,
"The default cgroup apache should reside in"),
AP_INIT_FLAG("relinquishcgroup",
cgroup_relinquish, NULL, RSRC_CONF,
"Whether to switch out of the cgroup once finishing a request. Takes 'on' or 'off'"),
{NULL}
};

@@ -217,3 +273,5 @@ module AP_MODULE_DECLARE_DATA cgroupmin_module = {
cgroup_register_hooks /* register hooks */
};


// # vim: set ts=4 sw=4 sts=4 et :
