diff --git a/Dockerfile.base b/Dockerfile.base new file mode 100644 index 0000000..857dc03 --- /dev/null +++ b/Dockerfile.base @@ -0,0 +1,20 @@ +FROM python:2-slim-stretch + +RUN apt-get update && apt-get install -y git libmagic1 && \ + useradd -s /bin/false -U fame -m && \ + pip install --no-cache-dir virtualenv && \ + rm -rf /var/lib/apt/lists/* + +COPY . /fame +COPY docker/fame/docker-entrypoint.sh /usr/local/bin/docker-entrypoint.sh + +WORKDIR /fame +RUN utils/run.sh -m pip install --no-cache-dir -r requirements.txt + +ENV FAME_DOCKER=1 +ENV PYTHONUNBUFFERED=1 + +VOLUME [ "/fame/conf" ] + +ENTRYPOINT [ "docker-entrypoint.sh" ] +CMD [ "/bin/bash" ] \ No newline at end of file diff --git a/Dockerfile.web b/Dockerfile.web new file mode 100644 index 0000000..1074476 --- /dev/null +++ b/Dockerfile.web @@ -0,0 +1,16 @@ +FROM fame-base + +ENV FAME_WORKER=0 + +COPY docker/fame/run.web.sh /fame/run.sh + +RUN apt-get update && apt-get install -y libldap2-dev libsasl2-dev build-essential && \ + utils/run.sh -m pip install --no-cache-dir -r requirements-web.txt uwsgi && \ + apt-get purge -y libldap2-dev libsasl2-dev build-essential && \ + rm -rf /var/lib/apt/lists/* + +EXPOSE 8080 + +VOLUME [ "/fame/fame/modules", "/fame/storage", "/fame/web/static/img/avatars" ] + +CMD [ "/fame/run.sh" ] diff --git a/Dockerfile.worker b/Dockerfile.worker new file mode 100644 index 0000000..ca71ca3 --- /dev/null +++ b/Dockerfile.worker @@ -0,0 +1,14 @@ +FROM fame-base + +ENV FAME_WORKER=1 + +RUN apt-get update -y && apt-get install -y build-essential && \ + rm -rf /var/lib/apt/lists/* && \ + git config --global user.name "FAME Web" && \ + git config --global user.email "fame-web@example.com" + +COPY docker/fame/run.worker.sh /fame/run.sh + +RUN utils/run.sh -m pip install --no-cache-dir -r requirements-worker.txt + +CMD [ "/fame/run.sh" ] diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..57c41d3 --- /dev/null +++ b/Makefile @@ -0,0 +1,15 @@ +.DEFAULT_GOAL := all + 
+mongo: + docker build --pull -t fame-mongo docker/mongo/ + +base: + docker build --pull -t fame-base -f Dockerfile.base . + +web: base + docker build -t fame-web -f Dockerfile.web . + +worker: base + docker build -t fame-worker -f Dockerfile.worker . + +all: mongo base web worker diff --git a/agent/agent.py b/agent/agent.py index d2f6a30..8c1cd7f 100644 --- a/agent/agent.py +++ b/agent/agent.py @@ -115,10 +115,11 @@ def run_each_with_type(self, target, target_type): return self.each_with_type(target, target_type) except IsolatedExceptions.ModuleExecutionError, e: self.log("error", "Could not run on %s: %s" % (target, e)) + self.log("debug", traceback.format_exc()) return False - except: - tb = traceback.format_exc() - self.log("error", "Could not run on %s.\n %s" % (target, tb)) + except Exception: + self.log("error", "Could not run on %s.\n" % (target,)) + self.log("debug", traceback.format_exc()) return False @@ -134,6 +135,7 @@ def fake_module(path, klass): sys.modules[path] = klass + fake_module('fame.core.module', IsolatedModule) fake_module('fame.common.exceptions', IsolatedExceptions) @@ -169,7 +171,7 @@ def set_module(self, name, config): module = import_module('module') for _, obj in inspect.getmembers(module, inspect.isclass): - if obj.name and obj.name == name: + if hasattr(obj, "name") and obj.name == name: self.queue = Queue() self.module = obj() @@ -286,7 +288,8 @@ def get_file(task_id): with open(filepath, 'rb') as fd: response = make_response(fd.read()) - response.headers["Content-Disposition"] = "attachment; filename='{0}'".format(os.path.basename(filepath)) + response.headers["Content-Disposition"] = \ + "attachment; filename={0}".format(os.path.basename(filepath)) return response diff --git a/celeryconfig.py b/celeryconfig.py index 9d11329..c3b8e1c 100755 --- a/celeryconfig.py +++ b/celeryconfig.py @@ -16,11 +16,16 @@ CELERY_ACCEPT_CONTENT = ['json_util'] CELERY_TASK_SERIALIZER = 'json_util' -CELERY_IMPORTS = ('fame.core.analysis', 
'fame.core.repository') +CELERY_IMPORTS = ('fame.worker.analysis', 'fame.worker.repository') def connect_to_db(**kwargs): fame_init() + from fame.core.user import User + worker_user = User.get(email="worker@fame") + if worker_user: + fame_config.api_key = worker_user['api_key'] + signals.worker_process_init.connect(connect_to_db) diff --git a/docker-compose.yml b/docker-compose.yml new file mode 100644 index 0000000..5d12462 --- /dev/null +++ b/docker-compose.yml @@ -0,0 +1,95 @@ +version: '3.4' + +networks: + fame_internal: + internal: true + + gateway: + + traefik_default: + external: + name: traefik_default + +x-mongo-env: &mongo_env + MONGO_HOST: "fame-mongo" + MONGO_PORT: "27017" + MONGO_DB: "fame" + MONGO_USERNAME: "fame" + MONGO_PASSWORD: "super-secret-password" + +secrets: + ssh_priv_key: + file: ./ssh/id_rsa + +volumes: + fame-share: + +services: + fame-mongo: + image: fame-mongo + environment: + MONGO_INITDB_DATABASE: "fame" + container_name: fame-mongo + restart: unless-stopped + volumes: + - /opt/fame-mongo:/data/db:z + command: --auth + networks: + - "fame_internal" + + fame-web: + image: fame-web + container_name: fame-web + depends_on: + - fame-mongo + environment: + <<: *mongo_env + FAME_INSTALL_COMMUNITY_REPO: "1" + FAME_URL: "http://localhost/" + FAME_ADMIN_FULLNAME: "The Admin" + FAME_ADMIN_EMAIL: "admin@example.com" + FAME_ADMIN_GROUPS: "cert" + FAME_ADMIN_DEFAULT_SHARING: "cert" + FAME_ADMIN_PERMISSIONS: "*" + FAME_ADMIN_PASSWORD: "secret" + FAME_PUBLIC_KEY: "ssh-rsa ..." 
+ FAME_SECRET_KEY: "" + FAME_AUTHENTICATION_TYPE: "user_password" + # LDAP_URI: "ldap://example.com" + # LDAP_USER: "ldap-user" + # LDAP_PASSWORD: "ldap-password" + # LDAP_FILTER_EMAIL: "(&(objectCategory=Person)(sAMAccountName=*)(mail={})" + # LDAP_FILTER_DN: "OU=People,DC=example,DC=com" + volumes: + - /opt/fame-modules:/fame/fame/modules:z + - /opt/fame-storage:/fame/storage:z + - /opt/fame-avatars:/fame/web/static/img/avatars:z + labels: + - "traefik.enable=true" + - "traefik.docker.network=traefik_default" + - "traefik.port=8080" + - "traefik.frontend.rule=Host: fame.example.com" + networks: + - "fame_internal" + - "traefik_default" + restart: unless-stopped + hostname: fame-web + + fame-worker: + image: fame-worker + container_name: fame-worker + depends_on: + - fame-mongo + - fame-web + environment: + <<: *mongo_env + FAME_URL: "http://fame-web:8080/" + DOCKER_HOST: "unix:///var/run/docker.sock" + volumes: + - /var/run/docker.sock:/var/run/docker.sock:z + - fame-share:/fame/docker-storage:z + networks: + - "fame_internal" + - "gateway" + secrets: + - ssh_priv_key diff --git a/docker/Dockerfile b/docker/Dockerfile deleted file mode 100644 index 3833301..0000000 --- a/docker/Dockerfile +++ /dev/null @@ -1,45 +0,0 @@ - -FROM debian:stretch - -WORKDIR /opt - -RUN apt-get update && \ - apt-get upgrade -y && \ - DEBIAN_FRONTEND=noninteractive apt-get install -y \ - git \ - python-dev \ - python-pip \ - screen \ - p7zip-full \ - libjpeg-dev \ - zlib1g-dev \ - apt-transport-https \ - ca-certificates \ - curl \ - gnupg2 \ - software-properties-common - -RUN apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 \ - --recv 9DA31620334BD75D9DCB49F368818C72E52529D4 -RUN echo "deb http://repo.mongodb.org/apt/debian stretch/mongodb-org/4.0 main" | tee /etc/apt/sources.list.d/mongodb-org-4.0.list - -RUN curl -fsSL https://download.docker.com/linux/debian/gpg | apt-key add - && \ - add-apt-repository \ - "deb [arch=amd64] https://download.docker.com/linux/debian \ - 
$(lsb_release -cs) \ - stable" - -RUN apt-get update && apt-get install -y \ - docker-ce \ - docker-ce-cli \ - containerd.io - -RUN apt-get update && apt-get install -y \ - mongodb-org - - -RUN pip install virtualenv && \ - git clone https://github.com/certsocietegenerale/fame - -ENTRYPOINT ["/opt/fame/docker/launch.sh"] -EXPOSE 4200 diff --git a/docker/README.md b/docker/README.md deleted file mode 100644 index a05a531..0000000 --- a/docker/README.md +++ /dev/null @@ -1,23 +0,0 @@ -# Docker support - -This is probably the quickest way to spawn a Fame dev instance. - -## Install docker - -Follow the [official instructions](https://www.docker.com/community-edition). - -## Clone the repo - - $ git clone https://github.com/certsocietegenerale/fame/ - $ cd fame/docker - -## Build docker image - - $ docker build -t famedev:latest . - -## Run docker image - -To run container based on famedev image with docker inception (bind docker socket and bind fame temp dir). -So chose a temp directory on your system and use the following command. - - $ docker run -it -v /var/run/docker.sock:/var/run/docker.sock -v :/opt/fame/temp --name famedev -p 4200:4200 famedev:latest diff --git a/docker/fame/docker-entrypoint.sh b/docker/fame/docker-entrypoint.sh new file mode 100755 index 0000000..f0abc11 --- /dev/null +++ b/docker/fame/docker-entrypoint.sh @@ -0,0 +1,9 @@ +#!/usr/bin/env bash + +echo "[+] Ensuring empty __init__.py in modules directory" +touch /fame/fame/modules/__init__.py + +echo "[+] Adjusting permissions" +chown fame:fame /fame -R + +exec "$@" \ No newline at end of file diff --git a/docker/fame/run.web.sh b/docker/fame/run.web.sh new file mode 100755 index 0000000..1eb6a8c --- /dev/null +++ b/docker/fame/run.web.sh @@ -0,0 +1,17 @@ +#!/usr/bin/env bash + +TIMEOUT=60 + +echo "[+] Waiting $TIMEOUT seconds for MongoDB to come up" +python docker/wait-for.py fame-mongo 27017 $TIMEOUT +if [ "$?" 
-ne "0" ]; then + echo "[X] Could not connect to MongoDB instance - is it up and running?" + exit 1 +fi + +utils/run.sh utils/install_docker.py + +chown fame:fame /fame -R + +echo "[+] Running webserver" +exec /fame/env/bin/uwsgi -H /fame/env --uid fame --http :8080 -w webserver --callable app \ No newline at end of file diff --git a/docker/fame/run.worker.sh b/docker/fame/run.worker.sh new file mode 100755 index 0000000..9eb79df --- /dev/null +++ b/docker/fame/run.worker.sh @@ -0,0 +1,37 @@ +#!/usr/bin/env bash + +echo "[+] Ensuring presence of temp dir" +mkdir -p temp && chown fame:fame temp/ + +if [ -f /run/secrets/ssh_priv_key ]; then + echo "[+] Copying SSH private key" + mkdir -p conf + cp /run/secrets/ssh_priv_key conf/id_rsa + chown fame:fame conf -R + chmod 600 conf/id_rsa +fi + +if [ -e /var/run/docker.sock ]; then + gid="$(stat -c %g /var/run/docker.sock)" + echo "[+] Creating docker_fame group with gid $gid and adding user 'fame' to it" + groupadd -g $gid docker_fame + usermod -aG docker_fame fame +fi + +TIMEOUT=60 + +echo "[+] Waiting $TIMEOUT seconds for MongoDB to come up" +python docker/wait-for.py fame-mongo 27017 $TIMEOUT +if [ "$?" -ne "0" ]; then + echo "[X] Could not connect to MongoDB instance - is it up and running?" + exit 1 +fi + +echo "[+] Waiting $TIMEOUT seconds for web server to come up" +python docker/wait-for.py fame-web 8080 $TIMEOUT +if [ "$?" -ne "0" ]; then + echo "[X] Could not connect to web server instance - is it up and running?" 
+ exit 1 +fi + +exec utils/run.sh worker.py -r 5 -c '--uid fame --gid fame' diff --git a/docker/launch.sh b/docker/launch.sh deleted file mode 100755 index d93b5f0..0000000 --- a/docker/launch.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/bin/bash -cd /opt/fame -service docker start -mongod -f /etc/mongod.conf & -utils/run.sh utils/install.py -screen -dmS "web" bash -c "utils/run.sh webserver.py" -screen -dmS "worker" bash -c "utils/run.sh worker.py" -/bin/bash diff --git a/docker/mongo/Dockerfile b/docker/mongo/Dockerfile new file mode 100644 index 0000000..f3f6377 --- /dev/null +++ b/docker/mongo/Dockerfile @@ -0,0 +1,3 @@ +FROM mongo + +COPY adduser.js /docker-entrypoint-initdb.d/adduser.js \ No newline at end of file diff --git a/docker/mongo/adduser.js b/docker/mongo/adduser.js new file mode 100644 index 0000000..a39b68c --- /dev/null +++ b/docker/mongo/adduser.js @@ -0,0 +1,9 @@ +"use fame"; +db.createUser({ + user: "fame", + pwd: "super-secret-password", + roles: [ + { role: "readWrite", db: "fame" }, + { role: "dbOwner", db: "fame" } + ] +}); diff --git a/docker/wait-for.py b/docker/wait-for.py new file mode 100644 index 0000000..c977a96 --- /dev/null +++ b/docker/wait-for.py @@ -0,0 +1,58 @@ +#!/usr/bin/env python2 +""" +Taken from: http://code.activestate.com/recipes/576655-wait-for-network-service-to-appear/ +""" +import errno +import socket +import sys + + +def wait_net_service(server, port, timeout=None): + """ Wait for network service to appear + @param timeout: in seconds, if None or 0 wait forever + @return: True of False, if timeout is None may return only True or + throw unhandled network exception + """ + s = socket.socket() + if timeout: + from time import time as now + # time module is needed to calc timeout shared between two exceptions + end = now() + timeout + + while True: + try: + if timeout: + next_timeout = end - now() + if next_timeout < 0: + return False + else: + s.settimeout(next_timeout) + + s.connect((server, port)) + + except 
socket.timeout: + # this exception occurs only if timeout is set + if timeout: + return False + + except socket.error: + # just ignore anything else until we run into timeout + pass + else: + s.close() + return True + + +if __name__ == "__main__": + if len(sys.argv) not in [3, 4]: + print "Usage: %s []" % (sys.argv[0]) + sys.exit(1) + + timeout = 60 + if len(sys.argv) == 4: + timeout = int(sys.argv[3]) + + if wait_net_service(sys.argv[1], int(sys.argv[2]), timeout): + sys.exit(0) + else: + sys.exit(1) diff --git a/docs/concept.rst b/docs/concept.rst index eee2c7f..8068a71 100644 --- a/docs/concept.rst +++ b/docs/concept.rst @@ -80,7 +80,7 @@ FAME relies on three components: .. image:: /images/concept-architecture.png -Components can all be on the same server, or split across multiple servers. +Components can all be on the same server, or split across multiple servers. A dockerized version of FAME is also available. The web server is where antivirus modules and threat intelligence modules are executed. diff --git a/docs/installation.rst b/docs/installation.rst index c045b13..be4dfcd 100644 --- a/docs/installation.rst +++ b/docs/installation.rst @@ -3,7 +3,68 @@ Installation ************ .. note:: - This page documents how to install FAME on Ubuntu 16.04. FAME being written in Python, you can install it on the system of your choice. + FAME provides Dockerfiles and a preconfigured ``docker-compose.yml`` for deploying FAME via Docker. + +.. _docker: + +====== +Docker +====== + +This part of the page presents information on how to run FAME via Docker. + +Preliminaries +============= + +First, you need to have a running docker environment on your machine (preferably including ``docker-compose``). The Docker community provides information on how to install Docker for different operating systems: https://docs.docker.com/install/ + +.. note:: + The docker installation method was only tested on Ubuntu 18.04 LTS. 
By the nature of Docker it should work on all supported platforms but **any other platform than Ubuntu 18.04 LTS is untested**. + +Once you have Docker running, spinning up a working FAME environment is as simple as ``docker-compose up -d``. FAME will listen internally on ``http://fame-web:8080`` so you are required to have a properly configured web server accessible within the FAME stack to be able to access FAME from within your local network. + +Configuration +============= + +The Docker container can be configured by environment variables only. All available environment variables are described below. + +:FAME_INSTALL_COMMUNITY_REPO: (web only) Defines whether or not to install the community module repository when spawning a fresh instance. +:FAME_URL: (worker/web) On the *worker*, this defines the (internal) base URL of the fame-web container (e.g. ``http://fame-web:8080/``). For the *web* container, this defines at which URL the web interface will be available to the users (e.g. ``http://fame.example.com/``) +:FAME_ADMIN_FULLNAME: (web only) The full name of the admin user (e.g. ``FAME Admin``). +:FAME_ADMIN_EMAIL: (web only) The email address of the admin user (e.g. ``admin@fame.example.com``). **Note**: this must be a valid email address. Otherwise logging in through the web interface will not be possible. +:FAME_ADMIN_GROUPS: (web only) The default list of groups for the admin user (e.g. ``cert``). +:FAME_ADMIN_DEFAULT_SHARING: (web only) The default sharing groups for the admin user (e.g. ``cert``). +:FAME_ADMIN_PERMISSIONS: (web only) The default permissions of the admin user (e.g. ``*``). +:FAME_ADMIN_PASSWORD: (web only) The password of the admin user account. +:FAME_PUBLIC_KEY: (web only) The SSH *public* key which is shown to the admins when a private repository is to be cloned (e.g. ``ssh-rsa [..] FAME deploy key``). +:FAME_SECRET_KEY: (web only) The Flask secret to use for session encryption. Should be generated randomly (e.g. via ``cat /dev/urandom | head -c 32 | xxd -p | tr -d '\n'``). +:DOCKER_HOST: (worker only) The address of the docker daemon (e.g.
``unix:///var/run/docker.sock``). Please refer to the official Docker documentation of this variable for all allowed values. +:MONGO_HOST: (worker/web) The (internal) hostname of the MongoDB instance that powers FAME. +:MONGO_PORT: (worker/web) The port of the MongoDB instance that powers FAME. +:MONGO_DB: (worker/web) The database name which FAME should use. +:MONGO_USERNAME: The username of the FAME MongoDB user (**note**: this value must match the user defined in ``docker/mongo/adduser.js``). +:MONGO_PASSWORD: The password of the FAME MongoDB user (**note**: this value must match the password defined in ``docker/mongo/adduser.js``). + +Serving FAME in Docker +====================== + +We recommend using Traefik (https://traefik.io) for serving the FAME web interface. The provided ``docker-compose.yml`` file includes all necessary information for Traefik to serve FAME properly (depending on your configuration of Traefik this also includes serving FAME via TLS). + +Docker networking +================= + +The default configuration creates an internal network for all FAME containers. If you use your own Docker network stack, it is strongly recommended to put all FAME containers into the same dedicated Docker network to achieve container isolation. + +.. note:: + The worker containers need to have a working internet connection to be able to install module requirements. The web interface is not required to have a working internet connection in general. Only if you like fancy profile avatars or want to use antivirus modules a working internet connection is required. Threat intel modules only need to be able to connect to their target instance and thus an internet/network connection is only required if the target instance is not available within the FAME network. + + +======================= +Bare-Metal Installation +======================= + +.. note:: + This part of the page documents how to install FAME on Ubuntu 16.04. 
FAME being written in Python, you can install it on the system of your choice. Dependencies ============ diff --git a/docs/modules.rst b/docs/modules.rst index 4c5a429..dc6f21d 100644 --- a/docs/modules.rst +++ b/docs/modules.rst @@ -29,7 +29,8 @@ The best practice is to do the following: * Create a directory for your module inside one of the repositories. * Make sure your directory is a valid Python package (do not use spaces, make sure every directory in the path as a ``__init__.py`` file). * Create a python file inside your directory for your module. -* Inside your python file, create a class that inherits from :class:`fame.core.module.ProcessingModule`, :class:`fame.core.module.PreloadingModule`, :class:`fame.core.module.ReportingModule`, :class:`fame.core.module.ThreatIntelligenceModule` or :class:`fame.core.module.AntivirusModule`. +* Inside your python file, create a class that inherits from :class:`fame.core.module.PreloadingModule`, :class:`fame.core.module.ProcessingModule`, :class:`fame.core.module.ReportingModule`, :class:`fame.core.module.ThreatIntelligenceModule` or :class:`fame.core.module.AntivirusModule`. 
+ Writing a Processing module =========================== diff --git a/fame/common/config.py b/fame/common/config.py index 38bde1d..cebeb91 100755 --- a/fame/common/config.py +++ b/fame/common/config.py @@ -37,9 +37,40 @@ def get(self, key): except AttributeError: return None + @classmethod + def from_env(cls): + config = Dictionary() + config['mongo_host'] = os.getenv("MONGO_HOST", "localhost") + config['mongo_port'] = int(os.getenv("MONGO_PORT", "27017")) + config['mongo_db'] = os.getenv("MONGO_DB", "fame") + config['mongo_user'] = os.getenv("MONGO_USERNAME", "") + config['mongo_password'] = os.getenv("MONGO_PASSWORD", "") + + config['ldap_uri'] = os.getenv("LDAP_URI", "") + config['ldap_user'] = os.getenv("LDAP_USER", "") + config['ldap_password'] = os.getenv("LDAP_PASSWORD", "") + config['ldap_filter_email'] = os.getenv("LDAP_FILTER_EMAIL", "") + config['ldap_filter_dn'] = os.getenv("LDAP_FILTER_DN", "") + + config['auth'] = os.getenv("FAME_AUTHENTICATION_TYPE", "user_password") + + config['fame_url'] = os.getenv("FAME_URL", "http://localhost/") + config['is_worker'] = os.getenv("FAME_WORKER", "0") + + config['storage_path'] = os.getenv("FAME_STORAGE_PATH", "{root:s}/storage").format(root=FAME_ROOT) + config['temp_path'] = os.getenv("FAME_TEMP_PATH", "{root:s}/temp").format(root=FAME_ROOT) + config['secret_key'] = os.getenv("FAME_SECRET_KEY", "") + + return config + def get_fame_config(): - fame_config = ConfigObject(filename="fame").get('fame') + docker = (os.getenv("FAME_DOCKER") == "1") + if docker: + fame_config = ConfigObject.from_env() + else: + fame_config = ConfigObject(filename="fame").get('fame') + if fame_config is None: fame_config = Dictionary() fame_config['mongo_host'] = 'localhost' @@ -49,4 +80,5 @@ def get_fame_config(): return fame_config + fame_config = get_fame_config() diff --git a/fame/common/mongo_dict.py b/fame/common/mongo_dict.py index d8c774a..df089c0 100755 --- a/fame/common/mongo_dict.py +++ b/fame/common/mongo_dict.py @@ -58,9 
+58,12 @@ def update_value(self, names, value): return self._update({'$set': {mongo_field: value}}) - def append_to(self, names, value): + def append_to(self, names, value, set_=True): self._local_field(names).append(value) - return self._update({'$addToSet': {self._mongo_field(names): value}}) + if set_: + return self._update({'$addToSet': {self._mongo_field(names): value}}) + else: + return self._update({'$push': {self._mongo_field(names): value}}) def remove_from(self, names, value): local_array = self._local_field(names) diff --git a/fame/common/utils.py b/fame/common/utils.py index 12c5372..a249743 100755 --- a/fame/common/utils.py +++ b/fame/common/utils.py @@ -1,9 +1,7 @@ import os -import requests import collections from time import sleep from uuid import uuid4 -from urlparse import urljoin from datetime import datetime from shutil import copyfileobj from werkzeug.utils import secure_filename @@ -65,29 +63,19 @@ def ordered_list_value(list_of_values): return result -def send_file_to_remote(file, url): - if isinstance(file, basestring): - file = open(file, 'rb') - - url = urljoin(fame_config.remote, url) - response = requests.post(url, files={'file': file}, headers={'X-API-KEY': fame_config.api_key}) - response.raise_for_status() - - file.close() - - return response - - def unique_for_key(l, key): return {d[key]: d for d in l}.values() -def tempdir(): - tempdir = os.path.join(fame_config.temp_path, str(uuid4()).replace('-', '')) +def tempdir(prefix=None): + if not prefix: + prefix = fame_config.temp_path + + tempdir = os.path.join(prefix, str(uuid4()).replace('-', '')) try: os.makedirs(tempdir) - except: + except OSError: pass return tempdir @@ -95,7 +83,8 @@ def tempdir(): def save_response(response): tmp = tempdir() - filename = secure_filename(parse_requests_response(response).filename_unsafe) + filename = secure_filename( + parse_requests_response(response).filename_unsafe) filepath = os.path.join(tmp, filename) with open(filepath, 'wb') as out: diff 
--git a/fame/core/analysis.py b/fame/core/analysis.py index d7de3ca..a4f2951 100755 --- a/fame/core/analysis.py +++ b/fame/core/analysis.py @@ -1,13 +1,10 @@ import os -import requests import datetime import traceback from shutil import copy -from hashlib import md5 -from urlparse import urljoin from fame.common.config import fame_config -from fame.common.utils import iterify, u, send_file_to_remote +from fame.common.utils import iterify, u from fame.common.mongo_dict import MongoDict from fame.core.store import store from fame.core.celeryctl import celery @@ -15,14 +12,6 @@ from fame.core.config import Config -# Celery task to retrieve analysis object and run specific module on it -@celery.task -def run_module(analysis_id, module): - dispatcher.reload() - analysis = Analysis(store.analysis.find_one({'_id': analysis_id})) - analysis.run(module) - - class Analysis(MongoDict): STATUS_ERROR = 'error' STATUS_PENDING = 'pending' @@ -81,6 +70,10 @@ def __init__(self, values): self._automatic() self.resume() + # can/will be overridden by the worker implementation + def _get_generated_file_path(self, location): + return location + def magic_enabled(self): return ('magic_enabled' not in self['options']) or (self['options']['magic_enabled']) @@ -90,19 +83,18 @@ def add_generated_files(self, file_type, locations): self['generated_files'][file_type] = [] for location in iterify(locations): - if fame_config.remote: - response = send_file_to_remote(location, '/analyses/{}/generated_file'.format(self['_id'])) - filepath = response.json()['path'] - else: - filepath = location - - self.log('debug', u"Adding generated file '{0}' of type '{1}'".format(filepath, file_type)) - self.append_to(['generated_files', file_type], filepath) + location = self._get_generated_file_path(location) + self.log('debug', u"Adding generated file '{0}' of type '{1}'".format(location, file_type)) + self.append_to(['generated_files', file_type], location) # Then, trigger registered modules if magic is 
enabled if self.magic_enabled(): self.queue_modules(dispatcher.triggered_by("_generated_file(%s)" % file_type)) + # can/will be overridden by the worker implementation + def _get_file_from_filepath(self, filepath, fd): + return File(filename=os.path.basename(filepath), stream=fd) + def add_extracted_file(self, filepath, automatic_analysis=True): self.log('debug', u"Adding extracted file '{}'".format(filepath)) @@ -111,11 +103,7 @@ def add_extracted_file(self, filepath, automatic_analysis=True): f = File(filename=filename, stream=fd, create=False) if not f.existing: - if fame_config.remote: - response = send_file_to_remote(filepath, '/files/') - f = File(response.json()['file']) - else: - f = File(filename=os.path.basename(filepath), stream=fd) + f = self._get_file_from_filepath(filepath, fd) # Automatically analyze extracted file if magic is enabled and module did not disable it if self.magic_enabled() and automatic_analysis: @@ -140,23 +128,25 @@ def change_type(self, filepath, new_type): else: self.log('warning', u"Tried to change type of generated file '{}'".format(filepath)) - def add_support_file(self, module_name, name, filepath): - self.log('debug', "Adding support file '{}' at '{}'".format(name, filepath)) + # can/will be overridden by the worker implementation + def _store_support_file(self, filepath, module_name): + dirpath = os.path.join(fame_config.storage_path, 'support_files', module_name, str(self['_id'])) + dstfilepath = os.path.join(dirpath, os.path.basename(filepath)) - if fame_config.remote: - response = send_file_to_remote(filepath, '/analyses/{}/support_file/{}'.format(self['_id'], module_name)) - dstfilepath = response.json()['path'] - else: - dirpath = os.path.join(fame_config.storage_path, 'support_files', module_name, str(self['_id'])) - dstfilepath = os.path.join(dirpath, os.path.basename(filepath)) + # Create parent dirs if they don't exist + try: + os.makedirs(dirpath) + except OSError: + pass - # Create parent dirs if they don't exist - 
try: - os.makedirs(dirpath) - except: - pass + copy(filepath, dstfilepath) - copy(filepath, dstfilepath) + return dstfilepath + + def add_support_file(self, module_name, name, filepath): + self.log('debug', "Adding support file '{}' at '{}'".format(name, filepath)) + + dstfilepath = self._store_support_file(filepath, module_name) if module_name not in self['support_files']: self['support_files'][module_name] = [] @@ -298,8 +288,10 @@ def _cancel_module(self, module): # Queue execution of specific module(s) def queue_modules(self, modules, fallback_waiting=True): for module_name in iterify(modules): - self.log("debug", "Trying to queue module '{0}'".format(module_name)) - if module_name not in self['executed_modules'] and module_name not in self['pending_modules']: + self.log( + "debug", "Trying to queue module '{0}'".format(module_name)) + if (module_name not in self['executed_modules'] and + module_name not in self['pending_modules']): module = dispatcher.get_module(module_name) if module is None: @@ -307,52 +299,12 @@ def queue_modules(self, modules, fallback_waiting=True): else: if self._can_execute_module(module): if self.append_to('pending_modules', module_name): - run_module.apply_async((self['_id'], module_name), queue=module.info['queue']) + celery.send_task('run_module', + args=(self['_id'], module_name), + queue=module.info['queue']) elif fallback_waiting: self.append_to('waiting_modules', module_name) - # Run specific module, should only be executed on celery worker - def run(self, module_name): - self.log('debug', "Trying to run {0}".format(module_name)) - print "Trying to run {0}".format(module_name) - - # This test prevents multiple execution of the same module - if self.append_to('executed_modules', module_name): - module = dispatcher.get_module(module_name) - - if module is None: - self._error_with_module(module_name, "module has been removed or disabled.") - else: - try: - module.initialize() - - if module.info['type'] == "Preloading": - 
self.update_value('status', self.STATUS_PRELOADING) - else: - self.update_value('status', self.STATUS_RUNNING) - - if module.execute(self): - # Save results, if any - if module.results is not None: - self.update_value(['results', module_name], module.results) - - # Save tags, and queue triggered modules - for tag in module.tags: - tag_string = "%s(%s)" % (module_name, tag) - self.add_tag(tag_string) - - self.add_tag(module_name) - - self.log('debug', "Done with {0}".format(module_name)) - except Exception: - tb = traceback.format_exc() - self._error_with_module(module_name, tb) - - self.remove_from('pending_modules', module_name) - self.remove_from('waiting_modules', module_name) - - self.resume() - def add_tag(self, tag): self.append_to('tags', tag) @@ -361,34 +313,12 @@ def add_tag(self, tag): self.queue_modules(dispatcher.triggered_by(tag)) def log(self, level, message): - message = "%s: %s: %s" % (datetime.datetime.now().strftime("%Y-%m-%d %H:%M"), level, message) - self.append_to('logs', message) + message = "%s: %s: %s" % ( + datetime.datetime.now().strftime("%Y-%m-%d %H:%M"), level, message) + self.append_to('logs', message, set_=False) - # This will give the correct and locally valid filepath of given file - # When on a remote worker, the file needs to be retrieved first def filepath(self, path): - if fame_config.remote: - pathhash = md5(path.encode('utf-8')).hexdigest() - local_path = os.path.join(fame_config.storage_path, pathhash) - if not os.path.isfile(local_path): - # Make sure fame_config.storage_path exists - try: - os.makedirs(fame_config.storage_path) - except Exception: - pass - - url = urljoin(fame_config.remote, '/analyses/{}/get_file/{}'.format(self['_id'], pathhash)) - response = requests.get(url, stream=True, headers={'X-API-KEY': fame_config.api_key}) - response.raise_for_status() - f = open(local_path, 'ab') - for chunk in response.iter_content(1024): - f.write(chunk) - - f.close() - - return local_path - else: - return path + return path 
def get_main_file(self): filepath = self._file['filepath'] @@ -425,8 +355,7 @@ def _lookup_ioc(self, ioc): ti_tags += tags ti_indicators += indicators except Exception, e: - import traceback - traceback.print_exc() + self.log('debug', traceback.format_exc()) self.log('error', "error in threat intelligence module '{}': {}".format(module.name, e)) return ti_tags, ti_indicators @@ -492,4 +421,4 @@ def _error_with_module(self, module, message): # For cyclic imports -from fame.core.file import File +from fame.core.file import File # noqa diff --git a/fame/core/config.py b/fame/core/config.py index 1a8a897..9989632 100755 --- a/fame/core/config.py +++ b/fame/core/config.py @@ -5,7 +5,7 @@ from fame.common.mongo_dict import MongoDict -def config_to_dict(config): +def _config_to_dict(config): result = {} for setting in config: @@ -17,7 +17,7 @@ def config_to_dict(config): # We will keep configured values, only if they have the same name and type def apply_config_update(config, config_update): new_config = [] - config = config_to_dict(config) + config = _config_to_dict(config) for setting in config_update: new_setting = copy(setting) diff --git a/fame/core/file.py b/fame/core/file.py index 691d4c9..7df72cc 100755 --- a/fame/core/file.py +++ b/fame/core/file.py @@ -10,16 +10,6 @@ from fame.core.module_dispatcher import dispatcher from fame.core.config import Config -from fame.common.email_utils import EmailServer - -notification_body_tpl = u"""Hi, - -{0} has written the following comment on analysis {1}: - -\t{2} - -Best regards""" - def _hash_by_length(hash): _map = { @@ -108,7 +98,7 @@ def _init_with_file(self, filename, stream, create): self._compute_default_properties() self.save() - def add_comment(self, analyst_id, comment, analysis_id=None, probable_name=None, notify=None): + def add_comment(self, analyst_id, comment, analysis_id=None, probable_name=None): if probable_name: self.add_probable_name(probable_name) @@ -119,33 +109,6 @@ def add_comment(self, analyst_id, 
comment, analysis_id=None, probable_name=None, 'probable_name': probable_name, 'date': datetime.datetime.now() }) - if notify is not None and analysis_id is not None: - self.notify_new_comment(analysis_id, analyst_id, comment) - - def notify_new_comment(self, analysis_id, commentator_id, comment): - commentator = store.users.find_one({'_id': commentator_id}) - analysis = store.analysis.find_one({'_id': ObjectId(analysis_id)}) - analyst_id = analysis['analyst'] - recipients = set() - # First let's add submiter analyst and check if he is not commentator - if commentator_id != analyst_id: - analyst = store.users.find_one({'_id': analysis['analyst']}) - recipients.add(analyst['email']) - # iter on commentators and add them as recipient - for comment in self['comments']: - if comment['analyst'] not in [analyst_id, commentator_id]: - recipient = store.users.find_one({'_id': comment['analyst']}) - recipients.add(recipient['email']) - if len(recipients): - config = Config.get(name="email").get_values() - analysis_url = "{0}/analyses/{1}".format(fame_config.fame_url, analysis_id) - body = notification_body_tpl.format(commentator['name'], - analysis_url, - comment['comment']) - email_server = EmailServer() - if email_server.is_connected: - msg = email_server.new_message("[FAME] New comment on analysis", body) - msg.send(list(recipients)) def add_probable_name(self, probable_name): for name in self['probable_names']: @@ -227,6 +190,8 @@ def _compute_hashes(self, stream): # Compute default properties # For now, just 'name' and 'type' def _compute_default_properties(self, hash_only=False): + self['analysis'] = [] + if not hash_only: self['names'] = [os.path.basename(self['filepath'])] self['detailed_type'] = magic.from_file(self['filepath']) @@ -298,7 +263,7 @@ def _store_file(self, filename, stream): # Create parent dirs if they don't exist try: os.makedirs(os.path.join(fame_config.storage_path, self['sha256'])) - except: + except OSError: pass # Save file contents diff --git 
a/fame/core/module.py b/fame/core/module.py index 5d8489b..70e6894 100755 --- a/fame/core/module.py +++ b/fame/core/module.py @@ -2,14 +2,21 @@ import inspect import requests import traceback + +from copy import copy from time import sleep from urlparse import urljoin -from markdown2 import markdown from datetime import datetime, timedelta from fame.common.constants import MODULES_ROOT -from fame.common.exceptions import ModuleInitializationError, ModuleExecutionError, MissingConfiguration -from fame.common.utils import iterify, is_iterable, list_value, save_response, ordered_list_value +from fame.common.utils import ( + iterify, is_iterable, list_value, save_response, + ordered_list_value +) +from fame.common.exceptions import ( + ModuleInitializationError, ModuleExecutionError, + MissingConfiguration +) from fame.common.mongo_dict import MongoDict from fame.core.config import Config, apply_config_update, incomplete_config from fame.core.internals import Internals @@ -39,15 +46,6 @@ def get_file(self, filename): return None - def get_readme(self): - readme = self.get_file('README.md') - - if readme: - with open(readme, 'r') as f: - readme = markdown(f.read(), extras=["code-friendly"]) - - return readme - def details_template(self): return '/'.join(self['path'].split('.')[2:-1]) + '/details.html' @@ -66,7 +64,8 @@ def update_config(self, new_info): self._update_diffed_value('acts_on', new_info['acts_on']) self['description'] = new_info['description'] - self['config'] = apply_config_update(self['config'], new_info['config']) + self['config'] = apply_config_update( + self['config'], new_info['config']) if self['enabled'] and incomplete_config(self['config']): self['enabled'] = False @@ -497,11 +496,12 @@ def _try_each(self, target, file_type): return self.each_with_type(target, file_type) except ModuleExecutionError, e: + self.log("debug", traceback.format_exc()) self.log("error", "Could not run on %s: %s" % (target, e)) return False - except: - tb = 
traceback.format_exc() - self.log("error", "Could not run on %s.\n %s" % (target, tb)) + except Exception, e: + self.log("debug", traceback.format_exc()) + self.log("error", "Could not run on %s.\n %s" % (target, e)) return False @classmethod @@ -1076,7 +1076,7 @@ def is_ready(self): r = requests.get(self.agent_url, timeout=1) return r.status_code == 200 - except: + except Exception: return False def restore(self, should_raise=True): diff --git a/fame/core/module_dispatcher.py b/fame/core/module_dispatcher.py index 5f17597..a1c985e 100755 --- a/fame/core/module_dispatcher.py +++ b/fame/core/module_dispatcher.py @@ -43,7 +43,8 @@ def reload(self): ('configs', 'Grants access to the "Configs" section that tracks malware configurations.'), ('submit_iocs', 'Allows user to send observables to Threat Intelligence modules.'), ('add_probable_name', "Allows user to set an object's probable name"), - ('see_logs', 'Allows user to access the log section of anlyses. This could reveal information on the underlying system.') + ('see_logs', 'Allows user to access the log section of anlyses. 
This could reveal information on the underlying system.'), + ('system', 'Allows user to access system view.') ]) self.load_all_modules() @@ -252,7 +253,7 @@ def _add_module_triggers(self, module): def _add_transforms(self, module): if not module['acts_on']: - for generated_type in iterify(module.generates): + for generated_type in iterify(module['generates']): if generated_type not in self._direct_transforms: self._direct_transforms[generated_type] = [] self._direct_transforms[generated_type].append(module['name']) @@ -388,7 +389,7 @@ def _shortest_path_to_module(self, types_available, target_module, excluded_modu for destination_type in iterify(target_module.info['acts_on']): module, length = self._shortest_path_to_type(types_available, destination_type, excluded_modules + [target_module.info['name']]) - if path_length is None or length < path_length: + if path_length is None or (length is not None and length < path_length): path_length = length next_module = module diff --git a/fame/core/repository.py b/fame/core/repository.py index b79b5ba..1ab2d36 100755 --- a/fame/core/repository.py +++ b/fame/core/repository.py @@ -1,47 +1,26 @@ import os -from time import time + from shutil import rmtree -from git import Repo from fame.common.mongo_dict import MongoDict from fame.common.constants import FAME_ROOT from fame.core.celeryctl import celery from fame.core.module import ModuleInfo -from fame.core.internals import Internals -from fame.core.module_dispatcher import dispatcher - - -# Celery task to retrieve analysis object and run specific module on it -@celery.task(soft_time_limit=60) -def clone_repository(repository_id): - repository = Repository.get(_id=repository_id) - repository.do_clone() - - -@celery.task(soft_time_limit=60) -def pull_repository(repository_id): - repository = Repository.get(_id=repository_id) - repository.do_pull() class Repository(MongoDict): collection_name = 'repositories' - def __init__(self, values={}): - keyfile = os.path.join(FAME_ROOT, 
"conf", "id_rsa") - self['ssh_cmd'] = "ssh -o StrictHostKeyChecking=no -i {}".format(keyfile) - MongoDict.__init__(self, values) - def delete(self): # First, remove modules from database for module in ModuleInfo.find(): - if module['path'].startswith('fame.modules.{}.'.format(self['name'])): + if module['path'].startswith('fame.modules.{}.'.format(self['name'])): # noqa module.delete() # Then, delete the files try: rmtree(self.path()) - except: + except OSError: pass # Finally, delete record of repository @@ -50,56 +29,7 @@ def delete(self): def path(self): return os.path.join(FAME_ROOT, 'fame', 'modules', self['name']) - def clone(self): - clone_repository.apply_async((self['_id'],), queue='updates') - - def do_clone(self): - print "[+] Cloning '{}'".format(self['name']) - try: - if self['private']: - Repo.clone_from(self['address'], self.path(), env=dict(GIT_SSH_COMMAND=self['ssh_cmd'])) - else: - Repo.clone_from(self['address'], self.path()) - - dispatcher.update_modules(self) - self.update_value('status', 'active') - except Exception, e: - self['status'] = 'error' - self['error_msg'] = 'Could not clone repository, probably due to authentication issues.\n{}'.format(e) - self.save() - - internals = Internals.get(name="updates") - internals.update_value("last_update", time()) - - def pull(self): - self.update_value('status', 'updating') - pull_repository.apply_async((self['_id'],), queue='updates') - - def do_pull(self): - print "[+] Pulling '{}'".format(self['name']) - try: - repo = Repo(self.path()) - - if self['private']: - with repo.git.custom_environment(GIT_SSH_COMMAND=self['ssh_cmd']): - repo.remotes.origin.pull() - else: - repo.remotes.origin.pull() - - # Make sure we delete orphan .pyc files - for root, dirs, files in os.walk(self.path()): - for f in files: - f = os.path.join(root, f) - if f.endswith(".pyc") and not os.path.exists(f[:-1]): - print "Deleting orphan file '{}'".format(f) - os.remove(f) - - dispatcher.update_modules(self) - 
self.update_value('status', 'active') - except Exception, e: - self['status'] = 'error' - self['error_msg'] = 'Could not update repository.\n{}'.format(e) - self.save() - - updates = Internals.get(name="updates") - updates.update_value("last_update", time()) + def update_files(self): + celery.send_task('refresh_repository', + args=(self['_id'],), + queue='updates') diff --git a/fame/core/store.py b/fame/core/store.py index df394a3..9ebb6ed 100755 --- a/fame/core/store.py +++ b/fame/core/store.py @@ -1,7 +1,6 @@ from urllib import quote_plus from pymongo import MongoClient, TEXT - from fame.common.config import fame_config @@ -16,7 +15,7 @@ def init(self): self.db = self._con[fame_config.mongo_db] if fame_config.mongo_user and fame_config.mongo_password: self.db.authenticate(fame_config.mongo_user, quote_plus(fame_config.mongo_password), mechanism='SCRAM-SHA-1') - + # Collections self.files = self.db.files self.analysis = self.db.analysis @@ -46,4 +45,5 @@ def connect(self): def collection(self, name): return self.db[name] + store = Store() diff --git a/fame/core/user.py b/fame/core/user.py index 178a04d..d2c2902 100755 --- a/fame/core/user.py +++ b/fame/core/user.py @@ -61,7 +61,7 @@ def generate_avatar(self): response.raise_for_status() with open(os.path.join(AVATARS_ROOT, "{}.png".format(self['_id'])), 'w') as f: f.write(response.content) - except: + except Exception: print "Could not generate avatar for {}".format(self['email']) @staticmethod diff --git a/fame/worker/__init__.py b/fame/worker/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/fame/worker/analysis.py b/fame/worker/analysis.py new file mode 100755 index 0000000..93b6147 --- /dev/null +++ b/fame/worker/analysis.py @@ -0,0 +1,140 @@ +import os +import requests +import traceback + +from bson import json_util +from hashlib import md5 +from urlparse import urljoin + +from fame.common.config import fame_config +from fame.core.analysis import Analysis as CoreAnalysis +from 
fame.core.celeryctl import celery +from fame.core.file import File +from fame.core.module_dispatcher import dispatcher +from fame.core.store import store +from fame.worker.utils import send_file_to_remote + + +# Celery task to retrieve analysis object and run specific module on it +@celery.task(name="run_module") +def run_module(analysis_id, module): + dispatcher.reload() + analysis = Analysis(store.analysis.find_one({'_id': analysis_id})) + analysis.run(module) + + +class Analysis(CoreAnalysis): + def __init__(self, values): + super(Analysis, self).__init__(values) + + def _get_generated_file_path(self, location): + response = send_file_to_remote( + location, '/analyses/{}/generated_file'.format(self['_id'])) + return response.json()['path'] + + def _get_file_from_filepath(self, filepath, fd=None): + response = send_file_to_remote(filepath, '/files/') + return File(json_util.loads(response.text)['file']) + + def _store_preloaded_file(self, filepath=None, fd=None): + if not filepath and not fd: + raise ValueError( + "Please provide either the path to the file or a file-like " + "object containing the data.") + + if filepath and fd: + self.log("debug", + "Please provide either the path to the file or a " + "file-like object containing the data, not both. 
" + "Chosing the file-like object for now.") + response = send_file_to_remote(fd, '/files/') + else: + response = send_file_to_remote(filepath, '/files/') + + return File(json_util.loads(response.text)['file']) + + def _store_support_file(self, filepath, module_name): + response = send_file_to_remote( + filepath, '/analyses/{}/support_file/{}'.format( + self['_id'], module_name)) + return response.json()['path'] + + # Run specific module, should only be executed on celery worker + def run(self, module_name): + self.log('debug', "Trying to run {0}".format(module_name)) + print "Trying to run {0}".format(module_name) + + # This test prevents multiple execution of the same module + if self.append_to('executed_modules', module_name): + module = dispatcher.get_module(module_name) + + if module is None: + self._error_with_module( + module_name, "module has been removed or disabled.") + else: + try: + module.initialize() + + if module.info['type'] == "Preloading": + self.update_value('status', self.STATUS_PRELOADING) + else: + self.update_value('status', self.STATUS_RUNNING) + + if module.execute(self): + # Save results, if any + if module.results is not None: + self.update_value(['results', module_name], + module.results) + + # Save tags, and queue triggered modules + for tag in module.tags: + tag_string = "%s(%s)" % (module_name, tag) + self.add_tag(tag_string) + + self.add_tag(module_name) + + self.log('debug', "Done with {0}".format(module_name)) + except Exception: + tb = traceback.format_exc() + self._error_with_module(module_name, tb) + + self.remove_from('pending_modules', module_name) + self.remove_from('waiting_modules', module_name) + + self.resume() + + # This will give the correct and locally valid filepath of given file + # When on a remote worker, the file needs to be retrieved first + # Thus we overload the function in this subclass to have that transparent + def filepath(self, path): + pathhash = md5(path.encode('utf-8')).hexdigest() + # Some modules 
require proper filenames, so don't join with just pathhash + local_path = os.path.join(fame_config.storage_path, pathhash, os.path.basename(path)) + if not os.path.isfile(local_path): + # Make sure local_path exists + try: + os.makedirs(os.path.dirname(local_path)) + except OSError: + pass + + url = urljoin( + fame_config.fame_url, '/analyses/{}/get_file/{}'.format( + self['_id'], pathhash)) + + response = requests.get(url, stream=True, + headers={'X-API-KEY': fame_config.api_key}) + response.raise_for_status() + + f = open(local_path, 'wb') + for chunk in response.iter_content(1024): + f.write(chunk) + + f.close() + + return local_path + + def get_main_file(self): + filepath = self._file['filepath'] + if self._file['type'] == "hash": + return filepath + return self.filepath(filepath) diff --git a/fame/worker/repository.py b/fame/worker/repository.py new file mode 100755 index 0000000..ba64619 --- /dev/null +++ b/fame/worker/repository.py @@ -0,0 +1,163 @@ +import os +import requests + +from time import time +from git import Repo +from tempfile import mkdtemp, TemporaryFile +from urlparse import urljoin +from zipfile import ZipFile +from io import BytesIO + +from fame.common.config import fame_config +from fame.common.constants import FAME_ROOT +from fame.core.celeryctl import celery +from fame.core.internals import Internals +from fame.core.repository import Repository as CoreRepository + + +@celery.task(soft_time_limit=60, name="refresh_repository") +def refresh_repository(repository_id): + repository = Repository.get(_id=repository_id) + + # load current module blob from remote + url = urljoin(fame_config.fame_url, '/modules/download') + try: + print "[+] Get current modules" + response = requests.get( + url, stream=True, headers={'X-API-KEY': fame_config.api_key}) + response.raise_for_status() + + print "[+] Extract modules" + module_tempdir = mkdtemp() + with ZipFile(BytesIO(response.content), 'r') as zipf: + zipf.extractall(module_tempdir) + + print "[+] 
Constructing git repository path" + repo_path = os.path.join(module_tempdir, repository['name']) + + success = False + + if os.path.exists(repo_path): + # path exists + if os.path.isdir(repo_path): + if len(os.listdir(repo_path)) == 0: + # directory empty + print "[+] Cloning into existing directory" + repository['status'] = 'cloning' + repository.save() + success = repository.do_clone(path=repo_path) + + else: + git_folder = os.path.join(repo_path, ".git") + + # path contains .git folder -> pull + if os.path.exists(git_folder) and os.path.isdir(git_folder): # noqa + print "[+] Pulling latest changes" + repository['status'] = 'pulling' + repository.save() + success = repository.do_pull(path=repo_path) + else: + raise "Took unexpected path in program logic!" + + else: + raise "Path exists but is not a directory" + else: + # directory empty + print "[+] Cloning new repository" + repository['status'] = 'cloning' + repository.save() + success = repository.do_clone(path=repo_path) + + if not success: + # Error was set by do_pull/do_clone + print "[E] Could not update repository" + return + + print "[+] Zipping files up" + with TemporaryFile() as tempf: + with ZipFile(tempf, 'w') as zipf: + for root, dirs, files in os.walk(repo_path): + for filename in files: + # Ignore pyc files + if not filename.endswith('.pyc'): + filepath = os.path.join(root, filename) + zipf.write( + filepath, os.path.relpath(filepath, repo_path)) + + print "[+] Putting files to web server" + url = urljoin( + fame_config.fame_url, + '/modules/repository/{}/update'.format(repository['_id'])) + tempf.seek(0) + resp = requests.put( + url, + data=tempf, + headers={ + 'X-API-KEY': fame_config.api_key, + 'Content-Type': 'application/zip' + }) + resp.raise_for_status() + + except Exception as e: + repository['status'] = 'error' + repository['error_msg'] = e.message + repository.save() + print "[E] Could not update repository: {}".format(e.message) + else: + print "[*] Job success" + + +class 
Repository(CoreRepository): + def __init__(self, values={}): + keyfile = os.path.join(FAME_ROOT, "conf", "id_rsa") + self['ssh_cmd'] = "ssh -o StrictHostKeyChecking=no -i {}".format(keyfile) + + super(Repository, self).__init__(values) + + def do_clone(self, path=None): + print "[+] Cloning '{}'".format(self['name']) + try: + if self['private']: + Repo.clone_from(self['address'], path or self.path(), + env=dict(GIT_SSH_COMMAND=self['ssh_cmd'])) + else: + Repo.clone_from(self['address'], path or self.path()) + + internals = Internals.get(name="updates") + internals.update_value("last_update", time()) + return True + + except Exception, e: + self['status'] = 'error' + self['error_msg'] = 'Could not clone repository, probably due to authentication issues.\n{}'.format(e) # noqa + self.save() + return False + + def do_pull(self, path=None): + print "[+] Pulling '{}'".format(self['name']) + try: + repo = Repo(path or self.path()) + + if self['private']: + with repo.git.custom_environment( + GIT_SSH_COMMAND=self['ssh_cmd']): + repo.remotes.origin.pull() + else: + repo.remotes.origin.pull() + + # Make sure we delete orphan .pyc files + for root, dirs, files in os.walk(path or self.path()): + for f in files: + f = os.path.join(root, f) + if f.endswith(".pyc") and not os.path.exists(f[:-1]): + print "Deleting orphan file '{}'".format(f) + os.remove(f) + + updates = Internals.get(name="updates") + updates.update_value("last_update", time()) + return True + except Exception, e: + self['status'] = 'error' + self['error_msg'] = 'Could not update repository.\n{}'.format(e) + self.save() + return False diff --git a/fame/worker/utils.py b/fame/worker/utils.py new file mode 100644 index 0000000..bec873c --- /dev/null +++ b/fame/worker/utils.py @@ -0,0 +1,19 @@ +import requests + +from urlparse import urljoin + +from fame.common.config import fame_config + + +def send_file_to_remote(file, url): + if isinstance(file, basestring): + file = open(file, 'rb') + + url = 
urljoin(fame_config.fame_url, url) + response = requests.post(url, files={'file': file}, + headers={'X-API-KEY': fame_config.api_key}) + response.raise_for_status() + + file.close() + + return response diff --git a/requirements-dev.txt b/requirements-dev.txt new file mode 100644 index 0000000..4d9448f --- /dev/null +++ b/requirements-dev.txt @@ -0,0 +1,6 @@ +-r requirements.txt +-r requirements-web.txt +-r requirements-worker.txt +Sphinx~=1.7.4 +sphinx_rtd_theme~=0.3.1 +sphinxcontrib-httpdomain~=1.6.1 \ No newline at end of file diff --git a/requirements-web.txt b/requirements-web.txt new file mode 100644 index 0000000..0253851 --- /dev/null +++ b/requirements-web.txt @@ -0,0 +1,10 @@ +-r requirements.txt +Flask~=1.0.2 +Flask-Classy~=0.6.10 +Flask-Login==0.3.2 +Flask-Negotiation~=0.1.9 +flask-paginate~=0.5.1 +markdown2~=2.3.5 +zxcvbn==1.0 +git+https://github.com/g2p/rfc6266#egg=rfc6266 +python-ldap~=3.2.0 \ No newline at end of file diff --git a/requirements-worker.txt b/requirements-worker.txt new file mode 100644 index 0000000..eb3e308 --- /dev/null +++ b/requirements-worker.txt @@ -0,0 +1,2 @@ +-r requirements.txt +GitPython~=2.1.9 \ No newline at end of file diff --git a/requirements.txt b/requirements.txt index c99f9ba..d52dba2 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,16 +1,6 @@ -Flask~=1.0.2 -Flask-Classy~=0.6.10 -Flask-Negotiation~=0.1.9 -flask-paginate~=0.5.1 -Sphinx~=1.7.4 -sphinx_rtd_theme~=0.3.1 -sphinxcontrib-httpdomain~=1.6.1 celery~=4.1.0 pymongo~=3.6.1 -python-magic~=0.4.15 requests~=2.18.4 -Flask-Login==0.3.2 -zxcvbn==1.0 -GitPython~=2.1.9 -markdown2~=2.3.5 -git+https://github.com/g2p/rfc6266#egg=rfc6266 +python-magic~=0.4.15 +werkzeug +git+https://github.com/g2p/rfc6266#egg=rfc6266 \ No newline at end of file diff --git a/ssh/.gitkeep b/ssh/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/utils/create_user.py b/utils/create_user.py index 84a9d24..1b3c446 100755 --- a/utils/create_user.py +++ b/utils/create_user.py 
@@ -11,20 +11,38 @@ def create_user(admin=False, password=True): - full_name = user_input("Full Name") - email = user_input("Email Address") - groups = user_input("Groups (comma-separated)", "cert").split(',') + full_name = os.getenv("FAME_ADMIN_FULLNAME", "") + if not full_name: + full_name = user_input("Full Name") + + email = os.getenv("FAME_ADMIN_EMAIL", "") + if not email: + email = user_input("Email Address") + + groups = os.getenv("FAME_ADMIN_GROUPS", "") + if not groups: + groups = user_input("Groups (comma-separated)", "cert") + groups = groups.split(",") if admin: default_sharing = groups groups.append('*') permissions = ['*'] else: - default_sharing = user_input("Default Sharing Groups (comma-separated)").split(',') - permissions = user_input("Permissions (comma-separated)").split(',') + default_sharing = os.getenv("FAME_ADMIN_DEFAULT_SHARING", "") + if not default_sharing: + default_sharing = user_input("Default Sharing Groups (comma-separated)") + default_sharing = default_sharing.split(",") + + permissions = os.getenv("FAME_ADMIN_PERMISSIONS", "") + if not permissions: + permissions = user_input("Permissions (comma-separated)") + permissions = permissions.split(",") if password: - password = get_new_password() + password = os.getenv("FAME_ADMIN_PASSWORD", "") + if not password: + password = get_new_password() else: password = None diff --git a/utils/install.py b/utils/install.py index ea62624..4aea4c4 100755 --- a/utils/install.py +++ b/utils/install.py @@ -1,6 +1,7 @@ import os import sys import errno +from argparse import ArgumentParser from urllib import quote_plus from urlparse import urljoin from subprocess import call @@ -64,10 +65,11 @@ def define_mongo_connection(context): context['mongo_user'] = '' context['mongo_password'] = '' if not test_mongodb_connection(db): - context['mongo_user'] = user_input("MongoDB username") + context['mongo_user'] = user_input("MongoDB user name") context['mongo_password'] = user_input("MongoDB password") + try: 
- db.authenticate(context['mongo_user'], quote_plus(context['mongo_password'])) + db.authenticate(context['mongo_user'], context['mongo_password']) except: error("Could not connect to MongoDB (invalid credentials).") @@ -81,6 +83,7 @@ def define_installation_type(context): print " - 2: Remote worker\n" itype = user_input("Installation type", "1", ["1", "2"]) + if itype == "1": context['installation_type'] = 'local' else: @@ -126,16 +129,18 @@ def add_community_repository(): 'status': 'cloning' }) repo.save() - repo.do_clone() + repo.update_files() def perform_local_installation(context): templates = Templates() context['fame_url'] = user_input("FAME's URL for users (e.g. https://fame.yourdomain/)") + print "[+] Creating configuration file ..." context['secret_key'] = os.urandom(64).encode('hex') templates.save_to(os.path.join(FAME_ROOT, 'conf', 'fame.conf'), 'local_fame.conf', context) + templates.save_to(os.path.join(FAME_ROOT, 'conf', 'fame-worker.conf'), 'remote_fame.conf', context) generate_ssh_key() @@ -195,20 +200,20 @@ def perform_remote_installation(context): templates.save_to(os.path.join(FAME_ROOT, 'conf', 'fame.conf'), 'remote_fame.conf', context) -def install_requirements(): - print "[+] Installing requirements ..." 
+def install_requirements(what, req_file): + print "[+] Installing {} requirements ...".format(what) - rcode, output = pip_install('-r', os.path.join(FAME_ROOT, 'requirements.txt')) + rcode, output = pip_install('-r', os.path.join(FAME_ROOT, req_file)) if rcode: print output - error("Could not install requirements.") + error("Could not install {} requirements.".format(what)) def main(): context = {} - install_requirements() + install_requirements("base", "requirements.txt") define_mongo_connection(context) @@ -216,8 +221,11 @@ def main(): define_installation_type(context) if context['installation_type'] == 'local': perform_local_installation(context) + install_requirements("web", "requirements-web.txt") + install_requirements("worker", "requirements-worker.txt") else: perform_remote_installation(context) + install_requirements("worker", "requirements-worker.txt") if __name__ == '__main__': diff --git a/utils/install_docker.py b/utils/install_docker.py new file mode 100644 index 0000000..28be86f --- /dev/null +++ b/utils/install_docker.py @@ -0,0 +1,26 @@ +import os +import sys + +sys.path.append(os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), ".."))) + +from fame.core import fame_init +from utils.initial_data import create_initial_data +from utils.install import create_admin_user, create_user_for_worker, add_community_repository + + +def main(): + fame_init() + + print("[+] Creating initial data ...") + create_initial_data() + + print("[+] Creating users...") + create_admin_user() + create_user_for_worker(dict()) + + if os.getenv("FAME_INSTALL_COMMUNITY_REPO", "1") == "1": + add_community_repository() + + +if __name__ == "__main__": + main() diff --git a/utils/run.sh b/utils/run.sh index f526b0d..0045697 100755 --- a/utils/run.sh +++ b/utils/run.sh @@ -1,4 +1,4 @@ -#! 
/bin/sh +#!/bin/bash UTILS_ROOT=$(dirname "$0") FAME_ROOT=$(dirname "$UTILS_ROOT") diff --git a/utils/single_module.py b/utils/single_module.py index b8001b0..6693408 100755 --- a/utils/single_module.py +++ b/utils/single_module.py @@ -8,12 +8,14 @@ import datetime import argparse -sys.path.append(os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), ".."))) +fame_dir = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), "..")) +sys.path.append(fame_dir) from utils import error, user_input from fame.core import fame_init from fame.core import module as module_classes from fame.common.objects import Dictionary +from fame.common.config import fame_config from fame.common.constants import MODULES_ROOT from fame.common.utils import iterify, u from fame.core.module import ProcessingModule @@ -36,7 +38,7 @@ def __init__(self, interactive): except: pass - def get_processing_module(self, name): + def get_module(self, name): if name in self.modules: module = self.modules[name](with_config=False) module.info = module.static_info() @@ -79,7 +81,7 @@ def get_value_from_user(self, setting, prefix=None): if setting['type'] == 'integer': value = int(value) elif setting['type'] == 'bool': - value = bool(value) + value = value.lower() in ['true', '1'] return value @@ -177,8 +179,13 @@ def pprint(self): def test_mode_module(name, interactive): print "[+] Enabling test mode." 
+ + if 'temp_path' not in fame_config: + fame_config.temp_path = os.path.join(fame_dir, "temp") + print "[+] Setting temp_path to {}".format(fame_config.temp_path) + dispatcher = Dispatcher(interactive) - module = dispatcher.get_processing_module(name) + module = dispatcher.get_module(name) if not module: error("Could not find module '{}'".format(name)) @@ -215,7 +222,7 @@ def test_mode_module(name, interactive): else: try: fame_init() - module = dispatcher.get_processing_module(args.module) + module = dispatcher.get_module(args.module) module.initialize() except: module = test_mode_module(args.module, args.interactive) diff --git a/web/static/img/default_avatar.png b/web/static/img/default_avatar.png new file mode 100755 index 0000000..822078a Binary files /dev/null and b/web/static/img/default_avatar.png differ diff --git a/web/templates/base.html b/web/templates/base.html index 22bc5e5..d710da2 100755 --- a/web/templates/base.html +++ b/web/templates/base.html @@ -182,6 +182,9 @@ {% if current_user.has_permission('manage_users') %}
  • Users
  • {% endif %} + {% if current_user.has_permission('system') %} +
  • System
  • + {% endif %}
  • Logout
  • diff --git a/web/templates/files/details.html b/web/templates/files/details.html index d2ae133..07b649a 100755 --- a/web/templates/files/details.html +++ b/web/templates/files/details.html @@ -162,23 +162,23 @@

    Comments

    -
    - - - - -
    -
    - + + + + +
    + +
    +
    diff --git a/web/templates/modules/index.html b/web/templates/modules/index.html index 2582f91..c6ec94c 100755 --- a/web/templates/modules/index.html +++ b/web/templates/modules/index.html @@ -100,6 +100,72 @@

    Error

    +
    +
    +
    +
    +

    Preloading Modules

    +

    +
    +
    + {% for module in data.modules['Preloading'] %} +
    +
    +
    +
    {{module.name}}
    +
    +
    + Configure + {% if module.enabled %} +
    + +
    + {% else %} + {% if not module.error %} +
    + +
    + {% endif %} + {% endif %} +
    +
    + {% if module.description %} +
    {{module.description}}
    + {% endif %} + + {% if module.error %} +
    +
    +

    Error

    +
    +
    +
    {{module.error}}
    +
    +
    + {% endif %} + +
      + {% if module.acts_on %} +
    • Acts On {{module.acts_on|smart_join}}
    • + {% endif %} +
    • Queue {{module.queue}}
    • + {% if module.enabled %} +
    • Enabled
    • + {% else %} +
    • Disabled
    • + {% endif %} +
    +
    + {% else %} +
    No module to list. Consider adding a module repository.
    + {% endfor %} +
    +
    +
    +
    + +
    @@ -109,7 +175,7 @@

    Processing Modules

    {% for module in data.modules['Processing'] %} -
    +
    {{module.name}}
    @@ -242,7 +308,7 @@

    Filetype Modules

    {% for module in data.modules['Filetype'] %} -
    +
    {{module.name}}
    @@ -306,7 +372,7 @@

    {{type}} Modules

    {% for module in data.modules[type] %} -
    +
    {{module.name}}
    diff --git a/web/templates/modules/module_configuration.html b/web/templates/modules/module_configuration.html index bdf4621..d99b354 100755 --- a/web/templates/modules/module_configuration.html +++ b/web/templates/modules/module_configuration.html @@ -29,7 +29,6 @@

    {{data.module.name}}

    The module's priority when preloading. The smallest values are used first (defaults to 100).

    -
    diff --git a/web/templates/system/index.html b/web/templates/system/index.html new file mode 100644 index 0000000..956a540 --- /dev/null +++ b/web/templates/system/index.html @@ -0,0 +1,94 @@ +{% extends "base.html" %} + +{% block body %} +
    +
    +
    +
    +
    +

    Pending Analyses

    +
    +
    +
    + {% if not data['pending_analyses'] %} + No pending analyses. + {% else %} +
    + + + + + + + + + + + + {% for analysis in data['pending_analyses'] %} + + + + + + + + {% endfor %} + +
    IdFilename(s)MD5Created AtAction
    {{ analysis._id|e }}{{ analysis.file.names|join(', ')|e }}{{ analysis.file.md5|e }}{{ analysis.date|e }}
    +
    + {% endif %} +
    +
    +
    +
    +
    +
    +
    +
    +
    +

    Stale Analyses

    +
    +
    +
    + {% if not data['stale_analyses'] %} + No stale analyses. + {% else %} +
    + + + + + + + + + + + + {% for analysis in data['stale_analyses'] %} + + + + + + + + {% endfor %} + +
    IdFilename(s)MD5Created AtAction
    {{ analysis._id|e }}{{ analysis.file.names|join(', ')|e }}{{ analysis.file.md5|e }}{{ analysis.date|e }}
    +
    + {% endif %} +
    +
    +
    +
    +{% endblock %} diff --git a/web/views/analyses.py b/web/views/analyses.py index d166554..9462dd3 100755 --- a/web/views/analyses.py +++ b/web/views/analyses.py @@ -249,7 +249,7 @@ def post(self): :form string option[*]: value of each enabled option. """ file_id = request.form.get('file_id') - modules = filter(None, request.form.get('modules', '').split(',')) + modules = [_f for _f in request.form.get('modules', '').split(',') if _f] groups = request.form.get('groups', '').split(',') comment = request.form.get('comment', '') diff --git a/web/views/files.py b/web/views/files.py index 8c34161..60e314f 100755 --- a/web/views/files.py +++ b/web/views/files.py @@ -1,3 +1,4 @@ +from bson import ObjectId from pymongo import DESCENDING from flask import make_response, request, flash, redirect from flask_classy import FlaskView, route @@ -5,17 +6,29 @@ from flask_login import current_user from werkzeug.utils import secure_filename +from fame.common.email_utils import EmailServer +from fame.common.config import fame_config from fame.core.store import store from fame.core.file import File from fame.core.module_dispatcher import dispatcher from web.views.negotiation import render, render_json from web.views.constants import PER_PAGE from web.views.helpers import ( - file_download, get_or_404, requires_permission, clean_files, clean_analyses, clean_users, - enrich_comments, comments_enabled + file_download, get_or_404, requires_permission, clean_files, + clean_analyses, clean_users, enrich_comments, comments_enabled ) from web.views.mixins import UIView + +notification_body_tpl = u"""Hi, + +{0} has written the following comment on analysis {1}: + +\t{2} + +Best regards""" + + def return_file(file): analyses = list(current_user.analyses.find({'_id': {'$in': file['file']['analysis']}})) file['av_modules'] = [m.name for m in dispatcher.get_antivirus_modules()] @@ -193,6 +206,31 @@ def change_type(self, id): return redirect(request.referrer) + def notify_new_comment(self, 
file_, analysis_id, commentator_id, comment): + commentator = store.users.find_one({'_id': commentator_id}) + analysis = store.analysis.find_one({'_id': ObjectId(analysis_id)}) + analyst_id = analysis['analyst'] + recipients = set() + # First let's add submiter analyst and check if he is not commentator + if commentator_id != analyst_id: + analyst = store.users.find_one({'_id': analysis['analyst']}) + recipients.add(analyst['email']) + # iter on commentators and add them as recipient + for comment in file_['comments']: + if comment['analyst'] not in [analyst_id, commentator_id]: + recipient = store.users.find_one({'_id': comment['analyst']}) + recipients.add(recipient['email']) + if len(recipients): + analysis_url = "{0}/analyses/{1}".format(fame_config.fame_url, analysis_id) + body = notification_body_tpl.format(commentator['name'], + analysis_url, + comment['comment']) + email_server = EmailServer() + if email_server.is_connected: + msg = email_server.new_message("[FAME] New comment on analysis", body) + msg.send(list(recipients)) + + @route('//add_comment/', methods=["POST"]) def add_comment(self, id): if comments_enabled(): @@ -212,7 +250,10 @@ def add_comment(self, id): if analysis_id: get_or_404(current_user.analyses, _id=analysis_id) - f.add_comment(current_user['_id'], comment, analysis_id, probable_name, notify) + f.add_comment(current_user['_id'], comment, analysis_id, probable_name) + if notify: + self.notify_new_comment(f, analysis_id, current_user['_id'], comment) + else: flash('Comment should not be empty', 'danger') diff --git a/web/views/helpers.py b/web/views/helpers.py index 71a1fbb..b348c4e 100755 --- a/web/views/helpers.py +++ b/web/views/helpers.py @@ -5,6 +5,7 @@ from werkzeug.exceptions import Forbidden from functools import wraps from os.path import basename +import os from fame.core.store import store from fame.core.config import Config @@ -102,6 +103,7 @@ def file_download(filepath): response = make_response(fd.read()) 
response.headers["Content-Disposition"] = u"attachment; filename={0}".format(basename(filepath)).encode('latin-1', errors='ignore') + response.headers["Content-Type"] = u"application/binary" return response diff --git a/web/views/modules.py b/web/views/modules.py index f875552..656ac2b 100755 --- a/web/views/modules.py +++ b/web/views/modules.py @@ -1,8 +1,12 @@ import os +from io import BytesIO +from shutil import move, rmtree from time import time from zipfile import ZipFile from flask import url_for, request, flash from flask_classy import FlaskView, route +from uuid import uuid4 +from markdown2 import markdown from web.views.negotiation import render, redirect, validation_error from web.views.mixins import UIView @@ -23,6 +27,10 @@ def get_name(module): def get_deploy_key(): + # Public key comes as an env var when running as docker container + if os.getenv("FAME_DOCKER", "0") == "1": + return os.getenv("FAME_PUBLIC_KEY") + keyfile = os.path.join(FAME_ROOT, "conf", "id_rsa.pub") key = None @@ -35,6 +43,16 @@ def get_deploy_key(): return key +def get_module_readme(module): + readme = module.get_file('README.md') + + if readme: + with open(readme, 'r') as f: + readme = markdown(f.read(), extras=["code-friendly"]) + + return readme + + def update_config(settings, options=False): for config in settings: value = request.form.get("config_{}".format(config['name'])) @@ -217,9 +235,13 @@ def disable(self, id): """ module = ModuleInfo(get_or_404(ModuleInfo.get_collection(), _id=id)) module.update_value('enabled', False) + + updates = Internals(get_or_404(Internals.get_collection(), name="updates")) + updates.update_value("last_update", time()) + dispatcher.reload() - return redirect({'module': clean_modules(module)}, url_for('ModulesView:index')) + return redirect({'module': clean_modules(module)}, url_for('ModulesView:index', _anchor=module['name'])) @requires_permission('manage_modules') @route('//enable', methods=['POST']) @@ -255,13 +277,17 @@ def enable(self, id): 
return validation_error(url_for('ModulesView:configure', id=module['_id'])) module.update_value('enabled', True) + + updates = Internals(get_or_404(Internals.get_collection(), name="updates")) + updates.update_value("last_update", time()) + dispatcher.reload() - readme = module.get_readme() + readme = get_module_readme(module) if readme: flash(readme, 'persistent') - return redirect({'module': clean_modules(module)}, url_for('ModulesView:index')) + return redirect({'module': clean_modules(module)}, url_for('ModulesView:index', _anchor=module['name'])) @requires_permission('manage_modules') @route('//configuration', methods=['GET', 'POST']) @@ -316,14 +342,15 @@ def configure(self, id): :param id: id of the named configuration. :form acts_on: comma-delimited list of FAME types this module can act on - (only for Processing modules). + (for Processing modules). :form triggered_by: comma-delimited list of triggers (only for Processing modules). :form queue: name of the queue to use for this module (for Processing and Preloading modules). """ + module = ModuleInfo(get_or_404(ModuleInfo.get_collection(), _id=id)) - module['readme'] = module.get_readme() + module['readme'] = get_module_readme(module) if request.method == "POST": if module['type'] == 'Filetype': @@ -352,7 +379,7 @@ def configure(self, id): module.save() dispatcher.reload() - return redirect({'module': clean_modules(module)}, url_for('ModulesView:index')) + return redirect({'module': clean_modules(module)}, url_for('ModulesView:index', _anchor=module['name'])) else: return render({'module': clean_modules(module)}, 'modules/module_configuration.html') @@ -402,10 +429,60 @@ def repository_update(self, id): :>json Repository repository: the repository. 
""" repository = Repository(get_or_404(Repository.get_collection(), _id=id)) - repository.pull() + repository.update_files() return redirect({'repository': clean_repositories(repository)}, url_for('ModulesView:index')) + @requires_permission('worker') + @route('/repository//update', methods=['PUT']) + def repository_receive_update(self, id): + repository = Repository( + get_or_404(Repository.get_collection(), _id=id)) + + backup_path = os.path.join(fame_config.temp_path, 'modules_backup_{}'.format(uuid4())) + + # make sure, the path exists before we backup things; + # prevents errors if the repository was not installed + # prior to this 'update' request + try: + os.makedirs(repository.path()) + except OSError: + pass + + try: + move(repository.path(), backup_path) + + with ZipFile(BytesIO(request.data), 'r') as zipf: + zipf.extractall(repository.path()) + + repository['status'] = 'active' + repository['error_msg'] = '' + repository.save() + + dispatcher.update_modules(repository) + + updates = Internals(get_or_404(Internals.get_collection(), name="updates")) + updates.update_value("last_update", time()) + + rmtree(backup_path) + + return make_response('', 204) # no response + except Exception, e: + print "[E] Could not update repository {}: {}".format( + repository['name'], e) + print "[E] Restoring previous version" + rmtree(repository.path()) + move(backup_path, repository.path()) + + repository['status'] = 'error' + repository['error_msg'] = \ + "could not update repository: '{}'".format(e) + + import traceback + traceback.print_exc() + + return validation_error() + @requires_permission('manage_modules') @route('/repository//delete', methods=['POST']) def repository_delete(self, id): @@ -463,9 +540,9 @@ def repository_new(self): flash("Private repositories are disabled because of a problem with your installation (you do not have a deploy key in 'conf/id_rsa.pub')", 'danger') return validation_error() - repository['status'] = 'cloning' repository.save() - 
repository.clone() + repository.update_files() + return redirect({'repository': clean_repositories(repository)}, url_for('ModulesView:index')) return render({'repository': repository, 'deploy_key': deploy_key}, 'modules/repository_new.html') diff --git a/web/views/system.py b/web/views/system.py new file mode 100644 index 0000000..2c64489 --- /dev/null +++ b/web/views/system.py @@ -0,0 +1,35 @@ +from flask import flash, url_for +from flask_classy import FlaskView, route + +from fame.core.analysis import Analysis +from fame.core.store import store +from web.views.helpers import get_or_404, requires_permission +from web.views.mixins import UIView +from web.views.negotiation import redirect, render + + +class SystemView(FlaskView, UIView): + @requires_permission('system') + def index(self): + pending_analyses = [] + stale_analyses = [] + + for analysis in store.analysis.find({'status': 'pending'}): + file = store.files.find_one({'_id': analysis['file']}) + analysis['file'] = file + pending_analyses.append(analysis) + + for analysis in store.analysis.find({'status': 'running', 'waiting_modules': {'$ne': []}}): + file = store.files.find_one({'_id': analysis['file']}) + analysis['file'] = file + stale_analyses.append(analysis) + + return render({'pending_analyses': pending_analyses, 'stale_analyses': stale_analyses}, "system/index.html") + + @route("//resume", methods=["POST"]) + def resume(self, id): + analysis = Analysis(get_or_404(Analysis.get_collection(), _id=id)) + analysis.resume() + + flash("Resumed analysis {}".format(analysis['_id'])) + return redirect({}, url_for('SystemView:index')) diff --git a/web/views/users.py b/web/views/users.py index 3b8e498..b7a623d 100755 --- a/web/views/users.py +++ b/web/views/users.py @@ -230,7 +230,7 @@ def default_sharing(self, id): break else: flash('You have to at least keep one of your groups.', 'danger') - return redirect(request.referrer) + return redirect({'user': clean_users(user)}, request.referrer) 
user.update_value('default_sharing', groups) diff --git a/webserver.py b/webserver.py index f4591d4..a3ca8c6 100755 --- a/webserver.py +++ b/webserver.py @@ -20,6 +20,7 @@ from web.views.search import SearchView from web.views.configs import ConfigsView from web.views.users import UsersView +from web.views.system import SystemView from web.views.helpers import user_if_enabled try: @@ -133,7 +134,7 @@ def avatar(user_id): if os.path.exists(os.path.join(AVATARS_ROOT, "{}.png".format(user_id))): return url_for('static', filename="img/avatars/{}.png".format(user_id)) else: - return url_for('static', filename="img/avatars/default.png") + return url_for('static', filename="img/default_avatar.png") @app.template_global() @@ -165,6 +166,7 @@ def root(): SearchView.register(app) ConfigsView.register(app) UsersView.register(app) +SystemView.register(app) if __name__ == '__main__': app.run(debug=True, port=4200, host="0.0.0.0") diff --git a/worker.py b/worker.py index 72eaf93..65c2151 100755 --- a/worker.py +++ b/worker.py @@ -1,3 +1,4 @@ +from __future__ import print_function import os import sys import signal @@ -5,7 +6,7 @@ import requests from urlparse import urljoin from socket import gethostname -from StringIO import StringIO +from io import BytesIO from zipfile import ZipFile from shutil import move, rmtree from uuid import uuid4 @@ -18,10 +19,10 @@ from fame.common.config import fame_config from fame.common.constants import MODULES_ROOT from fame.common.pip import pip_install - +from fame.core.user import User UNIX_INSTALL_SCRIPTS = { - "install.sh": ["sh", "{}"], + "install.sh": ["bash", "{}"], "install.py": ["python", "{}"] } @@ -37,28 +38,34 @@ def __init__(self, queues, celery_args, refresh_interval): self.celery_args = [arg for arg in celery_args.split(' ') if arg] self.refresh_interval = refresh_interval + worker_user = User.get(email="worker@fame") + if worker_user: + fame_config.api_key = worker_user['api_key'] + else: + raise Exception("Worker user
(worker@fame) does not exist") + def update_modules(self): # Module updates are only needed for remote workers - if fame_config.remote: + if fame_config.is_worker: # First, backup current code backup_path = os.path.join(fame_config.temp_path, 'modules_backup_{}'.format(uuid4())) move(MODULES_ROOT, backup_path) # Replace current code with code fetched from web server - url = urljoin(fame_config.remote, '/modules/download') + url = urljoin(fame_config.fame_url, '/modules/download') try: response = requests.get(url, stream=True, headers={'X-API-KEY': fame_config.api_key}) response.raise_for_status() os.makedirs(MODULES_ROOT) - with ZipFile(StringIO(response.content), 'r') as zipf: + with ZipFile(BytesIO(response.raw.read()), 'r') as zipf: zipf.extractall(MODULES_ROOT) rmtree(backup_path) - print "Updated modules." + print("Updated modules.") except Exception, e: - print "Could not update modules: '{}'".format(e) - print "Restoring previous version" + print("Could not update modules: '{}'".format(e)) + print("Restoring previous version") move(backup_path, MODULES_ROOT) self.update_module_requirements() @@ -69,25 +76,26 @@ def update_module_requirements(self): if 'error' in module: del(module['error']) + module.save() - if module['type'] == "Processing": + if module['type'] in ("Processing", "Preloading", "Virtualization") and 'queue' in module: should_update = (module['queue'] in self.queues) - elif module['type'] in ["Threat Intelligence", "Reporting", "Filetype"]: - should_update = True else: - should_update = (not fame_config.remote) + should_update = True - if should_update: - self.update_python_requirements(module) - self.launch_install_scripts(module) + if should_update and module['enabled']: + # only in the docker environment the worker has + # permissions to perform elevated tasks + if os.getenv("FAME_DOCKER", "0") == "1": + self.launch_install_scripts(module) - module.save() + self.update_python_requirements(module) def update_python_requirements(self, 
module): requirements = self._module_requirements(module) if requirements: - print "Installing requirements for '{}' ({})".format(module['name'], requirements) + print("Installing requirements for '{}' ({})".format(module['name'], requirements)) rcode, output = pip_install('-r', requirements) @@ -98,10 +106,10 @@ def update_python_requirements(self, module): def launch_install_scripts(self, module): scripts = self._module_install_scripts(module) - for script in scripts: + for script, cwd in scripts: try: - print "Launching installation script '{}'".format(' '.join(script)) - check_output(script, stderr=STDOUT) + print("Launching installation script '{}'".format(' '.join(script))) + check_output(script, stderr=STDOUT, cwd=cwd) except CalledProcessError, e: self._module_installation_error(' '.join(script), module, e.output) except Exception, e: @@ -112,8 +120,9 @@ def _module_installation_error(self, cmd, module, errors): module['enabled'] = False module['error'] = errors + module.save() - print errors + print(errors) def _module_requirements(self, module): return module.get_file('requirements.txt') @@ -134,7 +143,7 @@ def _module_install_scripts(self, module): for arg in INSTALL_SCRIPTS[filename]: cmdline.append(arg.format(filepath)) - results.append(cmdline) + results.append((cmdline, os.path.dirname(filepath))) return results @@ -194,7 +203,8 @@ def start(self): pass def _new_celery_worker(self): - return Popen(['celery', '-A', 'fame.core.celeryctl', 'worker', '-Q', ','.join(self.queues)] + self.celery_args) + return Popen(['celery', '-A', 'fame.core.celeryctl', 'worker', '-Q', ','.join(self.queues)] + self.celery_args, + stdout=sys.stdout, stderr=sys.stderr) if __name__ == '__main__': parser = argparse.ArgumentParser(description='Launches a FAME worker.') @@ -216,9 +226,9 @@ def _new_celery_worker(self): else: queues = ['unix'] - # A local worker should also take care of updates - if not fame_config.remote: - queues.append('updates') + # ensure workers also 
listen to update requests if *nix + if "unix" in queues: + queues.append("updates") fame_init() Worker(queues, args.celery_args, args.refresh_interval).start()