summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorPreston Cody <codeman@gentoo.org>2008-02-17 19:42:31 +0000
committerPreston Cody <codeman@gentoo.org>2008-02-17 19:42:31 +0000
commit4996792a7669bfcc32b5271656a0ab994411bc97 (patch)
tree0155293fca1db9b477de2a31219dd96da6fb36b7
parentadding some module support code as well as a prototype page (diff)
downloadscire-4996792a7669bfcc32b5271656a0ab994411bc97.tar.gz
scire-4996792a7669bfcc32b5271656a0ab994411bc97.tar.bz2
scire-4996792a7669bfcc32b5271656a0ab994411bc97.zip
moving the old python scire server/client into a branch and out of trunk.
svn path=/trunk/; revision=359
-rw-r--r--client/SecureXMLRPCClient.py240
-rw-r--r--client/certgen.py92
-rwxr-xr-xclient/scirec.py324
-rw-r--r--server/DB_functions.py22
-rwxr-xr-xserver/ScireDB.py23
-rw-r--r--server/SecureXMLRPCServer.py66
-rw-r--r--server/blackace/pyCrypto.py485
-rw-r--r--server/blackace/pySecureServer.py137
-rwxr-xr-xserver/blackace/scireserver.py121
-rw-r--r--server/certgen.py92
-rwxr-xr-xserver/cron.py469
-rw-r--r--server/cronTest.txt137
-rw-r--r--server/modules/GACL_functions.py64
-rw-r--r--server/modules/__init__.py0
-rw-r--r--server/modules/client.py76
-rw-r--r--server/modules/general.py8
-rw-r--r--server/modules/job.py354
-rw-r--r--server/postprocess/postprocess.py17
-rw-r--r--server/pygacl.py246
-rwxr-xr-xserver/scired.py178
20 files changed, 0 insertions, 3151 deletions
diff --git a/client/SecureXMLRPCClient.py b/client/SecureXMLRPCClient.py
deleted file mode 100644
index 26ffcc1..0000000
--- a/client/SecureXMLRPCClient.py
+++ /dev/null
@@ -1,240 +0,0 @@
-import httplib
-import xmlrpclib
-import socket
-import time
-import os
-import select
-import string
-import sys
-import urllib
-import traceback
-from OpenSSL import SSL, crypto
-
-def display_traceback():
- for line in traceback.format_stack():
- print line.strip()
-
-class SecureXMLRPCClient(xmlrpclib.ServerProxy):
-
- def __init__(self, host, port, client_cert, client_key, verify_cert_func=None):
- self._transport = SafeTransport(self.__host, client_cert, client_key, verify_cert_func)
- xmlrpclib.ServerProxy.__init__(self, "https://" + host + ":" + str(port), transport=self._transport, encoding="utf-8", allow_none=True)
-
- def cancel(self):
- self._transport.close()
-
-class SafeTransport(xmlrpclib.Transport):
-
- def __init__(self, host, client_cert, client_key, verify_cert_func=None):
- self.__host = host
- self.__client_cert = client_cert
- self.__client_key = client_key
- self.__verify_cert_func = verify_cert_func
-
- def make_connection(self, host):
- host, extra_headers, x509 = self.get_host_info(host)
- _host, _port = urllib.splitport(host)
- self._https = HTTPS(_host, int(_port), self.__client_cert, self.__client_key, self.__verify_cert_func)
- return self._https
-
- def close(self):
- pass
-# print "SafeTransport.close()"
-# if self._https:
-# self._https.close()
-# self._https = None
-
-class HTTPSConnection(httplib.HTTPConnection):
-
- response_class = httplib.HTTPResponse
-
- def __init__(self, host, port=None, cert_file=None, key_file=None, verify_cert_func=None):
- httplib.HTTPConnection.__init__(self, host, port, None)
- self.verify_cert_func = verify_cert_func
- self.cert_file = cert_file
- self.key_file = key_file
- self.sock = None
-
- def connect(self):
- # Initialize context
- ctx = SSL.Context(SSL.SSLv23_METHOD)
- if self.verify_cert_func:
- ctx.set_verify(SSL.VERIFY_PEER, self.verify_cert_func) # Demand a certificate
- ctx.use_privatekey_file(self.key_file)
- ctx.use_certificate_file(self.cert_file)
-
- # Set up client
- sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- con = SSL.Connection(ctx, sock)
- self.sock = SSLConnection(con)
- self.sock.connect((self.host, self.port))
-
-class HTTPS(httplib.HTTP):
-
- _connection_class = HTTPSConnection
-
- def __init__(self, host='', port=None, cert_file=None, key_file=None, verify_cert_func=None):
- self._setup(self._connection_class(host, port, cert_file, key_file, verify_cert_func))
-
-# Higher-level SSL objects used by rpclib
-#
-# Copyright (c) 2002 Red Hat, Inc.
-#
-# Author: Mihai Ibanescu <misa@redhat.com>
-# Modifications by Dan Williams <dcbw@redhat.com>
-
-class SSLConnection:
- """
- This whole class exists just to filter out a parameter
- passed in to the shutdown() method in SimpleXMLRPC.doPOST()
- """
-
-# DEFAULT_TIMEOUT = 20
- DEFAULT_TIMEOUT = 0
-
- def __init__(self, conn):
- """
- Connection is not yet a new-style class,
- so I'm making a proxy instead of subclassing.
- """
- self.__dict__["conn"] = conn
- self.__dict__["close_refcount"] = 0
- self.__dict__["closed"] = False
- self.__dict__["timeout"] = self.DEFAULT_TIMEOUT
-
- def __del__(self):
- self.__dict__["conn"].close()
-
- def __getattr__(self,name):
- return getattr(self.__dict__["conn"], name)
-
- def __setattr__(self,name, value):
- setattr(self.__dict__["conn"], name, value)
-
- def settimeout(self, timeout):
- if timeout == None:
- self.__dict__["timeout"] = self.DEFAULT_TIMEOUT
- else:
- self.__dict__["timeout"] = timeout
- self.__dict__["conn"].settimeout(timeout)
-
- def shutdown(self, how=1):
- """
- SimpleXMLRpcServer.doPOST calls shutdown(1),
- and Connection.shutdown() doesn't take
- an argument. So we just discard the argument.
- """
- self.__dict__["conn"].shutdown()
-
- def accept(self):
- """
- This is the other part of the shutdown() workaround.
- Since servers create new sockets, we have to infect
- them with our magic. :)
- """
- c, a = self.__dict__["conn"].accept()
- return (SSLConnection(c), a)
-
- def makefile(self, mode, bufsize):
- """
- We need to use socket._fileobject Because SSL.Connection
- doesn't have a 'dup'. Not exactly sure WHY this is, but
- this is backed up by comments in socket.py and SSL/connection.c
-
- Since httplib.HTTPSResponse/HTTPConnection depend on the
- socket being duplicated when they close it, we refcount the
- socket object and don't actually close until its count is 0.
- """
-# display_traceback()
- self.__dict__["close_refcount"] = self.__dict__["close_refcount"] + 1
-# print "SSLConnection.makefile(): close_refcount=", str(self.__dict__["close_refcount"])
- return PlgFileObject(self, mode, bufsize)
-
- def close(self):
-# print "SSLConnection.close()"
-# display_traceback()
- if self.__dict__["closed"]:
- return
- self.__dict__["close_refcount"] = self.__dict__["close_refcount"] - 1
-# print "SSLConnection.close(): close_refcount=", str(self.__dict__["close_refcount"])
- if self.__dict__["close_refcount"] == 0:
- pass
-# print "SSLConnection.close(): close_refcount=0...actually closing"
-# self.shutdown()
-# self.__dict__["conn"].close()
-# self.__dict__["closed"] = True
-
- def sendall(self, data, flags=0):
- """
- - Use select() to simulate a socket timeout without setting the socket
- to non-blocking mode.
- - Don't use pyOpenSSL's sendall() either, since it just loops on WantRead
- or WantWrite, consuming 100% CPU, and never times out.
- """
- timeout = self.__dict__["timeout"]
- con = self.__dict__["conn"]
-# (read, write, excpt) = select.select([], [con], [], timeout)
-# if not con in write:
-# raise socket.timeout((110, "Operation timed out."))
-
-# starttime = time.time()
- origlen = len(data)
- sent = -1
- while len(data):
-# curtime = time.time()
-# if curtime - starttime > timeout:
-# raise socket.timeout((110, "Operation timed out."))
-
- try:
- sent = con.send(data, flags)
- except SSL.SysCallError, e:
- if e[0] == 32: # Broken Pipe
- self.close()
- sent = 0
- else:
- raise socket.error(e)
- except (SSL.WantWriteError, SSL.WantReadError):
- time.sleep(0.2)
- continue
-
- data = data[sent:]
- return origlen - len(data)
-
- def recv(self, bufsize, flags=0):
-# timeout = self.__dict__["timeout"]
- con = self.__dict__["conn"]
- if self.closed:
-# print "socket is closed"
- return None
-# (read, write, excpt) = select.select([con], [], [], timeout)
-# if not con in read:
-# raise socket.timeout((110, "Operation timed out."))
-
-# starttime = time.time()
- while True:
-# curtime = time.time()
-# if curtime - starttime > timeout:
-# raise socket.timeout((110, "Operation timed out."))
-
- try:
- data = con.recv(bufsize, flags)
- return data
- except SSL.ZeroReturnError:
- return None
- except SSL.WantReadError:
- time.sleep(0.2)
- return None
-
-class PlgFileObject(socket._fileobject):
-
- def close(self):
- """
- socket._fileobject doesn't actually _close_ the socket,
- which we want it to do, so we have to override.
- """
- try:
- if self._sock:
- self.flush()
- self._sock.close()
- finally:
- self._sock = None
diff --git a/client/certgen.py b/client/certgen.py
deleted file mode 100644
index ffab9c9..0000000
--- a/client/certgen.py
+++ /dev/null
@@ -1,92 +0,0 @@
-#
-# certgen.py
-#
-# Copyright (C) Martin Sjogren and AB Strakt 2001, All rights reserved
-#
-# $Id$
-#
-"""
-Certificate generation module.
-"""
-
-from OpenSSL import crypto
-
-TYPE_RSA = crypto.TYPE_RSA
-TYPE_DSA = crypto.TYPE_DSA
-
-def createKeyPair(type, bits):
- """
- Create a public/private key pair.
-
- Arguments: type - Key type, must be one of TYPE_RSA and TYPE_DSA
- bits - Number of bits to use in the key
- Returns: The public/private key pair in a PKey object
- """
- pkey = crypto.PKey()
- pkey.generate_key(type, bits)
- return pkey
-
-def createCertRequest(pkey, digest="md5", **name):
- """
- Create a certificate request.
-
- Arguments: pkey - The key to associate with the request
- digest - Digestion method to use for signing, default is md5
- **name - The name of the subject of the request, possible
- arguments are:
- C - Country name
- SP - State or province name
- L - Locality name
- O - Organization name
- OU - Organizational unit name
- CN - Common name
- email - E-mail address
- Returns: The certificate request in an X509Req object
- """
- req = crypto.X509Req()
- subj = req.get_subject()
-
- for (key,value) in name.items():
- setattr(subj, key, value)
-
- req.set_pubkey(pkey)
- req.sign(pkey, digest)
- return req
-
-def createCertificate(req, (issuerCert, issuerKey), serial, (notBefore, notAfter), digest="md5"):
- """
- Generate a certificate given a certificate request.
-
- Arguments: req - Certificate reqeust to use
- issuerCert - The certificate of the issuer
- issuerKey - The private key of the issuer
- serial - Serial number for the certificate
- notBefore - Timestamp (relative to now) when the certificate
- starts being valid
- notAfter - Timestamp (relative to now) when the certificate
- stops being valid
- digest - Digest method to use for signing, default is md5
- Returns: The signed certificate in an X509 object
- """
- cert = crypto.X509()
- cert.set_serial_number(serial)
- cert.gmtime_adj_notBefore(notBefore)
- cert.gmtime_adj_notAfter(notAfter)
- cert.set_issuer(issuerCert.get_subject())
- cert.set_subject(req.get_subject())
- cert.set_pubkey(req.get_pubkey())
- cert.sign(issuerKey, digest)
- return cert
-
-def certgen(keytype, keylength, commonname, keyfile, certfile):
- key_types = { "RSA": TYPE_RSA, "DSA": TYPE_DSA }
- key = createKeyPair(key_types[keytype], keylength)
- req = createCertRequest(key, CN=commonname)
- cert = createCertificate(req, (req, key), 0, (0, 60*60*24*365*50)) # fifty years
- pkey_file = open(keyfile, 'w')
- pkey_file.write(crypto.dump_privatekey(crypto.FILETYPE_PEM, key))
- pkey_file.close()
- cert_file = open(certfile, 'w')
- cert_file.write(crypto.dump_certificate(crypto.FILETYPE_PEM, cert))
- cert_file.close()
-
diff --git a/client/scirec.py b/client/scirec.py
deleted file mode 100755
index 78c6799..0000000
--- a/client/scirec.py
+++ /dev/null
@@ -1,324 +0,0 @@
-#!/usr/bin/python2.4
-
-from OpenSSL import SSL
-from OpenSSL import crypto
-import certgen
-import sys, os, socket, getopt, commands, time
-#import fcntl, select
-import logging, syslog
-from SecureXMLRPCClient import SecureXMLRPCClient
-import xmlrpclib
-import traceback
-from pprint import pprint
-
-bind_address = "localhost"
-bind_port = 9876
-config_dir = "/etc/scire"
-sim = False
-debug = True
-verbose = False
-daemon = False
-poll_int = 10
-
-# MATT: When job is received, store it in a spool directory (as XML?)
-# Implemented in scired.py (stub).
-
-# MATT: When the client starts up, it needs to read in any jobs from
-# the spool. (Potential for indexing here?)
-# Implemented in scired.py (stub).
-
-# MATT: When the job is finished executing, remove it from the spool.
-# Implemented in scired.py (stub).
-
-# MATT: Populate some kind of summary vector to send to the server so
-# the server won't resend jobs. The vector should consist of
-# hashes.
-
-# MATT: What is necessary to make these actions atomic? Is that even
-# possible?
-
-def display_traceback():
- etype, value, tb = sys.exc_info()
- s = traceback.format_exception(etype, value, tb)
- for line in s:
- print line.strip()
-
-def run_jobs(client,jobs):
- # The rcodes dict will contain the results for the
- # execution of each job. It will be returned in its
- # entirety.
- # 0 = Successful
- # 1 = Job execution failed
- # 2 = Sanity check failed
-
- for jobid in jobs:
- job=client.get_job(jobid)
-
- if sim or debug or verbose:
- print "Job %s:" % job['jobid']
- print job
- #for key in job.keys():
- # if key != "script":
- # print ' %s: %s' % (key,job[key])
- #print ' Script %s:' %job['script']['scriptid']
- #for key in job['script'].keys():
- # print ' %s: %s' % (key,job['script'][key])
- #print ''
-
- if not sanity_check(job):
- rcodes[jobid] = 2
- continue
-
- # Here is where we run the job.
- # Do we want to allow binary executables? If so, this might need
- # to be done a bit differently.
-
- # Here we don't use tmpfile because it would disappear too quickly.
- # The file needs to stick around long enough for us to run it, but
- # we also have to be confident that the file is finished.
- pid = os.getpid()
- scriptfile = '/tmp/'+`pid`+'.'+`job['jobid']`
- if debug:
- print 'scriptfile: %s' % scriptfile
- scriptfd = open(scriptfile,'w')
- scriptfd.writelines(job['script']['script_data'])
- scriptfd.close()
-
- # As we are sure that the job is now on the client,
- # let's mark it as downloaded
- client.mark_job_as('Downloaded', job['jobid'])
-
- os.chmod(scriptfile,0755)
-
- # Is this dangerous?
- rcode = False
- if sim:
- print 'Command not executed (simulation mode):\n%s' % job['script']['script_data']
- else:
- (rcode,output) = run_job(client,job,scriptfile)
- print 'rcode: %s' % str(rcode)
- print 'success code is %s' % str(job['script']['success_code'])
- output = repr(output)
- #print 'Command output to return: %s' % output
- if rcode == 'ScireDepErr':
- success = 'Failed'
- client.mark_job_as('Failed', jobid)
- client.job_return(job['jobid'],success,output)
- elif rcode == 'Aborted':
- success = 'Aborted'
- client.mark_job_as('Cancelled', jobid)
- client.job_return(job['jobid'],success)
- elif int(rcode) == int(job['script']['success_code']):
- success = 'Succeeded'
- client.mark_job_as('Finished', jobid)
- job['script']['return_output'] = 1; #FIXME don't hardcode hacks like this. fix the DB/UI
- if job['script']['return_output'] and (job['script']['return_output']== 1):
- client.job_return(job['jobid'],success,output)
- else:
- client.job_return(job['jobid'],success)
- # unspool_job should probably take jobid, not job
- #client.unspool_job(job)
- else:
- success = 'Failed'
- client.job_return(job['jobid'],success,output)
-
- jobs.remove(jobid)
- os.remove(scriptfile)
-
- return jobs
-
-
-# Here we execute the command and return the output from the command.
-# MATT: We still need to implement failure detection/return codes.
-def run_job(client,job,command):
-
- jobid = job['jobid']
-
- if client.job_cancelled(jobid):
- return 'Aborted','Job aborted'
- # RLAZO: Comment for testing
-# deps = job['job_dependency']
- deps = 'None'
- if deps != 'None':
- deplist = ()
- deplist = job['job_dependency'].split(',')
- for jobdep in deplist:
- d_status = client.get_jstatus(jobdep)
- print "Dependency for jobid %s: %s. Status: %s" % (jobid,jobdep,d_status)
- if not d_status == 'Succeeded':
- rmsg = "Dependency for jobid %s: %s. Status: %s" % (jobid,jobdep,d_status)
- return 'ScireDepErr',rmsg
-
- client.mark_job_as('Running', jobid)
- status,output = commands.getstatusoutput(''.join([command,' 2>&1']))
-
- if debug:
- print 'Command Output:\n %s' % output
-
- if output:
- # MATT: Right now this is just an arbitrary file.
- # No reason not to use the value provided by the
- # database except for the fact that we will need to
- # change it to use syslog. Here's an idea: if
- # the logfile is defined in the database, write to
- # it, but also always write to syslog.
- # Also, we should timestamp the log entries and
- # deal with file existence.
-
-# self.conn = adodb.NewADOConnection(config.get("db","type"))
- logfile = job['script']['log_location']
- #if logfile and os.path.isfile(logfile):
- if logfile:
- print 'Writing log to %s' % logfile
- logging.basicConfig(level=logging.INFO,
- format='%(asctime)s %(message)s',
- filename=logfile,
- filemode='a')
- logging.info('Jobid %s: %s', job['jobid'], job['description'])
- logging.info(output)
-
- syslog.syslog('Jobid %s: %s' % (job['jobid'], job['description']))
- syslog.syslog(output)
-
- return os.WEXITSTATUS(status),output
-
-
-def sanity_check(job):
- return True
-
-def sys_info():
- client_info = {}
-# defint = os.popen('/sbin/route|grep default').read().split()[7]
- default_interface = commands.getoutput(r"/sbin/route | awk '/^default/ { print $8 }'").strip()
-
-# cmdstr = '/sbin/ifconfig ' + defint + ' | grep HWaddr'
-# client_info['mac'] = os.popen(cmdstr).read().replace('\n','').split()[4]
- client_info['mac'] = commands.getoutput("/sbin/ifconfig " + default_interface + r" | awk '/HWaddr/ { print $5 }'").strip()
-
- #cmdstr = "/sbin/ifconfig " + defint + " | grep 'inet '"
- #client_info['ip'] = os.popen(cmdstr).read().replace('addr:','').split()[1]
- client_info['ip'] = commands.getoutput("/sbin/ifconfig " + default_interface + r" | sed -ne '/^[[:space:]]*inet addr:/{s///;s/[[:space:]]\+.*$//p}'").strip()
-
- try:
- client_info['hostname'] = socket.gethostbyaddr(client_info['ip'])[0]
- except:
- client_info['hostname'] = client_info['ip']
-
- return client_info
-
-def verify_server_cert(conn, cert, errnum, depth, ok):
- if not os.path.isfile(config_dir + "/known_servers"):
- print "Recording server's cert digest in known_servers"
- known_servers_file = open(config_dir + "/known_servers", "w")
- known_servers_file.write(cert.digest("sha1") + "\n")
- known_servers_file.close()
- else:
- known_servers_file = open(config_dir + "/known_servers", "r")
- known_servers = [x.strip() for x in known_servers_file.readlines()]
- known_servers_file.close()
- if not cert.digest("sha1") in known_servers:
- return False
- return True
-
-def generate_cert_and_key(keytype="RSA", keylength=1024):
- # Generate server cert/key
- sys.stdout.write("Generating client certificate...")
- #certgen.certgen(keytype, keylength, "Scire Client", config_dir + '/client.key', config_dir + '/client.cert')
- certgen.certgen(keytype, keylength, socket.gethostname(), config_dir + '/client.key', config_dir + '/client.cert')
- print "done"
-
-def scirec_main(client,known_jobs):
- summary = client.gen_summary(known_jobs)
- print 'Summary: ' + str(summary)
-
- #print client.db_version()
- if verbose: print "Acquiring jobs..."
- jobs = client.get_jobs(summary,known_jobs)
-
- if jobs == []:
- print 'There are no jobs.'
- leftovers = []
- elif not jobs:
- print 'Problem acquiring jobs. Quitting.'
- sys.exit(1)
- else:
- print 'Jobs: %s' %jobs
- leftovers = run_jobs(client,jobs)
-
- return leftovers
-
-if __name__ == "__main__":
- try:
- opts, args = getopt.getopt(sys.argv[1:], 'sdDvp:h:')
- except getopt.error, msg:
- print msg
- print """usage: python %s [-s] [-d] [-v] [-D] [-h host] [-p port]
- [-s] = Simulation mode. Doesn't actually run scripts.
- [-d] = Turn on debugging
- [-v] = Turn on verboseness
- [-D] = Run in daemon mode
- [-h host] = Set the host name/ip of the scire server
- [-p port] = Set the port to look for the scire server
- """ % sys.argv[0]
- sys.exit(2)
- for o, a in opts:
- if o == '-s': sim = True
- elif o == '-d':
- debug = True
- verbose = True
- elif o == '-D': daemon = True
- elif o == '-h': bind_address = a
- elif o == '-p': bind_port = int(a)
- elif o == '-v': verbose = True
-
- if verbose: print "Starting up...\nChecking for the certificate... ",
-
- # Check for public/private keypair and generate if they don't exist
- if not os.path.isfile(config_dir + "/client.key") or not os.path.isfile(config_dir + "/client.cert"):
- if verbose: print "No cert found. Generating.\n"
- generate_cert_and_key()
-
- try:
- if verbose: print "Loading the certificate...\n"
- certfh = open(config_dir + "/client.cert","r")
- except:
- print 'Could not load certificate'
- sys.exit(1)
-
- certbuf = certfh.read()
- certfh.close()
- client_cert = crypto.load_certificate(crypto.FILETYPE_PEM,certbuf)
-
- if verbose: print "Connecting to the server...\n"
- try:
- client = SecureXMLRPCClient(bind_address, bind_port, config_dir + "/client.cert", config_dir + "/client.key", verify_server_cert)
- except:
- print "ERROR: Couldn't connect to server!\n"
- display_traceback()
-
- if debug: print client_cert.digest("sha1")
-
- if verbose: print "Registering client... "
- client_status = client.register_client()
-
- if not client_status:
- try:
- client_info = sys_info()
- client.add_client(certbuf,client_info)
- print "Client added successfully. Status is pending. Will exit now.\n"
- except:
- print "ERROR: Could not add client\n"
- display_traceback()
- else:
- print "Client Status is: %s \n" % client_status
-
- summary = []
- known_jobs = []
-
- if daemon:
- if verbose: print "Running in daemon mode.\n"
- while True:
- known_jobs = scirec_main(client,known_jobs)
- time.sleep(poll_int)
- else:
- scirec_main(client,known_jobs)
diff --git a/server/DB_functions.py b/server/DB_functions.py
deleted file mode 100644
index b9e889d..0000000
--- a/server/DB_functions.py
+++ /dev/null
@@ -1,22 +0,0 @@
-from ScireDB import *
-from adodb import *
-from client import *
-import md5
-import datetime
-
-def add_package(clientid,package,version):
- print "Clientid: "+str(clientid)+"\tPackage: "+str(package[1:])+"\tVersion: "+str(version)+"\n"
- db = ScireDB()
- query = "INSERT INTO software (clientid,package,current_ver) VALUES ("+str(clientid)+",'"+str(package)+"','"+str(version)+"') ON DUPLICATE KEY UPDATE current_ver='"+str(version)+"'"
- print query
- cursor = db.conn.Execute(query)
-# cursor = db.conn.Execute('''
-#INSERT INTO software
-#(clientid,package,current_ver)
-#VALUES (%d,%s,%s) ON DUPLICATE KEY UPDATE
-#''', (int(clientid),package[1:],version))
-
-def update_package(clientid,package,version):
- pass
-def delete_package(clientid,package,version):
- pass
diff --git a/server/ScireDB.py b/server/ScireDB.py
deleted file mode 100755
index 3ab137f..0000000
--- a/server/ScireDB.py
+++ /dev/null
@@ -1,23 +0,0 @@
-from adodb import *
-import ConfigParser
-
-configfile = "/etc/scire.conf"
-
-class ScireDB(ADOConnection, ADOCursor):
- def __init__(self):
- config = ConfigParser.ConfigParser()
- config.read(configfile)
-
- self.conn = adodb.NewADOConnection(config.get("db","type"))
- self.conn.Connect (config.get("db","host"),
- config.get("db","user"),
- config.get("db","passwd"),
- config.get("db","db"))
-
- def version(self):
- row = self.conn.GetOne("SELECT VERSION()")
- #print "server version:", row
- return row
-
- def close(self):
- self.conn.Close()
diff --git a/server/SecureXMLRPCServer.py b/server/SecureXMLRPCServer.py
deleted file mode 100644
index f92a096..0000000
--- a/server/SecureXMLRPCServer.py
+++ /dev/null
@@ -1,66 +0,0 @@
-from SimpleXMLRPCServer import SimpleXMLRPCServer, SimpleXMLRPCRequestHandler
-from OpenSSL import SSL
-import SocketServer
-import socket
-
-class SecureSocketServer(SocketServer.ForkingMixIn, SocketServer.TCPServer):
-
- def __init__(self, addr, cert, key, requestHandler, verify_cert_func=None):
- SocketServer.TCPServer.__init__(self, addr, requestHandler)
- ctx = SSL.Context(SSL.SSLv23_METHOD)
- if not verify_cert_func and getattr(self, 'verify_client_cert'):
- verify_cert_func = getattr(self, 'verify_client_cert')
- if verify_cert_func:
- ctx.set_verify(SSL.VERIFY_PEER|SSL.VERIFY_FAIL_IF_NO_PEER_CERT, verify_cert_func)
- ctx.use_privatekey_file(key)
- ctx.use_certificate_file(cert)
-
- tmpConnection = SSL.Connection(ctx, socket.socket(socket.AF_INET, socket.SOCK_STREAM))
- self.socket = SecureSocketConnection(tmpConnection)
-
- self.server_bind()
- self.server_activate()
-
- def finish_request(self, request, client_address):
- """Finish one request by instantiating RequestHandlerClass."""
- self.RequestHandlerClass(request, client_address, self)
-
-# def verify_client_cert(self, conn, cert, errnum, depth, ok):
-# self.client_digest = cert.digest("sha1")
-# return True
-
-class SecureXMLRPCRequestHandler(SimpleXMLRPCRequestHandler):
-
- def __init__(self, request, client_address, server, client_digest=None):
- SimpleXMLRPCRequestHandler.__init__(self, request, client_address, server)
- self.client_digest = client_digest
-
- def setup(self):
- self.connection = self.request
- self.rfile = socket._fileobject(self.request, "rb", self.rbufsize)
- self.wfile = socket._fileobject(self.request, "wb", self.wbufsize)
-
-class SecureXMLRPCServer(SecureSocketServer, SimpleXMLRPCServer):
-
- def __init__(self, address, cert, key, handler=SecureXMLRPCRequestHandler, verify_cert_func=None):
- self.logRequests = False
- SecureSocketServer.__init__(self, address, cert, key, handler, verify_cert_func)
- self.funcs = {}
-
-class SecureSocketConnection:
-
- def __init__(self, connection):
- self.__dict__["connection"] = connection
-
- def __getattr__(self, name):
- return getattr(self.__dict__["connection"], name)
-
- def __setattr__(self, name, value):
- setattr(self.__dict__["connection"], name, value)
-
- def shutdown(self, how=1):
- self.__dict__["connection"].shutdown()
-
- def accept(self):
- connection, address = self.__dict__["connection"].accept()
- return (SecureSocketConnection(connection), address)
diff --git a/server/blackace/pyCrypto.py b/server/blackace/pyCrypto.py
deleted file mode 100644
index 04fe3a9..0000000
--- a/server/blackace/pyCrypto.py
+++ /dev/null
@@ -1,485 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-# $Id: pyCrypto.py,v 0.1 2006/05/18 06:15:20 wolfwood Exp $
-#
-# pyCrypto 0.1 - Object oriented pycrypto class with key serializing.
-# http://starwind.homelinux.com/
-#
-# Copyright (c) 2006 Blackace Enterprises
-#
-# This program is free software; you can redistribute it and/or
-# modify it under the terms of the GNU General Public License
-# as published by the Free Software Foundation; either version 2
-# of the License, or (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
-#
-
-import sys, os, time, syslog, types, re, struct, binascii
-from Crypto.Hash import SHA, MD5
-from Crypto.Cipher import DES3
-from Crypto.PublicKey import ElGamal, RSA
-from Crypto.Util.randpool import RandomPool
-from Crypto.Util import number as CryptoNumber
-
-def log(priority, message):
- priorities = {
- 'emerg': syslog.LOG_EMERG,
- 'alert': syslog.LOG_ALERT,
- 'crit': syslog.LOG_CRIT,
- 'err': syslog.LOG_ERR,
- 'warn': syslog.LOG_WARNING,
- 'notice': syslog.LOG_NOTICE,
- 'info': syslog.LOG_INFO,
- 'debug': syslog.LOG_DEBUG
- }
- name = re.compile('^(?:[^/]*/)?(.*?)(?:\.py)?$').sub('\\1', sys.argv[0])
- syslog.openlog(name, syslog.LOG_PID, syslog.LOG_DAEMON)
- syslog.syslog(priorities[priority], message)
- syslog.closelog()
-
-def die(message):
- log('err', 'died with error: ' + message)
- sys.stderr.write(message + '\n\r')
- sys.exit(1)
-
-class pyCrypto:
- keytypes = {
- 'ElGamal': ElGamal,
- 'ELGAMAL': ElGamal,
- 'scire-elgamal': ElGamal,
- 'ssh-elgamal': ElGamal,
- 'rsa': RSA,
- 'RSA': RSA,
- 'scire-rsa': RSA,
- 'ssh-rsa': RSA
- }
-
- def __init__(self, keylength=2048, keytype='ElGamal', hashtype='SHA', keypath='file:///etc/scire/id_elgamal'):
- self.keylength = keylength
- self.keytype = keytype
- self.hashtype = hashtype
- self.keypath = keypath
- self.random = RandomPool(keylength, hash=eval(hashtype))
- self.key = self.getKey()
- if isinstance(self.key, basestring):
- log('warn', 'getting key-pair failed: ' + self.key)
- log('info', 'generating new key-pair')
- self.key = self.genKey()
- if isinstance(self.key, basestring):
- die('generating key-pair failed: ' + self.key)
- log('info', 'putting key-pair')
- self.putKey()
-
- def getRandom(self, bytes=256):
- start = time.time()
- random = self.random.get_bytes(bytes)
- time.sleep(0.9999)
- stop = str(int(re.sub("^[^.]*\.", "", str(time.time() - start))))
- self.random.stir_n(int(stop[0]))
- self.random.add_event(stop)
- return random
-
- def encrypt(self, data, key=False):
- retval = ''
- if isinstance(key, bool):
- key = self.key
- while len(data) > 0:
- length = key.size() / 8
- chunk = ''
- if isinstance(key, ElGamal.ElGamalobj):
- pieces = key.encrypt(data[:length], self.getRandom())
- else:
- pieces = key.encrypt(data[:length], '')
- for piece in pieces:
- chunk += binascii.b2a_base64(piece) + '$'
- retval += binascii.b2a_base64(chunk[:-1])
- data = data[length:]
- return retval[:-1]
-
- def decrypt(self, data, key=False):
- retval = ''
- if isinstance(key, bool):
- key = self.key
- chunks = data.split('\n')
- for chunk in chunks:
- base64pieces = binascii.a2b_base64(chunk).split('$')
- pieces = []
- for piece in base64pieces:
- pieces.append(binascii.a2b_base64(piece))
- retval += key.decrypt(tuple(pieces))
- return retval
-
- def genKey(self):
- keyobj = self.keytypes[self.keytype]
- try:
- key = keyobj.generate(self.keylength, self.getRandom)
- except:
- return '"' + self.keytype + '" is not a supported key type'
- return key
-
- def getKey(self, keypath=''):
- if len(keypath) == 0:
- keypath = self.keypath
- if len(keypath) == 0:
- return 'no keypath specified'
- passphrase = ''
- mech, path = keypath.split(':', 1)
- if path[:2] == '//':
- path = path[2:]
- pos = path.rfind('*')
- if pos >= 0:
- passphrase = path[pos+1:]
- path = path[:pos]
- del(pos)
- data = ''
- if mech == 'file':
- if path[0] == '~':
- path = os.path.expanduser(path)
- else:
- path = os.path.abspath(path)
- if not os.path.isfile(path):
- return '"' + path + '" does not exist'
- if not os.access(path, os.R_OK):
- return 'read permission denied to "' + path + '"'
- kf = open(path, 'r')
- data = kf.read()
- kf.close()
- del(kf)
- elif mech == 'string':
- data = path
- elif mech == 'mysql':
- print '',
- # parse path as user:pass*passphrase@host/db/table/col/wherecol/whereval
- # and retrieve data
- else:
- return '"' + mech + '" is not a supported key storage mechanism'
- key = self.unpackKey(data, passphrase)
- if isinstance(key, basestring):
- return 'unpacking key-pair failed: ' + key
- else:
- return key
-
- def putKey(self, keypath=''):
- if len(keypath) == 0:
- keypath = self.keypath
- if len(keypath) == 0:
- return False
- mech, path = keypath.split(':', 1)
- passphrase = ''
- pos = path.rfind('*')
- if pos >= 0:
- passphrase = path[pos+1:]
- path = path[:pos]
- del(pos)
- data = self.packKey(self.key, passphrase)
- if len(data) <= 0:
- log('err', 'packing key-pair failed')
- return False
- if mech == 'file':
- if path[:2] == '//':
- path = path[2:]
- if path[0] == '~':
- path = os.path.expanduser(path)
- else:
- path = os.path.abspath(path)
- if not os.path.isdir(os.path.dirname(path)):
- log('err', '"' + os.path.dirname(path) + '" does not exist')
- return False
- if os.path.isfile(path) and not os.access(path, os.W_OK):
- log('err', 'write permission denied to "' + path + '"')
- return False
- if not os.path.isfile(path):
- if os.access(os.path.dirname(path), os.W_OK):
- kf = open(path, 'w')
- kf.write('')
- kf.close()
- del(kf)
- os.chmod(path, 0600)
- else:
- log('err', 'write permission denied to "' + os.path.dirname(path) + '"')
- return False
- kf = open(path, 'w')
- kf.write(data)
- kf.close()
- del(kf)
- return True
- elif mech == 'mysql':
- print '',
- # parse path as user:pass*passphrase@host/db/table/col/wherecol/whereval
- # and store data
- else:
- log('err', '"' + mech + '" is not a supported key storage mechanism')
- return False
-
- def unpackKeyBlob(self, blob):
- fields = []
- while blob:
- type = ord(blob[0])
- if (type & 0xc0) != 0:
- return False
- length = ord(blob[1])
- if blob == 0x80:
- return False
- if length & 0x80:
- longlength = length & 0x7f
- length = CryptoNumber.bytes_to_long(blob[2:2+longlength])
- size = 2 + longlength
- else:
- size = 2
- body, blob = blob[size:size+length], blob[size+length:]
- type = type & (~0x20)
- if type == 0x10:
- result = self.unpackKeyBlob(body)
- if not isinstance(result, bool):
- fields.append(result)
- elif type == 0x02:
- fields.append(CryptoNumber.bytes_to_long(body))
- if len(fields) == 1:
- return fields[0]
- return fields
-
- def packKeyBlob(self, fields):
- blob = ''
- for field in fields:
- if isinstance(field, tuple) or isinstance(field, types.ListType):
- data = self.packKeyBlob(field)
- type = 0x10|0x20
- elif isinstance(field, int) or isinstance(field, long):
- data = CryptoNumber.long_to_bytes(field)
- if ord(data[0])&(0x80):
- data = '\x00' + data
- type = 0x02
- blob += chr(type)
- if len(data) > 127:
- length = CryptoNumber.long_to_bytes(len(data))
- blob += chr(len(length)|0x80) + length
- else:
- blob += chr(len(data))
- blob += data
- return blob
-
- def unpackKey(self, data, passphrase=''):
- parsing = False
- keyobj = None
- key = {'type': '', 'encrypted': False, 'cipher': '', 'iv': '', 'headers': [], 'blob': '', 'comment': ''}
- for line in data.split('\n'):
- if line[:8] == '-----END' and line[-16:] == 'PRIVATE KEY-----':
- break
- if len(line.split(':', 1)) == 2:
- if parsing:
- parsing = False
- header, value = line.split(':', 1)
- header = header.strip()
- value = value.strip()
- key['headers'].append({'header': header, 'value': value})
- if header.lower() == 'proc-type' and value.lower() == '4,encrypted':
- key['encrypted'] = True
- if header.lower() == 'dek-info':
- key['cipher'], key['iv'] = value.split(',', 1)
- key['iv'] = binascii.a2b_hex(key['iv'])
- continue
- if parsing:
- key['blob'] = key['blob'] + line
- if line[:10] == '-----BEGIN' and line[-16:] == 'PRIVATE KEY-----':
- key['type'] = line[11:-17]
- parsing = True
- if len(key['headers']) > 0 and line == '':
- parsing = True
- del(parsing)
- if len(key['blob']) > 0:
- # Private key
- if not self.keytypes.has_key(key['type']):
- return '"' + key['type'] + '" is not a supported key type'
- blob = binascii.a2b_base64(key['blob'])
- if key['encrypted']:
- if len(blob) % 8 != 0:
- return 'invalid encrypted key blob size'
- blocka = MD5.new(passphrase + key['iv']).digest()
- blockb = MD5.new(blocka + passphrase + key['iv']).digest()
- cipher = DES3.new(blocka + blockb[:8], DES3.MODE_CBC, key['iv'])
- del(blocka)
- del(blockb)
- blob = cipher.decrypt(blob)
- del(cipher)
- fields = self.unpackKeyBlob(blob)
- if isinstance(fields, bool):
- return 'invalid passphrase'
- if self.keytypes[key['type']] == RSA:
- n, e, d, p, q = fields[1:6]
- keyobj = RSA.construct((n, e, d, p, q))
- elif self.keytypes[key['type']] == ElGamal:
- p, g, y, x = fields[1:5]
- keyobj = ElGamal.construct((p, g, y, x))
- else:
- # Public key
- for line in data.split('\n'):
- fields = line.split(' ')
- if len(fields) == 3:
- key['type'], key['blob'], key['comment'] = fields[:3]
- break
- if len(fields) == 2:
- key['type'], key['blob'] = fields[:2]
- break
- if not self.keytypes.has_key(key['type']):
- return '"' + key['type'] + '" is not a supported key type'
- blob = binascii.a2b_base64(key['blob'])
- (length,) = struct.unpack('>I', blob[:4])
- type = blob[4:4+length]
- blob = blob[4+length:]
- if not self.keytypes.has_key(type):
- return '"' + key['type'] + '" is not a supported key type'
- (length,) = struct.unpack('>I', blob[:4])
- bytes = blob[4:4+length]
- e = 0L
- for byte in bytes:
- e = e * 256 + ord(byte)
- blob = blob[4+length:]
- (length,) = struct.unpack('>I', blob[:4])
- bytes = blob[4:4+length]
- n = 0L
- for byte in bytes:
- n = n * 256 + ord(byte)
- blob = blob[4+length:]
- if self.keytypes[type] == ElGamal:
- (length,) = struct.unpack('>I', blob[:4])
- bytes = blob[4:4+length]
- y = 0L
- for byte in bytes:
- y = y * 256 + ord(byte)
- blob = blob[4+length:]
- keyobj = ElGamal.construct((e, n, y))
- elif self.keytypes[type] == RSA:
- keyobj = RSA.construct((n, e))
- keyobj.metadata = key
- return keyobj
-
- def packKey(self, key, passphrase=''):
- data = ''
- keytype = ''
- blob = ''
- if key.has_private():
- # Private key
- if isinstance(key, RSA.RSAobj):
- keytype = 'RSA'
- dmq1 = key.d % (key.q-1)
- dmp1 = key.d % (key.p-1)
- impq = CryptoNumber.inverse(key.p, key.q)
- blob = self.packKeyBlob([0, key.n, key.e, key.d, key.q, key.p, dmq1, dmp1, impq])
- del(dmq1)
- del(dmp1)
- del(impq)
- elif isinstance(key, ElGamal.ElGamalobj):
- keytype = 'ELGAMAL'
- blob = self.packKeyBlob((0, key.p, key.g, key.y, key.x))
- else:
- log('err', 'packing key-pair: no key type matched')
- if len(blob) <= 0:
- return ''
- data += '-----BEGIN ' + keytype + ' PRIVATE KEY-----\n'
- if len(passphrase) > 0:
- iv = self.getRandom(8)
- blocka = MD5.new(passphrase + iv).digest()
- blockb = MD5.new(blocka + passphrase + iv).digest()
- cipher = DES3.new(blocka + blockb[:8], DES3.MODE_CBC, iv)
- del(blocka)
- del(blockb)
- while len(blob) % 8:
- blob += '='
- blob = cipher.encrypt(blob)
- del(cipher)
- data += 'Proc-Type: 4,ENCRYPTED\n'
- data += 'DEK-Info: DES-EDE3-CBC,' + binascii.b2a_hex(iv).upper() + '\n\n'
- del(iv)
- blob = binascii.b2a_base64(blob)
- if len(blob) <= 0:
- return ''
- while blob:
- data += blob[:64] + '\n'
- blob = blob[64:]
- data = data.rstrip('\n') + '\n-----END ' + keytype + ' PRIVATE KEY-----\n'
- else:
- # Public key
- e = 0
- n = 0
- y = 0
- if isinstance(key, ElGamal.ElGamalobj):
- try:
- keytype = key.metadata['type']
- except:
- keytype = 'ssh-elgamal'
- e = key.p
- n = key.g
- y = key.y
- elif isinstance(key, RSA.RSAobj):
- try:
- keytype = key.metadata['type']
- except:
- keytype = 'ssh-rsa'
- e = key.e
- n = key.n
- blob += struct.pack('>I', len(keytype))
- blob += struct.pack('>' + str(len(keytype)) + 's', keytype)
- val = ''
- while 1:
- r = e % 256
- d = (e - r) / 256
- val = chr(r) + val
- if d >= 256:
- e = d
- continue
- elif d != 0:
- val = chr(d) + val
- break
- del(e)
- if ord(val[0]) & 0x80:
- val = '\0' + val
- blob += struct.pack('>I', len(val))
- blob += struct.pack('>' + str(len(val)) + 's', val)
- val = ''
- while 1:
- r = n % 256
- d = (n - r) / 256
- val = chr(r) + val
- if d >= 256:
- n = d
- continue
- elif d != 0:
- val = chr(d) + val
- break
- del(n)
- if ord(val[0]) & 0x80:
- val = '\0' + val
- blob += struct.pack('>I', len(val))
- blob += struct.pack('>' + str(len(val)) + 's', val)
- if y != 0:
- val = ''
- while 1:
- r = y % 256
- d = (y - r) / 256
- val = chr(r) + val
- if d >= 256:
- y = d
- continue
- elif d != 0:
- val = chr(d) + val
- break
- del(y)
- if ord(val[0]) & 0x80:
- val = '\0' + val
- blob += struct.pack('>I', len(val))
- blob += struct.pack('>' + str(len(val)) + 's', val)
- blob = binascii.b2a_base64(blob)
- try:
- data = keytype + ' ' + blob.rstrip('\n') + ' ' + key.metadata['comment'] + '\n'
- except:
- data = keytype + ' ' + blob.rstrip('\n') + '\n'
- return data
diff --git a/server/blackace/pySecureServer.py b/server/blackace/pySecureServer.py
deleted file mode 100644
index 05c7b1b..0000000
--- a/server/blackace/pySecureServer.py
+++ /dev/null
@@ -1,137 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-# $Id: pySecureServer.py,v 0.1 2006/05/18 06:23:11 wolfwood Exp $
-#
-# pySecureServer 0.1 - Object oriented python SocketServer class.
-# http://starwind.homelinux.com/
-#
-# Copyright (c) 2006 Blackace Enterprises
-#
-# This program is free software; you can redistribute it and/or
-# modify it under the terms of the GNU General Public License
-# as published by the Free Software Foundation; either version 2
-# of the License, or (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
-#
-
-import sys, time, syslog, re, thread, select, SocketServer
-
-def log(priority, message):
- priorities = {
- 'emerg': syslog.LOG_EMERG,
- 'alert': syslog.LOG_ALERT,
- 'crit': syslog.LOG_CRIT,
- 'err': syslog.LOG_ERR,
- 'warn': syslog.LOG_WARNING,
- 'notice': syslog.LOG_NOTICE,
- 'info': syslog.LOG_INFO,
- 'debug': syslog.LOG_DEBUG
- }
- name = re.compile('^(?:[^/]*/)?(.*?)(?:\.py)?$').sub('\\1', sys.argv[0])
- syslog.openlog(name, syslog.LOG_PID, syslog.LOG_DAEMON)
- syslog.syslog(priorities[priority], message)
- syslog.closelog()
-
-class pySecureServer(SocketServer.ThreadingTCPServer):
- daemon_threads = True
- allow_reuse_address = True
-
- def __init__(self, crypto, handler, timeout, port, ip=''):
- self.crypto = crypto
- self.handler = handler
- self.timeout = timeout
- self.clients = {}
- SocketServer.ThreadingTCPServer.__init__(self, (ip, port), pySecureServerRequestHandler)
-
- def start(self):
- log('info', 'started')
- self.serve_forever()
-
-
-class pySecureServerRequestHandler(SocketServer.BaseRequestHandler):
- def __init__(self, request, client_address, server):
- self.timeout = 0
- SocketServer.BaseRequestHandler.__init__(self, request, client_address, server)
-
- def close(self):
- self.timeout = 0
-
- def startTimeout(self, inthread=False):
- if inthread:
- while self.timeout != 0 and self.timeout > time.time():
- time.sleep(1)
- return self.request.shutdown(2)
- else:
- self.delayTimeout()
- return thread.start_new_thread(self.startTimeout, (True,))
-
- def delayTimeout(self):
- self.timeout = time.time() + self.server.timeout
-
- def sendclear(self, message):
- self.request.sendall(message + '\n\r')
-
- def send(self, message, key=False):
- self.sendclear(self.server.crypto.encrypt(message, key))
-
- def handle(self):
- log('info', 'opening connection from ' + self.client_address[0] + ':' + str(self.client_address[1]))
- self.startTimeout()
- data = ''
- packet = { 'command': '', 'args': [], 'data': '' }
- ready_to_read, ready_to_write, in_error = select.select([self.request], [], [], None)
- while 1:
- if len(ready_to_read) == 1 and ready_to_read[0] == self.request:
- recv = self.request.recv(1024)
- if not recv or len(recv) <= 0:
- break
- else:
- data += str(recv)
- while data.find('\n') != -1:
- line, data = data.split('\n', 1)
- line = line.strip()
- if line == 'ip':
- self.sendclear(self.client_address[0])
- elif line == 'bye':
- self.sendclear('OK: Goodbye.')
- self.close()
- else:
- if len(packet['command']) == 0:
- i = line.rfind(' ')
- while i != -1:
- packet['args'].insert(0, line[i+1:])
- line = line[:i]
- i = line.rfind(' ')
- packet['command'] = line
- self.delayTimeout()
- else:
- if line != '':
- packet['data'] += line + '\n'
- self.delayTimeout()
- else:
- packet['data'] = packet['data'].rstrip('\n')
- retval = self.server.handler(self, packet)
- if isinstance(retval, basestring):
- if retval[0] == '!':
- self.sendclear(retval[1:])
- self.close()
- else:
- self.sendclear(retval)
- elif retval:
- self.sendclear('OK')
- elif not retval:
- self.sendclear('ERR: invalid command.')
- packet['command'] = ''
- packet['args'] = []
- packet['data'] = ''
- break
- log('info', 'closing connection from ' + self.client_address[0] + ':' + str(self.client_address[1]))
diff --git a/server/blackace/scireserver.py b/server/blackace/scireserver.py
deleted file mode 100755
index 3b920d0..0000000
--- a/server/blackace/scireserver.py
+++ /dev/null
@@ -1,121 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-# $Id: scireserver.py,v 0.2 2006/05/18 06:23:40 wolfwood Exp $
-#
-# ScireServer 0.2 - Server side communication component of Scire.
-# http://www.gentoo.org/proj/en/scire/
-#
-# This program is free software; you can redistribute it and/or
-# modify it under the terms of the GNU General Public License
-# as published by the Free Software Foundation; either version 2
-# of the License, or (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
-#
-
-import sys, time, syslog, re, pprint
-from pyCrypto import pyCrypto
-from pySecureServer import pySecureServer
-from Crypto.Hash import SHA, MD5
-
-def log(priority, message):
- priorities = {
- 'emerg': syslog.LOG_EMERG,
- 'alert': syslog.LOG_ALERT,
- 'crit': syslog.LOG_CRIT,
- 'err': syslog.LOG_ERR,
- 'warn': syslog.LOG_WARNING,
- 'notice': syslog.LOG_NOTICE,
- 'info': syslog.LOG_INFO,
- 'debug': syslog.LOG_DEBUG
- }
- name = re.compile('^(?:[^/]*/)?(.*?)(?:\.py)?$').sub('\\1', sys.argv[0])
- syslog.openlog(name, syslog.LOG_PID, syslog.LOG_DAEMON)
- syslog.syslog(priorities[priority], message)
- syslog.closelog()
-
-def handler(socket, request):
- crypto = socket.server.crypto
- clients = socket.server.clients
- if request['command'] == 'helo':
- pubkey = crypto.packKey(crypto.key.publickey()).rstrip('\n').split(' ')[1]
- socket.sendclear(MD5.new(pubkey).hexdigest() + ' ' + pubkey)
- return 'OK: Hello ' + socket.client_address[0]
- elif request['command'] == 'auth':
- data = crypto.decrypt(request['data'])
- digest = data[:32]
- data = data[33:]
- if MD5.new(data).hexdigest() != digest:
- return '!ERR: digest verification failed.'
- if len(request['args']) == 1 and len(request['args'][0]) == 32:
- # Response
- clientkeyhash = request['args'][0]
- if clients.has_key(clientkeyhash):
- if int(time.time()) > clients[clientkeyhash]['expiration']:
- del(clients[clientkeyhash])
- return '!ERR: challenge expired.'
- else:
- if MD5.new(clients[clientkeyhash]['challenge']).hexdigest() == data:
- clients[clientkeyhash]['auth'] = True
- clients[clientkeyhash]['expiration'] = int(time.time()) + socket.timeout
- return 'OK: authenticated.'
- else:
- # UPDATE `clients` SET `status` = 'disabled' WHERE `keyhash` = 'clientkeyhash';
- return '!ERR: invalid response.'
- else:
- return '!ERR: invalid challenge.'
- else:
- # Challenge
- clientkeyhash = MD5.new(data).hexdigest()
- clientkey = crypto.unpackKey(data)
- # if len(mysql_rows(SELECT `hostname` FROM `clients` WHERE `keyhash` = 'clientkeyhash';)) > 0:
- # we initiate the handshake and return True
- challenge = MD5.new(crypto.getRandom(32)).hexdigest()
- clients[clientkeyhash] = {'auth': False, 'key': clientkey, 'challenge': challenge, 'expiration': int(time.time()) + socket.timeout}
- socket.send(MD5.new(challenge).hexdigest() + ' ' + challenge, clientkey)
- return True
- # elif len(mysql_rows(SELECT `timestamp` FROM `pending_clients` WHERE `key` = 'clientkey';)) > 0:
- # return 'ERR: auth already pending.'
- # else:
- # stuff their clientkey into the pending_clients table and return '!OK: auth request queued.'
- else:
- if len(request['args']) >= 1 and len(request['args'][0]) == 32:
- clientkeyhash = request['args'][0]
- if clients.has_key(clientkeyhash):
- if int(time.time()) > clients[clientkeyhash]['expiration']:
- del(clients[clientkeyhash])
- return '!ERR: session expired.'
- elif not clients[clientkeyhash]['auth']:
- return '!ERR: not authenticated.'
- else:
- socket.sendclear('You sent: ' + pprint.pformat(request))
- socket.send('This should be encrypted!')
- return True
- else:
- return '!ERR: not authenticated.'
- else:
- return '!ERR: not authenticated.'
-
-if __name__ == '__main__':
- crypto = pyCrypto(1024, 'RSA', 'SHA', 'file://~/.ssh/id_rsa')
- server = pySecureServer(crypto, handler, 10, 7000)
-# print crypto.packKey(crypto.key.publickey()).rstrip('\n').split(' ')[1]
-# pubkey = crypto.getKey('file://~/.ssh/id_rsa.pub')
-# print crypto.packKey(pubkey)
-# encrypted = crypto.encrypt('Client to Server Connection Procedure\n=====================================\nClient connects to the Server.\n\nServer sends it\'s public key to the Client.\n\nClient encrypts it\'s public key using the Server\'s public\nkey and sends it to the Server.\n\nServer decrypts the Client\'s public key using it\'s own\nprivate key, and checks the clients table for a match.\n\nIf the Client\'s public key cannot be found in the clients\ntable, the Server stores the Client\'s public key in the\npending_clients table, sends a confirmation to the Client,\nand disconnects.\n\nIf the Client\'s public key was found in the clients table,\nimplying an Administrators trust, the Server generates a\nrandom challenge, encrypts it with the Client\'s public key,\nand sends it to the Client.\n\nThe Client decrypts the challenge, generates an MD5 hash of\nit, re-encrypts it with the Server\'s public key, and sends\nit back to the Server.\n\nThe Server decrypts the response using it\'s own private key\nand compares it to an MD5 hash of the challenge.\n\nIf the MD5 of the original challenge matches the response,\nthe Client and the Server have completed a handshake,\nverifying each other\'s identity, and can now either begin\nencrypted communication using the keys they have already\nexchanged, or they can exchange new connection-specific\nprivate keys and use symmetric encryption for the rest of\nthe session.\n\nIf the MD5 of the original challenge does not match the\nresponse, the Server should disable the Client in the\nclients table, flagging it for review by an Administrator,\nand then disconnect.\n', pubkey)
-# print 'Encrypted:\n', encrypted
-# decrypted = crypto.decrypt(encrypted)
-# print 'Decrypted:\n', decrypted
- try:
- server.start()
- except KeyboardInterrupt:
- log('info', 'keyboard interrupt')
- sys.exit(0)
diff --git a/server/certgen.py b/server/certgen.py
deleted file mode 100644
index 28454c6..0000000
--- a/server/certgen.py
+++ /dev/null
@@ -1,92 +0,0 @@
-#
-# certgen.py
-#
-# Copyright (C) Martin Sjogren and AB Strakt 2001, All rights reserved
-#
-# $Id$
-#
-"""
-Certificate generation module.
-"""
-
-from OpenSSL import crypto
-
-TYPE_RSA = crypto.TYPE_RSA
-TYPE_DSA = crypto.TYPE_DSA
-
-def createKeyPair(type, bits):
- """
- Create a public/private key pair.
-
- Arguments: type - Key type, must be one of TYPE_RSA and TYPE_DSA
- bits - Number of bits to use in the key
- Returns: The public/private key pair in a PKey object
- """
- pkey = crypto.PKey()
- pkey.generate_key(type, bits)
- return pkey
-
-def createCertRequest(pkey, digest="md5", **name):
- """
- Create a certificate request.
-
- Arguments: pkey - The key to associate with the request
- digest - Digestion method to use for signing, default is md5
- **name - The name of the subject of the request, possible
- arguments are:
- C - Country name
- SP - State or province name
- L - Locality name
- O - Organization name
- OU - Organizational unit name
- CN - Common name
- email - E-mail address
- Returns: The certificate request in an X509Req object
- """
- req = crypto.X509Req()
- subj = req.get_subject()
-
- for (key,value) in name.items():
- setattr(subj, key, value)
-
- req.set_pubkey(pkey)
- req.sign(pkey, digest)
- return req
-
-def createCertificate(req, (issuerCert, issuerKey), serial, (notBefore, notAfter), digest="sha1"):
- """
- Generate a certificate given a certificate request.
-
- Arguments: req - Certificate reqeust to use
- issuerCert - The certificate of the issuer
- issuerKey - The private key of the issuer
- serial - Serial number for the certificate
- notBefore - Timestamp (relative to now) when the certificate
- starts being valid
- notAfter - Timestamp (relative to now) when the certificate
- stops being valid
- digest - Digest method to use for signing, default is md5
- Returns: The signed certificate in an X509 object
- """
- cert = crypto.X509()
- cert.set_serial_number(serial)
- cert.gmtime_adj_notBefore(notBefore)
- cert.gmtime_adj_notAfter(notAfter)
- cert.set_issuer(issuerCert.get_subject())
- cert.set_subject(req.get_subject())
- cert.set_pubkey(req.get_pubkey())
- cert.sign(issuerKey, digest)
- return cert
-
-def certgen(keytype, keylength, commonname, keyfile, certfile):
- key_types = { "RSA": TYPE_RSA, "DSA": TYPE_DSA }
- key = createKeyPair(key_types[keytype], keylength)
- req = createCertRequest(key, CN=commonname)
- cert = createCertificate(req, (req, key), 0, (0, 60*60*24*365*50)) # fifty years
- pkey_file = open(keyfile, 'w')
- pkey_file.write(crypto.dump_privatekey(crypto.FILETYPE_PEM, key))
- pkey_file.close()
- cert_file = open(certfile, 'w')
- cert_file.write(crypto.dump_certificate(crypto.FILETYPE_PEM, cert))
- cert_file.close()
-
diff --git a/server/cron.py b/server/cron.py
deleted file mode 100755
index 9e43b30..0000000
--- a/server/cron.py
+++ /dev/null
@@ -1,469 +0,0 @@
-#! /usr/bin/env python
-# -*- coding: iso-8859-1 -*
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Library General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02110-1301, USA.
-#
-# Crontab-like string parse. Inspired on crontab.py of the
-# gnome-schedule-1.1.0 package.
-
-import re
-import datetime
-
-
-class SimpleCrontabEntry(object):
- """Crontab-like parser.
-
- Only deals with the first 5 fields of a normal crontab
- entry."""
-
- def __init__(self, entry, expiration = 0):
- self.__setup_timespec()
- self.set_value(entry)
- self.set_expiration(expiration)
-
- def set_expiration(self, val):
- self.expiration = datetime.timedelta(minutes=val)
-
- def set_value(self, entry):
- self.data = entry
- fields = re.findall("\S+", self.data)
- if len(fields) != 5 :
- raise ValueError("Crontab entry needs 5 fields")
- self.fields = {
- "minute" : fields[0],
- "hour" : fields[1],
- "day" : fields[2],
- "month" : fields[3],
- "weekday": fields[4],
- }
- if not self._is_valid():
- raise ValueError("Bad Entry")
-
- #### HERE BEGINS THE CODE BORROWED FROM gnome-schedule ###
- def __setup_timespec(self):
-
- self.special = {
- "@reboot" : '',
- "@hourly" : '0 * * * *',
- "@daily" : '0 0 * * *',
- "@weekly" : '0 0 * * 0',
- "@monthly" : '0 0 1 * *',
- "@yearly" : '0 0 1 1 *'
- }
-
- self.timeranges = {
- "minute" : range(0,60),
- "hour" : range(0,24),
- "day" : range(1,32),
- "month" : range(1,13),
- "weekday" : range(0,8)
- }
-
- self.timenames = {
- "minute" : "Minute",
- "hour" : "Hour",
- "day" : "Day of Month",
- "month" : "Month",
- "weekday" : "Weekday"
- }
-
- self.monthnames = {
- "1" : "Jan",
- "2" : "Feb",
- "3" : "Mar",
- "4" : "Apr",
- "5" : "May",
- "6" : "Jun",
- "7" : "Jul",
- "8" : "Aug",
- "9" : "Sep",
- "10" : "Oct",
- "11" : "Nov",
- "12" : "Dec"
- }
-
- self.downames = {
- "0" : "Sun",
- "1" : "Mon",
- "2" : "Tue",
- "3" : "Wed",
- "4" : "Thu",
- "5" : "Fri",
- "6" : "Sat",
- "7" : "Sun"
- }
-
- def checkfield (self, expr, type):
- """Verifies format of Crontab timefields
-
- Checks a single Crontab time expression.
- At first possibly contained alias names will be replaced by their
- corresponding numbers. After that every asterisk will be replaced by
- a "first to last" expression. Then the expression will be split
- into the comma-separated subexpressions.
-
- Each subexpression will run through:
- 1. Check for stepwidth in range (if it has one)
- 2. Check for validness of range-expression (if it is one)
- 3. If it is no range: Check for simple numeric
- 4. If it is numeric: Check if it's in range
-
- If one of these checks fails, an exception is raised. Otherwise it will
- do nothing. Therefore this function should be used with
- a try/except construct.
- """
-
- timerange = self.timeranges[type]
-
- # Replace alias names only if no leading and following alphanumeric and
- # no leading slash is present. Otherwise terms like "JanJan" or
- # "1Feb" would give a valid check. Values after a slash are stepwidths
- # and shouldn't have an alias.
- if type == "month": alias = self.monthnames.copy()
- elif type == "weekday": alias = self.downames.copy()
- else: alias = None
- if alias != None:
- while True:
- try: key,value = alias.popitem()
- except KeyError: break
- expr = re.sub("(?<!\w|/)" + value + "(?!\w)", key, expr)
-
- expr = expr.replace("*", str(min(timerange)) + "-" + str(max(timerange)) )
-
- lst = expr.split(",")
- rexp_step = re.compile("^(\d+-\d+)/(\d+)$")
- rexp_range = re.compile("^(\d+)-(\d+)$")
-
- expr_range = []
- for field in lst:
- # Extra variables for time calculation
- step = None
- buff = None
-
- result = rexp_step.match(field)
- if result != None:
- field = result.groups()[0]
- # We need to take step in count
- step = int(result.groups()[1])
- if step not in timerange:
- raise ValueError("stepwidth",
- self.timenames[type],
- "Must be between %(min)s and %(max)s" % { "min": min(timerange),
- "max": max(timerange) } )
-
- result = rexp_range.match(field)
- if (result != None):
- if (int(result.groups()[0]) not in timerange) or (int(result.groups()[1]) not in timerange):
- raise ValueError("range",
- self.timenames[type],
- "Must be between %(min)s and %(max)s" % { "min": min(timerange),
- "max": max(timerange) } )
- # Now we deal with a range...
- if step != None :
- buff = range(int(result.groups()[0]), int(result.groups()[1])+1, step)
- else :
- buff = range(int(result.groups()[0]), int(result.groups()[1])+1)
-
- elif not field.isdigit():
- raise ValueError("fixed",
- self.timenames[type],
- "%s is not a number" % ( field ) )
- elif int(field) not in timerange:
- raise ValueError("fixed",
- self.timenames[type],
- "Must be between %(min)s and %(max)s" % { "min": min(timerange),
- "max": max(timerange) } )
- if buff != None :
- expr_range.extend(buff)
- else :
- expr_range.append(int(field))
-
- expr_range.sort()
- # Here we may need to check whether some elements have duplicates
- self.fields[type] = expr_range
-
-
- #### HERE ENDS THE CODE BORROWED FROM gnome-schedule ###
-
- def _is_valid(self):
- """Validates the data to check for a well-formated cron
- entry.
- Returns True or false"""
-
- try:
- for typ, exp in self.fields.items():
- self.checkfield(exp, typ)
- except ValueError,(specific,caused,explanation):
- print "PROBLEM TYPE: %s, ON FIELD: %s -> %s " % (specific,caused,explanation)
- return False
- return True
-
- def __next_time(self, time_list, time_now):
- """Little helper function to find next element on the list"""
- tmp = [x for x in time_list if x >= time_now]
- carry = False
- if len(tmp) == 0:
- carry = True
- sol = time_list[0]
- else:
- if not carry:
- sol = tmp[0]
- else :
- if len(tmp) == 1:
- carry = True
- sol = time_list[0]
- else :
- carry = False
- sol = tmp[1]
- return sol, carry
-
- def __prev_time(self, time_list, item):
- """Little helper function to find next element on the list"""
- pos = time_list.index(item)
- elem = time_list[pos-1]
- carry = elem >= time_list[pos]
- return elem, carry
-
- def __next_month(self, month, sol):
- """Find next month of execution given the month arg. If month
- is different than current calls all the other __next_*
- functions to set up the time."""
-
- sol['month'], carry = self.__next_time(self.fields['month'], month)
- if carry :
- sol['year'] += 1
- if sol['month'] != month :
- self.__next_day(1,sol)
- self.__next_hour(0,sol)
- self.__next_minute(0,sol)
- return False
- return True
-
- def __next_minute(self, minute, sol):
- """Find next minute of execution given the minute arg."""
- sol['minute'], carry = self.__next_time(self.fields['minute'], minute)
- if carry:
- self.__next_hour(sol['hour']+1, sol)
- return True
-
- def __next_hour(self, hour, sol):
- """Find next hour of execution given the hour arg. If hour is
- different than current calls the __next_hour function to set
- up the minute """
-
- sol['hour'], carry = self.__next_time(self.fields['hour'], hour)
- if carry:
- self.__next_day(sol['day']+1, sol)
- if sol['hour'] != hour:
- self.__next_minute(0,sol)
- return False
- return True
-
- #el weekday se calcula a partir del dia, el mes y año dentro de sol
- def __next_day(self, day, sol):
- """Find next day of execution given the day and the month/year
- information held on the sol arg. If day is different than
- current calls __next_hour and __next_minute functions to set
- them to the correct values"""
-
- now = datetime.date(sol['year'], sol['month'], day)
- # The way is handled on the system is monday = 0, but for crontab sunday =0
- weekday = now.weekday()+1
- # first calculate day
- day_tmp, day_carry = self.__next_time(self.fields['day'], day)
- day_diff = datetime.date(sol['year'], sol['month'], day_tmp) - now
-
- # if we have all days but we don't have all weekdays we need to
- # perform different
- if len(self.fields['day']) == 31 and len(self.fields['weekday']) != 8:
- weekday_tmp, weekday_carry = self.__next_time(self.fields['weekday'], weekday)
- # Both 0 and 7 represent sunday
- weekday_tmp -= 1
- if weekday_tmp < 0 : weekday_tmp = 6
- weekday_diff = datetime.timedelta(days=weekday_tmp - (weekday - 1))
- if weekday_carry :
- weekday_diff += datetime.timedelta(weeks=1)
- weekday_next_month = (now + weekday_diff).month != now.month
- # If next weekday is not on the next month
- if not weekday_next_month :
- sol['day'] = (now + weekday_diff).day
- if sol['day'] != day :
- self.__next_hour(0,sol)
- self.__next_minute(0, sol)
- return False
- return True
- else :
- flag = self.__next_month(sol['month']+1, sol)
- if flag :
- return self.__next_day(0, sol)
- return False
-
- # if we don't have all the weekdays means that we need to use
- # them to calculate next day
- if len(self.fields['weekday']) != 8:
- weekday_tmp, weekday_carry = self.__next_time(self.fields['weekday'], weekday)
- # Both 0 and 7 represent sunday
- weekday_tmp -= 1
- if weekday_tmp < 0 : weekday_tmp = 6
- weekday_diff = datetime.timedelta(days=weekday_tmp - (weekday - 1))
- if weekday_carry :
- weekday_diff += datetime.timedelta(weeks=1)
- weekday_next_month = (now + weekday_diff).month != now.month
- # If next weekday is not on the next month
- if not weekday_next_month :
- # If the next day is on other month, the next weekday
- # is closer to happen so is what we choose
- if day_carry:
- sol['day'] = (now + weekday_diff).day
- if sol['day'] != day :
- self.__next_hour(0,sol)
- self.__next_minute(0, sol)
- return False
- return True
- else :
- # Both day and weekday are good candidates, let's
- # find out who is going to happen
- # sooner
- diff = min(day_diff, weekday_diff)
- sol['day'] = (now+diff).day
- if sol['day'] != day :
- self.__next_hour(0,sol)
- self.__next_minute(0, sol)
- return False
- return True
-
- sol['day'] = day_tmp
- if day_carry :
- self.__next_month(sol['month']+1, sol)
- if sol['day'] != day :
- self.__next_hour(0,sol)
- self.__next_minute(0, sol)
- return False
- return True
-
-
- def next_run(self, time = datetime.datetime.now()):
- """Calculates when will the next execution be."""
- sol = {'minute': 0, 'hour': 0, 'day': 0, 'month' : 0, 'year' : time.year}
- # next_month if calculated first as next_day depends on
- # it. Also if next_month is different than time.month the
- # function will set up the rest of the fields
- self.__next_month(time.month, sol) and \
- self.__next_day(time.day, sol) and \
- self.__next_hour(time.hour, sol) and \
- self.__next_minute(time.minute, sol)
- return datetime.datetime(sol['year'], sol['month'], sol['day'], sol['hour'], sol['minute'])
-
- def prev_run(self, time = datetime.datetime.now()):
- """Calculates when the previous execution was."""
- base = self.next_run(time)
- # minute
- prev_minute, carry = self.__prev_time(self.fields['minute'], base.minute)
- min_diff = datetime.timedelta(minutes=(base.minute - prev_minute))
- base -= min_diff
- if not carry :
- return base
-
- # hour
- prev_hour, carry = self.__prev_time(self.fields['hour'], base.hour)
- hour_diff = datetime.timedelta(hours=(base.hour - prev_hour))
- base -= hour_diff
- if not carry :
- return base
-
- # day
- prev_day, carry_day = self.__prev_time(self.fields['day'], base.day)
- day_diff = datetime.timedelta(days=(base.day - prev_day))
- prev_weekday, carry_weekday = self.__prev_time(self.fields['weekday'], base.weekday()+1)
-
- # if we have all days but we don't have all weekdays we need to
- # perform different
- if len(self.fields['day']) == 31 and len(self.fields['weekday']) != 8:
- # Both 0 and 7 represent sunday
- prev_weekday -= 1
- if prev_weekday < 0 : prev_weekday = 6
-
- if carry_weekday :
- day_diff = datetime.timedelta(days=7+base.weekday() - prev_weekday)
- carry = base.month != (base - day_diff).month
- else :
- weekday_diff = datetime.timedelta(days=base.weekday() - prev_weekday)
- # weekday no es en el otro mes
- day_diff = min([day_diff, weekday_diff])
- carry = False
-
- elif len(self.fields['weekday']) != 8:
- # Both 0 and 7 represent sunday
- prev_weekday -= 1
- if prev_weekday < 0 : prev_weekday = 6
- weekday_diff = datetime.timedelta(days=base.weekday() - prev_weekday)
-
- if carry_weekday :
- weekday_diff += datetime.timedelta(weeks=1)
- if carry_day :
- # ambos son el otro mes
- day_diff = max([day_diff, weekday_diff])
- carry = True
- else :
- # el day simple esta en el mismo mes y el weekday en otro
- pass
- else :
- # weekday no es en el otro mes
- if carry_day :
- # el day esta en el otro mes y el weekday no
- prev_day = weekday_diff
- carry = False
- else :
- # ambos estan el el mero mes
- day_diff = min([day_diff, weekday_diff])
- carry = False
-
- else :
- carry = carry_day
- base -= day_diff
- if not carry :
- return base
-
- # month
- prev_month, carry = self.__prev_time(self.fields['month'], base.month)
- month_diff = datetime.date(base.year, base.month, base.day) - \
- datetime.date(base.year, prev_month, base.day)
- base -= month_diff
-
- return base
-
-
-
- def is_expired(self, time = datetime.datetime.now()):
- """If the expiration parameter has been set this will check
- whether too much time has passed since the cron entry. If the
- expiration has not been set, it throws ValueError."""
- if self.expiration == 0 :
- raise ValueError("Missing argument",
- "Expiration time has not been set")
- next_beg = self.next_run(time)
- next_end = next_beg + self.expiration
- prev_beg = self.prev_run(time)
- prev_end = prev_beg + self.expiration
- if (time >= next_beg and time <= next_end) or (time >= prev_beg and time <= prev_end) :
- return False
- return True
-
-def _test():
- import doctest
- doctest.testfile("cronTest.txt")
-
-if __name__ == "__main__" :
- _test()
diff --git a/server/cronTest.txt b/server/cronTest.txt
deleted file mode 100644
index 10df9d6..0000000
--- a/server/cronTest.txt
+++ /dev/null
@@ -1,137 +0,0 @@
-SimpleCrontabEntry doctest file
-
- >>> from cron import *
-
- >>> c = SimpleCrontabEntry("* * * * *")
-
-Checks for Entry format (only size)
-
- >>> c = SimpleCrontabEntry("* * * * *")
- >>> c = SimpleCrontabEntry(" * * * * *")
- >>> c = SimpleCrontabEntry("* * * *")
- Traceback (most recent call last):
- ...
- ValueError: Crontab entry needs 5 fields
- >>> c = SimpleCrontabEntry("* * * *")
- Traceback (most recent call last):
- ...
- ValueError: Crontab entry needs 5 fields
- >>> c = SimpleCrontabEntry("5-10 2 -2 5")
- Traceback (most recent call last):
- ...
- ValueError: Crontab entry needs 5 fields
- >>> c = SimpleCrontabEntry("*-*-*-*")
- Traceback (most recent call last):
- ...
- ValueError: Crontab entry needs 5 fields
-
-Now check for entry validate (values inside ranges, etc.)
-
- >>> c = SimpleCrontabEntry("0 0 1 1 0")
- >>> c = SimpleCrontabEntry("-1 -4 -6 90 80")
- Traceback (most recent call last):
- ...
- ValueError: Bad Entry
- >>> c = SimpleCrontabEntry("0 0 0 0 0")
- Traceback (most recent call last):
- ...
- ValueError: Bad Entry
- >>> c = SimpleCrontabEntry("3/5 22 1 12 8")
- Traceback (most recent call last):
- ...
- ValueError: Bad Entry
- >>> c = SimpleCrontabEntry("1/4 * 20-11 1 1")
- Traceback (most recent call last):
- ...
- ValueError: Bad Entry
- >>> c = SimpleCrontabEntry("12/2 2 2 * *")
- Traceback (most recent call last):
- ...
- ValueError: Bad Entry
-
-The next entry is valid ?
-
- >>> c = SimpleCrontabEntry("10-30/20 2 1 9 1")
-
-Calculate next executions
-
- May 2007
-Mo Tu We Th Fr Sa Su
- 1 2 3 4 5 6
- 7 8 9 10 11 12 13
-14 15 16 17 18 19 20
-21 22 23 24 25 26 27
-28 29 30 31
-
- >>> import datetime
- >>> now = datetime.datetime(2007, 05, 25, 10, 0)
- >>> now.weekday()
- 4
-
- >>> c = SimpleCrontabEntry("*/15 * * * *")
- >>> c.next_run(now)
- datetime.datetime(2007, 5, 25, 10, 0)
- >>> c.prev_run(now)
- datetime.datetime(2007, 5, 25, 9, 45)
-
- >>> c.set_value("2 * * * *")
- >>> c.next_run(now)
- datetime.datetime(2007, 5, 25, 10, 2)
- >>> c.prev_run(now)
- datetime.datetime(2007, 5, 25, 9, 2)
-
- >>> c.set_value("1-30/15 * * * *")
- >>> c.next_run(now)
- datetime.datetime(2007, 5, 25, 10, 1)
- >>> c.prev_run(now)
- datetime.datetime(2007, 5, 25, 9, 16)
-
- >>> c.set_value("0 2,7 * * 1-5")
- >>> c.next_run(now)
- datetime.datetime(2007, 5, 28, 2, 0)
- >>> c.prev_run(now)
- datetime.datetime(2007, 5, 25, 7, 0)
-
- >>> c.set_value("9 2,9,22 */4 */3 *")
- >>> c.next_run(now)
- datetime.datetime(2007, 7, 1, 2, 9)
- >>> c.prev_run(now)
- datetime.datetime(2007, 4, 29, 22, 9)
-
- >>> c.set_value("5 12 9 5 *")
- >>> c.next_run(now)
- datetime.datetime(2008, 5, 9, 12, 5)
- >>> c.prev_run(now)
- datetime.datetime(2008, 5, 9, 12, 5)
-
-Expiration measurement
-
- May 2007
-Mo Tu We Th Fr Sa Su
- 1 2 3 4 5 6
- 7 8 9 10 11 12 13
-14 15 16 17 18 19 20
-21 22 23 24 25 26 27
-28 29 30 31
-
- >>> now = datetime.datetime(2007, 05, 25, 10, 0)
- >>> c.set_value("*/15 * * * *")
- >>> c.set_expiration(5)
- >>> c.next_run(now)
- datetime.datetime(2007, 5, 25, 10, 0)
- >>> c.prev_run(now)
- datetime.datetime(2007, 5, 25, 9, 45)
- >>> c.is_expired(now)
- False
-
- >>> c.set_value("0 9,12 * * *")
- >>> c.set_expiration(5)
- >>> c.next_run(now)
- datetime.datetime(2007, 5, 25, 12, 0)
- >>> c.prev_run(now)
- datetime.datetime(2007, 5, 25, 9, 0)
- >>> c.is_expired(now)
- True
- >>> c.set_expiration(60)
- >>> c.is_expired(now)
- False
diff --git a/server/modules/GACL_functions.py b/server/modules/GACL_functions.py
deleted file mode 100644
index 147fa28..0000000
--- a/server/modules/GACL_functions.py
+++ /dev/null
@@ -1,64 +0,0 @@
-from adodb import *
-from modules.client import *
-
-def get_client_groups(db,id)
- #options.
- option = 'NO_RECURSE' # * If $option == 'RECURSE' it will get all ancestor groups. defaults to only get direct parents.
- # * @return array Array of Group ID #'s, or FALSE if Failed
- print "get_object_groups(): Object ID: %s id Option: %s\n" % (id, option)
- object_type = 'axo'
- group_table = 'gacl_axo_groups'
- map_table = 'gacl_groups_axo_map'
-
- if not id:
- print "get_object_groups(): Object ID: (%s ) is empty, this is required" % str(id)
- return false
-
-
- if option == 'RECURSE':
- query = """
- SELECT DISTINCT g.id AS group_id
- FROM %s gm
- LEFT JOIN %s g1 ON g1.id=gm.group_id
- LEFT JOIN %s g ON g.lft<=g1.lft AND g.rgt>=g1.rgt
- """ % (map_table,group_table,group_table)
- else:
- query = """
- SELECT gm.group_id
- FROM %s gm
- """ % map_table
-
- query += " WHERE gm.axo_id=%s " % str(id)
- print query
- cursor = db.conn.Execute(query)
- #fixme error check the return
-
- while (not cursor.EOF):
- row = cursor.FetchRow()
- retarr.append(row[0])
-
- return retarr
-
-
-
-# # Add the client into the gacl AXO table
-# db.conn.Execute('LOCK TABLES `gacl_axo_seq` WRITE');
-# # we add one to get the next valid free id
-# id = db.conn.GetRow('SELECT id FROM `gacl_axo_seq`')[0] + 1;
-# result = db.conn.Execute('UPDATE `gacl_axo_seq` SET id=%s', id);
-# db.conn.Execute('UNLOCK TABLES');
-
-# result2 = db.conn.Execute('INSERT INTO `gacl_axo` (id,section_value,value,order_value,name,hidden) VALUES (%s,%s,%s,%s,%s,%s)', (id,'clients',client_info['hostname'],1,client_info['hostname'],0) );
-
-def get_group_clients(db, group):
- """This function gets the members of groups. Returns an array
- containing those clients, empty otherwise"""
-
- # Missing recursive groups
- members = []
- query = "SELECT axo_id FROM gacl_groups_axo_map WHERE group_id = %s" % group
- print query
- cursor = db.conn.Execute(query)
- while (not cursor.EOF):
- members.append(cursor.FetchRow()[0])
- return members
diff --git a/server/modules/__init__.py b/server/modules/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/server/modules/__init__.py
+++ /dev/null
diff --git a/server/modules/client.py b/server/modules/client.py
deleted file mode 100644
index 8023e89..0000000
--- a/server/modules/client.py
+++ /dev/null
@@ -1,76 +0,0 @@
-from ScireDB import *
-from OpenSSL import crypto
-import time;
-
-def register():
- return ['register_client', 'add_client']
- #return ['register_client']
-
-def register_client(client_digest):
- db = ScireDB()
- retval = False
- row = db.conn.GetRow('SELECT client_status.statusname FROM clients JOIN client_status on (clients.status = client_status.statusid) WHERE clients.digest=%s', client_digest )
-
- if not row:
- print 'Client is not in the table:\n %s' % client_digest
- else:
-# print 'Found the client in the database!'
- #This doesn't mean they're allowed to do things though.
- retval = row[0]
-
- db.Close()
- return retval
-
-
-
-def add_client(client_digest,certbuf,client_info):
-#def add_client(self,client_cert,client_info):
- db = ScireDB()
- retval = False
-
- client_cert = crypto.load_certificate(crypto.FILETYPE_PEM,certbuf)
- print 'CLIENT DIGEST: ' + client_cert.digest("sha1")
- print 'CLIENT CERT: ' + crypto.dump_certificate(crypto.FILETYPE_PEM,client_cert)
-
- try:
- status = db.conn.GetRow('SELECT statusid FROM client_status WHERE statusname = %s', ("Pending"))[0]
- except:
- print "Insert failed: Couldn't get status"
- db.Close()
- return retval
-
- try:
- # Add the client into the gacl AXO table
- db.conn.Execute('LOCK TABLES `gacl_axo_seq` WRITE');
- # we add one to get the next valid free id
- id = db.conn.GetRow('SELECT id FROM `gacl_axo_seq`')[0] + 1;
- result = db.conn.Execute('UPDATE `gacl_axo_seq` SET id=%s', id);
- db.conn.Execute('UNLOCK TABLES');
-
- result2 = db.conn.Execute('INSERT INTO `gacl_axo` (id,section_value,value,order_value,name,hidden) VALUES (%s,%s,%s,%s,%s,%s)', (id,'clients',client_info['hostname'],1,client_info['hostname'],0) );
-
- ## stub
- #
- db.conn.Execute('INSERT INTO clients (clientid,digest,cert,hostname,mac,ip,status) VALUES (%s,%s,%s,%s,%s,%s,%s)', (id, client_cert.digest("sha1"),crypto.dump_certificate(crypto.FILETYPE_PEM,client_cert),client_info['hostname'],client_info['mac'],client_info['ip'], status))
- print 'Insert successful'
- retval = True
- except Exception, inst:
- print "Insertion failed: ", inst
-
- db.Close()
- return retval
-
-
-def get_clientid(client_digest):
- # No need to expose this function via XML-RPC since it should be
- # used only internally, at least for now.
- db = ScireDB()
-
- row = db.conn.GetRow('SELECT clientid FROM clients WHERE digest=%s', client_digest )
- db.Close()
-
- if row:
- client_id = row[0]
- return client_id
- else:
- return False
diff --git a/server/modules/general.py b/server/modules/general.py
deleted file mode 100644
index e261308..0000000
--- a/server/modules/general.py
+++ /dev/null
@@ -1,8 +0,0 @@
-def register():
- return ['say_hello']
-
-def say_hello(client_digest):
- return "Hello %s!\n%s" % (client_digest, say_goodbye())
-
-def say_goodbye():
- return "Goodbye foo!"
diff --git a/server/modules/job.py b/server/modules/job.py
deleted file mode 100644
index ec0986c..0000000
--- a/server/modules/job.py
+++ /dev/null
@@ -1,354 +0,0 @@
-import sys
-sys.path.append("../postprocess")
-
-from ScireDB import *
-from adodb import *
-from client import *
-import GACL_functions
-import md5
-import datetime
-import pdb
-
-POSTPROCESS_DIR = "postprocess"
-
-debug = True
-
-def register():
- return ['recover_spool', 'gen_summary', 'get_jobs', 'spool_jobs', 'unspool_job', 'get_job', 'job_return', 'get_jstatus', 'job_cancelled', 'mark_job_as']
-
-def job_cancelled(client_digest,jobid):
-
- client_id = get_clientid(client_digest)
- db = ScireDB()
-
- # How to deal with a cancelled recurring job?
-
- # rlazo: I think that using the last stored value on the
- # job_history table will always be accurated. If the user
- # wants to rerun the job again, the status will change to
- # PENDING
-
- cursor = db.conn.Execute('''
- SELECT jobs_status.statusname
- FROM job_history LEFT JOIN jobs_status on (job_history.jobid)
- WHERE job_history.jobid=%s AND job_history.clientid=%s
- ''', (jobid,client_id))
- status = cursor.FetchRow()
- if status:
- return status[0] == "Cancelled"
- else:
- return False
-
-def recover_spool(self,jobs):
- # Here we will implement the logic to read in
- # the jobs in the spool on the client.
- self.gen_summary()
- return jobs
-
-def gen_summary(client_digest,jobs):
- # This is a stub for a method which the client will call.
- # It should take the spool and generate a list of hashes
- # of jobs it already has.
-
- client_summary = {}
-
- for jobid in jobs:
- job_digest = gen_job_digest(client_digest,jobid)
-# print "Adding md5 digest %s for job %s" % (job_digest,jobid)
- client_summary[job_digest] = {}
-
- return client_summary
-
-def gen_job_digest(client_digest,jobid):
-
- job = get_job(client_digest,jobid)
- if debug: print "Job is:\n"
- if debug: print job
- m = md5.new()
-
- keys = job.keys()
- keys.sort()
- for key in keys:
- if key != 'script':
- m.update(str(job[key]))
-
- keys = job['script'].keys()
- keys.sort()
- for key in keys:
- m.update(str(job['script'][key]))
-
- job_digest = m.hexdigest()
- if debug: print "Job digest is: "+str(job_digest)+"\n"
- return job_digest
-
-def get_jobs(client_digest,summary,jobs):
- """Returns all the job from the database for the client identified by
- client_digest that fulfill the following conditions:
- - It is time to execute it (deploy_time and expiration_time)
- - The latest entry on job_history for the job is marked as Pending"""
- client_id = get_clientid(client_digest)
-
- # pdb.set_trace()
- if not client_id:
- print 'Could not get client ID from database for %s.' % client_digest
- return False
-
- db = ScireDB()
-
- pending_id = get_status_id('Pending')
- if not pending_id:
- print 'Error... could not load client_status'
-
- # rlazo: Do the magic. This is all the need for a multijob to work. I hope
- expand_jobs(client_id)
-
- try:
- cursor = db.conn.Execute('''
-SELECT jobs.jobid, jobs.priority, job_conditions.job_dependency, job_conditions.deploy_time, job_conditions.expiration_time, job_history.statusid
-FROM jobs NATURAL JOIN jobs_clients NATURAL JOIN job_conditions NATURAL JOIN job_history
-WHERE jobs_clients.clientid = %s
-AND jobs.jobid = jobs_clients.jobid
-AND (job_conditions.deploy_time < now())
-AND (job_conditions.expiration_time > now())
-AND job_history.statusid = '%s'
-ORDER BY jobs.priority,jobs.created
-''', (str(client_id), pending_id))
- except:
- print 'Error ' + str(sys.exc_info()[1]); # retrieve the error message returned by database
- return False
-
-
- while not cursor.EOF:
- jobs_dict = cursor.GetRowAssoc(0) # 0 is lower, 1 is upper-case
- print jobs_dict
- jobid = jobs_dict['jobid']
-
- sjob_digest = gen_job_digest(client_digest,jobid)
- if debug: print 'sjob_digest is %s for jobid %s' % (sjob_digest,jobid)
-
-# # Pending approval as would make invalid the next 5 lines
-# print 'Adding jobid %s to the queue of jobs.' % str(jobid)
-# jobs.append(jobid)
-
- if not summary.has_key(sjob_digest):
- print 'Adding jobid %s to the queue of jobs.' % str(jobid)
- jobs.append(jobid)
- else:
- print '%s not added to the queue of jobs.' % sjob_digest
- cursor.MoveNext()
-
- #jobs = self.recover_spool(jobs)
- #self.spool_jobs(jobs)
-
- db.Close()
- return jobs
-
-def spool_jobs(self,jobs):
- # This is a stub for a method which will write out the
- # job in XML to the spool directory.
- return True
-
-def unspool_job(self,job):
- # This is a stub for a method which will remove the
- # job from the spool directory.
- return True
-
-def job_return(client_digest,jobid,success,eventmsg=None):
- output = eventmsg
- db = ScireDB()
- print "The job has returned. It was a %s\n" % success
- clientid = get_clientid(client_digest)
- if debug: print "The clientid is: %s\n" % clientid
- # Commented because is already executed on run_jobs
-# cursor = db.conn.Execute('''
-#INSERT INTO job_history
-#(jobid,clientid,status,eventmsg)
-#VALUES (%s,%s,%s,%s)
-#''', (jobid,clientid,success,eventmsg))
-
- if success == "Succeeded" and eventmsg != None: #If we have output and succeeded, check if we have a postprocess to run.
- #FIXME this should be a separate function.
- post_process = db.conn.GetRow('''SELECT s.pp_location, s.pp_script_data
- FROM jobs j, scripts s
- WHERE j.jobid=%s AND s.scriptid=j.script ''', (jobid) )
- print str(post_process) #DEBUGGING
- if post_process[0]:
- #now this is some dangerous freakin code if i've ever seen it!
- print "Running post process pp_"+str(post_process[0])
- template= __import__(POSTPROCESS_DIR+'/postprocess')
- result = getattr(template, "pp_"+str(post_process[0]))(clientid,output)
- if not result:
- print "Postprocessing failed!" + str(post_process[0])
-
- #elif post_process[1]:
- # status,output = commands.getstatusoutput(post_process[1],' 2>&1'])
-
- # if debug:
- # print 'Command Output:\n %s' % eventmsg
- # if output:
- # syslog.syslog('Jobid %s: ' % (jobid))
- # syslog.syslog(output)
-
-
- # Update the pending # for the job
- cursor = db.conn.Execute('UPDATE jobs SET pending=pending-1 WHERE jobid=%s', (jobid))
-
-
- # Clear this client out of the jobs_clients table so the job doesn't get re-fetched.
- # Not sure how this will work for recurring jobs. Schedule next now? FIXME!!
- cursor = db.conn.Execute('DELETE FROM jobs_clients WHERE jobid=%s AND clientid=%s', (jobid,clientid))
-
- # Here we probably need to put a check in to determine if
- # this report is the last one needed somehow. Add'l DB
- # fields may be necessary?
- db.Close()
- return True
-
-def get_jstatus(client_digest,jobid):
-
- db = ScireDB()
- clientid = get_clientid(client_digest)
-
- # Under the current scheduling model, all jobs are treated as
- # recurring, so just get the latest job entry
- jobstatus = db.conn.GetRow('''
-SELECT jobs_status.statusname
-FROM job_history LEFT JOIN jobs_status ON (job_history.statusid)
-WHERE job_history.clientid = %s AND job_history.jobid = %s
-ORDER BY job_history.eventtime DESC
-LIMIT 1
-''', (clientid, jobid))
-
- if jobstatus:
- return jobstatus
- else:
- return False
-
-
-def get_job(client_digest,jobid):
- '''
- Simplified version of get_job.
- '''
-
- job = {}
- script = {}
- jobrow = {}
- jobrow[jobid] = {}
- db = ScireDB()
-
- try:
- cursor = db.conn.Execute('''
-SELECT *
-FROM jobs LEFT JOIN job_conditions on (jobs.jobid)
-WHERE jobs.jobid = %d''' % jobid)
- job = cursor.GetRowAssoc(0) # 0 is lower, 1 is upper-case
- except:
- print sys.exc_info()[1]
- print "job: \n" + str(job)
- tmp = {}
- for e in job:
- tmp [e] = str(job[e])
- job = tmp
-
-
- try:
- scriptsql = db.conn.Execute('select * from scripts where scriptid=%s', str(job['script']))
- job['script'] = scriptsql.GetRowAssoc(0)
-
- except:
- print sys.exc_info()[1]; # retrieve the error message returned by database
-
- job['jobid'] = jobid # Fix jobid since it's written twice from select
-
- db.Close()
- return job
-
-def get_latest_job_entry(clientid, jobid):
- """
- Helper Function to get the latest entry on the job_history
- table"""
- db = ScireDB()
- eventtime = db.conn.GetRow('''
-SELECT eventtime
-FROM job_history
-WHERE clientid = %s AND jobid = %s
-ORDER BY eventtime DESC
-LIMIT 1
-''', (clientid, jobid))
- if eventtime:
- return eventtime[0]
- else:
- return false
-
-def get_status_id(status):
- """
- Returns the jobstatus id for the job with the statusname
- 'status', false otherwise"""
- db = ScireDB()
- statusid = db.conn.GetRow('''
-SELECT statusid
-FROM jobs_status
-WHERE statusname = \'%s\''''%status)
- if statusid:
- return statusid[0]
- else:
- return False
-
-def mark_job_as(client_digest, status, jobid, message = ""):
- """
- Modifies the entry on job_history for the lastest registred
- job."""
- db = ScireDB()
- clientid = get_clientid(client_digest)
- now = db.conn.DBTimeStamp(datetime.datetime.now())
- statusid = get_status_id(status)
- if not statusid:
- print "ERROR! couldn't find %s statusid" % status
- return False
-
- eventtime = db.conn.DBTimeStamp(get_latest_job_entry(clientid, jobid))
- if eventtime:
- cursor = db.conn.Execute('''
-UPDATE job_history
-SET eventtime = %s, statusid = %d, eventmsg = \'%s\'
-WHERE eventtime = %s and jobid = %d and clientid = %d
-''' % (now, int(statusid), message, eventtime, int(jobid), int(clientid)))
- else:
- cursor = db.conn.Execute('''
-INSERT INTO job_history
-VALUES (%s,%s,%s,%s,%s)
-''', (jobid, clientid, now, statusid, message))
- return True
-
-def expand_jobs(clientid):
- """
- Search for the group jobs that the client must be into and
- does the expansion"""
- db = ScireDB()
- groups = GACL_functions.get_client_groups(db, clientid)
- pendingid = get_status_id('Pending')
- for groupid in groups:
- members = GACL_functions.get_group_clients(db, groupid)
- try :
- cursor = db.conn.Execute('''
-SELECT DISTINCT(jobs_clients.jobid)
-FROM jobs_clients LEFT JOIN job_conditions on (jobs_clients.jobid=job_conditions.jobid)
-WHERE jobs_clients.groupid = %d
-AND (job_conditions.deploy_time < now())
-AND (job_conditions.expiration_time > now())
-AND job_conditions.last_run_date < job_conditions.deploy_time ''' % groupid)
- except:
- print sys.exc_info()[1]
- print "################### TERRIBLE ERROR #################"
- db.conn.Execute('LOCK TABLES `jobs_clients` WRITE, `job_conditions` WRITE, `job_history` WRITE''')
- while (not cursor.EOF):
- jobid = cursor.GetRowAssoc(0)['jobid']
- for memberid in members:
- db.conn.Execute("INSERT INTO job_history values ('%s','%s',now(),'%s','%s')" %
- (jobid, memberid, pendingid, "Job Expanded"))
- db.conn.Execute('INSERT INTO jobs_clients (jobid, clientid) values (%d,%d)' %
- (jobid, memberid))
- db.conn.Execute('UPDATE `job_conditions` SET last_run_date = now() WHERE jobid = %d' % jobid)
- cursor.MoveNext()
- db.conn.Execute('UNLOCK TABLES')
- return True
diff --git a/server/postprocess/postprocess.py b/server/postprocess/postprocess.py
deleted file mode 100644
index 17460c2..0000000
--- a/server/postprocess/postprocess.py
+++ /dev/null
@@ -1,17 +0,0 @@
-from client import *
-import DB_functions
-import md5
-import datetime
-import re
-
def pp_get_package_list(clientid, output):
    """Parse a client's package listing and record every package found.

    *output* is the raw listing from the client: records separated by
    literal backslash-n sequences ("\\n" in the transported string),
    each of the form "name|version[|...]".  Stray quote characters are
    stripped before splitting.

    Returns True (matching the other postprocess hooks).
    """
    print("The clientid is: %s\n" % clientid)
    for line in output.split("\\n"):
        # Plain character strip; no need for a regex here.
        fields = line.replace("'", "").split("|")
        if len(fields) < 2:
            # BUG FIX: the original indexed fields[1] unconditionally and
            # raised IndexError on blank or malformed lines.
            continue
        package, version = fields[0], fields[1]
        DB_functions.add_package(clientid, package, version)
    return True
diff --git a/server/pygacl.py b/server/pygacl.py
deleted file mode 100644
index 656c1e7..0000000
--- a/server/pygacl.py
+++ /dev/null
@@ -1,246 +0,0 @@
-#
-# Bad translation from the original phpgacl class
-#
-# implemented: acl_check, acl_query, acl_get_groups
-#
-# NOTES:
-# Changed initialization to use either default settings, object init options
-# for adodb connection or optionally a passed in adodb connection object.
-#
-# TODO: implement complete interface, right now it only queries DB for permissions
-# TODO: make this good Python code; right now it's a rough copy of the PHP original (YUCK!).
-# TODO: implement caching and debug (debug is called in code, keeping var to false to prevent errors)
-#
-
-from sets import Set
-
class gacl:
    """Python port of the phpGACL permission-checking class (query side).

    Implemented: acl_check, acl_query, acl_get_groups.  Caching and most
    of the PHP original's management interface are not implemented.  A
    database handle can be passed in (any ADOdb-style connection
    exposing quote()/Execute()) or created lazily from the defaults.
    """

    # Default configuration.  Each entry can be overridden through the
    # ``opts`` dict accepted by __init__ (plain key name, e.g.
    # opts={'db_type': 'postgres'}).
    __debug = False
    __db_table_prefix = 'gacl_'
    __db_type = 'mysql'
    __db_host = 'localhost'
    __db_user = ''
    __db_password = ''
    __db_name = 'gacl'
    __caching = False
    __force_cache_expire = True
    __cache_dir = '/tmp/pygacl_cache'
    __cache_expire_time = 600
    __group_switch = '_group_'

    __conn = None

    def __init__(self, conn=None, opts=None):
        """Store (or build) the DB connection, applying recognized options.

        conn -- ADOdb-compatible connection; when omitted, a new adodb
                connection is created from the (possibly overridden)
                class defaults.
        opts -- dict of configuration overrides; unknown keys are ignored.
        """
        # Option names that callers may override.
        options = set(['debug', 'items_per_page', 'max_select_box_items',
                       'max_search_return_items', 'db_table_prefix',
                       'db_type', 'db_host', 'db_user', 'db_password',
                       'db_name', 'caching', 'force_cache_expire',
                       'cache_dir', 'cache_expire_time'])

        if opts:
            # BUG FIX: the original intersected the option names with
            # opts.items() (key/value tuples, which never match any name)
            # and then eval()'d an assignment statement, which raises
            # SyntaxError -- eval only accepts expressions.  Intersect on
            # the keys and setattr() the name-mangled private attribute.
            for key in options & set(opts.keys()):
                setattr(self, '_gacl__' + key, opts[key])

        if conn:
            self.__conn = conn
        else:
            # BUG FIX: the original referenced the settings as bare
            # globals (__db_type, ...), which raises NameError; they are
            # class attributes and must be read through self.
            import adodb
            self.__conn = adodb.NewADOConnection(self.__db_type)
            self.__conn.Connect(self.__db_host, self.__db_user,
                                self.__db_password, self.__db_name)

    def debug_text(self, text):
        # Called throughout the original but never defined, so every
        # failure path raised AttributeError.  Only emits when debugging
        # is enabled.
        if self.__debug:
            print(text)

    def acl_check(self, aco_section_value, aco_value, aro_section_value,
                  aro_value, axo_section_value=None, axo_value=None,
                  root_aro_group=None, root_axo_group=None):
        """Return the boolean allow/deny answer for an ACL lookup."""
        acl_result = self.acl_query(aco_section_value, aco_value,
                                    aro_section_value, aro_value,
                                    axo_section_value, axo_value,
                                    root_aro_group, root_axo_group)
        if acl_result:
            return acl_result['allow']
        return False

    def acl_query(self, aco_section_value, aco_value, aro_section_value,
                  aro_value, axo_section_value=None, axo_value=None,
                  root_aro_group=None, root_axo_group=None, debug=None):
        """Look up the best-matching ACL entry for the given ACO/ARO(/AXO).

        Returns a dict {'acl_id', 'return_value', 'allow'} (plus 'query'
        when debugging is on), or False when the request cannot be
        resolved (invalid group, failed query).
        """
        # TODO: implement caching; cresult would hold a cached answer.
        cresult = ''

        if not cresult:
            sql_aro_group_ids = ""
            sql_axo_group_ids = ""

            # Resolve the requester's group memberships.
            aro_group_ids = self.acl_get_groups(aro_section_value, aro_value,
                                                root_aro_group, 'ARO')
            if aro_group_ids:
                # BUG FIX: the original called .items() on a Set (no such
                # method) -- build the IN(...) list from the ids directly.
                sql_aro_group_ids = ','.join([str(gid) for gid in aro_group_ids])

            # BUG FIX: the defaults are None, and ``None != ''`` is true,
            # so the original looked up AXO groups for (None, None).
            # Treat None like the PHP original's empty string.
            if axo_section_value and axo_value:
                axo_group_ids = self.acl_get_groups(axo_section_value, axo_value,
                                                    root_axo_group, 'AXO')
                if axo_group_ids:
                    sql_axo_group_ids = ','.join([str(gid) for gid in axo_group_ids])

            # --- Build the query -----------------------------------------
            order_by = []

            query = 'SELECT a.id,a.allow,a.return_value '
            query += ' FROM ' + self.__db_table_prefix + 'acl a '
            query += ' LEFT JOIN ' + self.__db_table_prefix + 'aco_map ac ON ac.acl_id=a.id '

            if aro_section_value != self.__group_switch:
                query += ' LEFT JOIN ' + self.__db_table_prefix + 'aro_map ar ON ar.acl_id=a.id '

            if axo_section_value != self.__group_switch:
                query += ' LEFT JOIN ' + self.__db_table_prefix + 'axo_map ax ON ax.acl_id=a.id '

            # Only join the group tables if we actually have group ids.
            if sql_aro_group_ids:
                query += (' LEFT JOIN ' + self.__db_table_prefix + 'aro_groups_map arg ON arg.acl_id=a.id '
                          ' LEFT JOIN ' + self.__db_table_prefix + 'aro_groups rg ON rg.id=arg.group_id '
                          ' LEFT JOIN ' + self.__db_table_prefix + 'axo_groups_map axg ON axg.acl_id=a.id ')

            if sql_axo_group_ids:
                query += ' LEFT JOIN ' + self.__db_table_prefix + 'axo_groups xg ON xg.id=axg.group_id '

            # --- WHERE clause --------------------------------------------
            query += (' WHERE a.enabled=1 and (ac.section_value=' +
                      self.__conn.quote(aco_section_value) +
                      ' and ac.value=' + self.__conn.quote(aco_value) + ') ')

            if aro_section_value == self.__group_switch:
                if not sql_aro_group_ids:
                    self.debug_text('acl_query(): Invalid ARO Group: ' + str(aro_value))
                    return False
                query += ' AND rg.id IN (' + sql_aro_group_ids + ') '
                order_by.append('(rg.rgt-rg.lft) ASC')
            else:
                query += (' AND ((ar.section_value=' + self.__conn.quote(aro_section_value) +
                          ' AND ar.value=' + self.__conn.quote(aro_value) + ') ')
                if sql_aro_group_ids:
                    order_by.append('(CASE WHEN ar.value IS NULL THEN 0 ELSE 1 END) DESC')
                    order_by.append('(rg.rgt-rg.lft) ASC')
                query += ')'

            # AXO side.  NOTE(review): restructured to follow the phpGACL
            # original -- the group-switch branch and the literal/NULL
            # branch are alternatives; the Python translation had mashed
            # them into one block.
            if axo_section_value == self.__group_switch:
                if not sql_axo_group_ids:
                    self.debug_text('acl_query(): Invalid AXO Group: ' + str(axo_value))
                    return False
                query += ' AND xg.id IN (' + sql_axo_group_ids + ') '
                order_by.append('(xg.rgt-xg.lft) ASC')
            else:
                query += ' AND ('
                if not axo_section_value and not axo_value:
                    query += '(ax.section_value IS NULL and ax.value IS NULL)'
                else:
                    query += ('(ax.section_value=' + self.__conn.quote(axo_section_value) +
                              ' and ax.value=' + self.__conn.quote(axo_value) + ')')
                if sql_axo_group_ids:
                    query += ' OR xg.id IN (' + sql_axo_group_ids + ')'
                    order_by.append('(CASE WHEN ax.value IS NULL THEN 0 ELSE 1 END) DESC')
                    order_by.append('(xg.rgt-xg.lft) ASC')
                query += ')'

            # Most specific match first, newest ACL wins ties.
            order_by.append('a.updated_date DESC')
            query += ' ORDER BY ' + ','.join(order_by) + ' '

            rs = self.__conn.Execute(query)

            if not rs:
                self.debug_text('acl_query empty: ' + query)
                return False

            # BUG FIX: the original read rs.fields, advanced with
            # MoveNext() and read fields again, returning the *second*
            # row.  The best match is the first row of the ordered set.
            row = rs.fields

            if row:
                allow = bool(row[1] and row[1] == 1)
                retarr = {'acl_id': row[0], 'return_value': row[2], 'allow': allow}
            else:
                retarr = {'acl_id': None, 'return_value': None, 'allow': False}

            if self.__debug:
                retarr['query'] = query
                # BUG FIX: the original concatenated a possibly-None
                # acl_id and a bool to a string (TypeError); use
                # %-formatting.
                self.debug_text('acl_query(): ACO %s/%s ARO %s/%s ACL ID: %s Result: %s'
                                % (aco_section_value, aco_value,
                                   aro_section_value, aro_value,
                                   retarr['acl_id'], retarr['allow']))

            return retarr

    def acl_get_groups(self, section_value, value, root_group=None, group_type='ARO'):
        """Return the set of group ids (including ancestors) for an ARO/AXO.

        Returns False when the lookup query fails.
        """
        if group_type in ('axo', 'AXO'):
            group_type = 'axo'
            object_table = self.__db_table_prefix + 'axo'
            group_table = self.__db_table_prefix + 'axo_groups'
            group_map_table = self.__db_table_prefix + 'groups_axo_map'
        else:
            group_type = 'aro'
            object_table = self.__db_table_prefix + 'aro'
            group_table = self.__db_table_prefix + 'aro_groups'
            group_map_table = self.__db_table_prefix + 'groups_aro_map'

        # TODO: implement cache

        query = 'SELECT DISTINCT g2.id'

        if section_value == self.__group_switch:
            query += ' FROM ' + group_table + ' g1,' + group_table + ' g2'
            # BUG FIX: the group name must be quoted like every other literal.
            where = ' WHERE g1.value=' + self.__conn.quote(value)
        else:
            query += (' FROM ' + object_table + ' o,' + group_map_table + ' gm,' +
                      group_table + ' g1,' + group_table + ' g2')
            where = (' WHERE (o.section_value=' + self.__conn.quote(section_value) +
                     ' AND o.value=' + self.__conn.quote(value) + ') AND gm.' +
                     group_type + '_id=o.id AND g1.id=gm.group_id')

        # BUG FIX: root_group defaults to None but was only compared to
        # '', so the default path always joined g3 and quoted None.
        if root_group:
            query += ',' + group_table + ' g3'
            where += (' AND g3.value=' + self.__conn.quote(root_group) +
                      ' AND ((g2.lft BETWEEN g3.lft AND g1.lft) AND (g2.rgt BETWEEN g1.rgt AND g3.rgt))')
        else:
            where += ' AND (g2.lft <= g1.lft AND g2.rgt >= g1.rgt)'

        query += where

        rs = self.__conn.Execute(query)

        if rs is None:
            # BUG FIX: was ``print conn.ErrorMsg()`` with an undefined name.
            print(self.__conn.ErrorMsg())
            return False

        # BUG FIX: the original called Set.append() (sets have no append)
        # on the PHP-ism reset(rs.fields); collect the first column of
        # every row instead.
        ids = set()
        while not rs.EOF:
            ids.add(rs.fields[0])
            rs.MoveNext()

        return ids
-
diff --git a/server/scired.py b/server/scired.py
deleted file mode 100755
index 6ef89c9..0000000
--- a/server/scired.py
+++ /dev/null
@@ -1,178 +0,0 @@
-#!/usr/bin/env python
-
-import certgen
-import os
-import getopt
-import sys
-import traceback
-from SecureXMLRPCServer import SecureXMLRPCServer
-from ScireDB import *
-from adodb import *
-
# Daemon defaults; the -h/-p/-c command-line flags override these.
bind_address = "0.0.0.0"
bind_port = 9876
config_dir = "/etc/scire"  # holds server.key / server.cert
private_key = None
public_key = None
pubkey_obj = None
module_dir = "modules"  # directory scanned for pluggable XML-RPC modules

# Registries maintained by load_module()/register_module():
#   modules: module name -> {'module', 'path', 'mtime'}
#   xmlrpc_functions: XML-RPC method name -> {'func', 'module_name'}
modules = {}
xmlrpc_functions = {}
-
class ScireXMLRPCServer(SecureXMLRPCServer):
    """XML-RPC server that identifies clients by certificate digest.

    The SSL layer accepts any client certificate; its SHA-1 digest is
    recorded and used as the client identity.  Authorization happens in
    _dispatch(): only 'Active' clients may call arbitrary methods, while
    unregistered clients are limited to add_client/register_client.
    """

    def verify_client_cert(self, conn, cert, errnum, depth, ok):
        # Record the peer certificate's SHA-1 digest for _dispatch().
        # Every cert is accepted here -- authorization comes later.
        self.client_digest = cert.digest("sha1")
        return True

    def _dispatch(self, method, params):
        """Dispatch an XML-RPC call to the registered module function."""
        print('Method: %s' % method)

        # Look up the client's registration status by cert digest; a
        # false value means the digest is not in the clients table.
        regfunc = xmlrpc_functions['register_client']['func']
        status = regfunc(self.client_digest)

        try:
            module = modules[xmlrpc_functions[method]['module_name']]
            # Hot-reload the module if its file changed on disk.
            module_mtime = os.stat(module['path']).st_mtime
            if module_mtime > module['mtime']:
                reload_module(xmlrpc_functions[method]['module_name'])
            func = xmlrpc_functions[method]['func']
        except (AttributeError, KeyError):
            # BUG FIX: an unknown method name raises KeyError, which the
            # original did not catch, leaking the raw exception.
            raise Exception('method "%s" is not implemented' % method)
        try:
            if status == 'Active':
                print('Client is accepted, proceeding as requested.')
                return func(self.client_digest, *params)
            elif method in ['add_client', 'register_client']:
                # Registration calls are allowed before acceptance.
                return func(self.client_digest, *params)
            else:
                # BUG FIX: the original called sys.exit(1) here, which
                # tore down the entire server whenever one unaccepted
                # client called a privileged method.  Fault the call only.
                print("Client is not accepted. Exiting. They were trying to call method \"%s\"" % method)
                raise Exception('client is not accepted')
        except:
            print("===========================")
            print("Error during function call!")
            display_traceback()
            raise

    def db_version(self):
        """Return the backend database's version string."""
        db = ScireDB()
        value = db.version()
        db.Close()
        return value
-
def display_traceback():
    """Print the active exception's traceback, one stripped line per row."""
    exc_type, exc_value, exc_tb = sys.exc_info()
    for entry in traceback.format_exception(exc_type, exc_value, exc_tb):
        print(entry.strip())
-
def generate_cert_and_key(keytype="RSA", keylength=1024):
    """Create the server's certificate and private key under config_dir.

    Writes server.key and server.cert via certgen.  (CA cert generation
    existed once but is currently disabled.)
    """
    sys.stdout.write("Generating server certificate...")
    certgen.certgen(keytype, keylength, "Scire Server",
                    config_dir + '/server.key',
                    config_dir + '/server.cert')
    print("done")
-
def load_modules(module_dir):
    """Import every .py file found in module_dir and register it."""
    sys.path.insert(0, module_dir)
    for entry in os.listdir(os.path.abspath(module_dir)):
        if not entry.endswith('.py'):
            continue  # only Python sources are module candidates
        load_module(entry.rsplit(".", 1)[0], module_dir + "/" + entry)
-
def load_module(module_name, module_path):
    """Import one server module and record it in the global registry.

    On success the module object, its path and its mtime are stored in
    ``modules`` and its exported functions are registered.  Failures are
    printed and swallowed so one broken module cannot abort startup.
    """
    global modules
    try:
        imported = __import__(module_name)
        modules[module_name] = {
            'module': imported,
            'path': module_path,
            'mtime': os.stat(module_path).st_mtime,
        }
        register_module(module_name)
    except:
        # Deliberate best-effort: log and keep loading the rest.
        print("Couldn't load module %s...continuing" % module_path)
        display_traceback()
-
def reload_module(module_name):
    """Re-import an already-loaded module and refresh its registration.

    Updates the stored module object and mtime, then re-registers its
    exported functions.  Failures are printed and swallowed.
    """
    global modules
    try:
        entry = modules[module_name]
        entry['module'] = reload(entry['module'])
        entry['mtime'] = os.stat(entry['path']).st_mtime
        register_module(module_name)
    except:
        # Deliberate best-effort: keep serving with the old module.
        print("Couldn't reload module %s...continuing" % modules[module_name]['path'])
-
def register_module(module_name):
    """(Re)register the XML-RPC functions exported by a loaded module.

    Drops any functions previously registered for this module, then asks
    the module's register() hook for the names to export.  If the hook
    fails, the module is evicted from the registry entirely.
    """
    global xmlrpc_functions, modules
    # Purge stale entries left over from a previous (re)load.
    stale = [name for name in xmlrpc_functions
             if xmlrpc_functions[name]['module_name'] == module_name]
    for name in stale:
        del xmlrpc_functions[name]
    try:
        mod = modules[module_name]['module']
        for name in getattr(mod, 'register')():
            xmlrpc_functions[name] = {
                'func': getattr(mod, name),
                'module_name': module_name,
            }
    except:
        print("Couldn't register functions in module %s...continuing" % module_name)
        del modules[module_name]
-
if __name__ == "__main__":

    # Parse command-line flags; getopt raises on unknown options, in
    # which case we print usage and exit with status 2.
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'dh:p:c:')
    except getopt.error, msg:
        print msg
        print """usage: python %s [-d] [-h host] [-p port] [-c configdir]
        [-d] Turn on debugging
        [-h host] Define the bind IP address/host
        [-p port] Set the port to run on.
        [-c configdir] Set the config directory. (defaults to /etc/scire)
        """ % sys.argv[0]
        sys.exit(2)
    # Apply the parsed flags over the module-level defaults above.
    # NOTE(review): -h is the bind host here, not help.
    for o, a in opts:
        if o == '-h': bind_address = a
        elif o == '-p' : bind_port = int(a)
        elif o == '-c' :
            if os.path.isdir(a):
                config_dir = a
            else :
                print "ERROR: Config dir doesn't exist\n"
                sys.exit(2)
        elif o == '-d': debug = True

    # Load modules
    load_modules(module_dir)

    # Check for public/private keypair and generate if they don't exist
    if not os.path.isfile(config_dir + "/server.key") or not os.path.isfile(config_dir + "/server.cert"):
        generate_cert_and_key()

    # Serve XML-RPC over SSL; client authorization happens per-request
    # inside ScireXMLRPCServer._dispatch().
    xmlrpc_server = ScireXMLRPCServer((bind_address, bind_port), config_dir + "/server.cert", config_dir + "/server.key")

    try:
        xmlrpc_server.serve_forever()
    finally:
        # Always release the listening socket, even on interrupt.
        xmlrpc_server.server_close()