Commit 5b16c94a authored by Marcel Rieger's avatar Marcel Rieger
Browse files

Add new GUI draft (not completed yet).

parent 635e9850
# Alembic migration environment: reads the database URL from the same
# .ini file Alembic was started with and exposes the application's
# SQLAlchemy metadata for 'autogenerate' support.
# (Python 2: ConfigParser was renamed configparser in Python 3.)
from __future__ import with_statement
from alembic import context
from sqlalchemy import create_engine, pool
from logging.config import fileConfig
import ConfigParser
import sys, os
# Parse the Alembic config file and pull the connection URL out of its
# [database] section.
config = ConfigParser.SafeConfigParser()
config.read(context.config.config_file_name)
url = config.get('database', 'sqlalchemy.url')
# Interpret the config file for Python logging.
# This line sets up loggers basically.
#fileConfig(config.config_file_name)
# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
# Make the project root importable so the application's models can be
# loaded; their declarative Base metadata drives autogenerate.
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__),"..")))
from vispa import models
target_metadata = models.Base.metadata
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
def run_migrations_offline():
    """Run migrations in 'offline' mode.

    The context is configured with only the database URL -- no Engine
    is created, so no DBAPI has to be installed. In this mode
    context.execute() emits the generated SQL to the script output
    instead of executing it against a live connection.
    """
    context.configure(url=url)
    with context.begin_transaction():
        context.run_migrations()
def run_migrations_online():
    """Run migrations in 'online' mode.

    Creates an Engine, opens a real DBAPI connection, associates it
    with the Alembic context and runs the migrations inside a single
    transaction. The connection is always closed, even when
    configuration or migration fails.
    """
    engine = create_engine(url, poolclass=pool.NullPool)
    connection = engine.connect()
    try:
        # Configure inside the try block so the connection is not
        # leaked if context.configure() itself raises (the original
        # only guarded the migration run).
        context.configure(
            connection=connection,
            target_metadata=target_metadata
        )
        with context.begin_transaction():
            context.run_migrations()
    finally:
        connection.close()
# Entry point: Alembic selects offline mode when invoked with --sql.
if context.is_offline_mode():
    run_migrations_offline()
else:
    run_migrations_online()
"""${message}
Revision ID: ${up_revision}
Revises: ${down_revision}
Create Date: ${create_date}
"""
# revision identifiers, used by Alembic.
revision = ${repr(up_revision)}
down_revision = ${repr(down_revision)}
from alembic import op
import sqlalchemy as sa
${imports if imports else ""}
def upgrade():
${upgrades if upgrades else "pass"}
def downgrade():
${downgrades if downgrades else "pass"}
"""Initial setup
Revision ID: 1e0a9d82ddc3
Revises: None
Create Date: 2012-09-25 17:43:11.860161
"""
# revision identifiers, used by Alembic.
revision = '1e0a9d82ddc3'
down_revision = None
from alembic import op
import sqlalchemy as sa
def downgrade():
    """Drop all tables created by this revision.

    Tables are dropped children-first: shared_folder_allocation holds
    foreign keys into shared_folder and user, so it must be dropped
    before the tables it references. (The original auto-generated order
    dropped shared_folder and user first, which fails on engines that
    enforce foreign-key constraints.)
    """
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_table(u'shared_folder_allocation')
    op.drop_table(u'shared_folder')
    op.drop_table(u'access_statistics')
    op.drop_table(u'user')
    op.drop_table(u'workspace')
    ### end Alembic commands ###
def upgrade():
    """Create the initial schema.

    Tables that are referenced by foreign keys (shared_folder, user)
    are created before shared_folder_allocation, whose constraints
    point at them. (The original auto-generated order created the
    allocation table first, which fails on engines that validate
    foreign-key targets at creation time.)
    """
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table(u'workspace',
        sa.Column(u'id', sa.INTEGER(), nullable=False, primary_key=True),
        sa.Column(u'name', sa.UnicodeText(), nullable=False),
        sa.Column(u'host', sa.UnicodeText(), nullable=False),
        sa.Column(u'login', sa.UnicodeText(), nullable=True),
        sa.Column(u'key', sa.UnicodeText(), nullable=False),
        sa.Column(u'password', sa.UnicodeText(), nullable=False),
        sa.Column(u'command', sa.UnicodeText(), nullable=False),
        sa.Column(u'basedir', sa.UnicodeText(), nullable=False),
    )
    op.create_table(u'user',
        sa.Column(u'id', sa.INTEGER(), nullable=False, primary_key=True),
        sa.Column(u'name', sa.UnicodeText(), nullable=False),
        sa.Column(u'password', sa.UnicodeText(), nullable=False),
        sa.Column(u'email', sa.UnicodeText(), nullable=False),
        sa.Column(u'created', sa.DATETIME(), nullable=False),
        sa.Column(u'last_request', sa.UnicodeText(), nullable=False),
        sa.Column(u'status', sa.INTEGER(), nullable=False),
    )
    op.create_table(u'access_statistics',
        sa.Column(u'id', sa.INTEGER(), nullable=False, primary_key=True),
        sa.Column(u'user_ip', sa.UnicodeText(), nullable=False),
        sa.Column(u'user_agent', sa.UnicodeText(), nullable=False),
        sa.Column(u'date', sa.DATE(), nullable=False),
        sa.Column(u'pis', sa.INTEGER(), nullable=False),
    )
    op.create_table(u'shared_folder',
        sa.Column(u'id', sa.INTEGER(), nullable=False, primary_key=True),
        sa.Column(u'name', sa.UnicodeText(), nullable=False),
        sa.Column(u'created', sa.DATETIME(), nullable=False),
    )
    # Association table last: its FKs reference user and shared_folder.
    op.create_table(u'shared_folder_allocation',
        sa.Column(u'folder_id', sa.INTEGER(), nullable=False),
        sa.Column(u'user_id', sa.INTEGER(), nullable=False),
        sa.ForeignKeyConstraint(['folder_id'], [u'shared_folder.id'], ),
        sa.ForeignKeyConstraint(['user_id'], [u'user.id'], ),
        sa.PrimaryKeyConstraint(u'folder_id', u'user_id')
    )
    ### end Alembic commands ###
"""Add hash to user
Revision ID: 7d1e9dd251c
Revises: 1e0a9d82ddc3
Create Date: 2012-09-25 17:44:10.314270
"""
# revision identifiers, used by Alembic.
revision = '7d1e9dd251c'
down_revision = '1e0a9d82ddc3'
from alembic import op
import sqlalchemy as sa
def upgrade():
    # Add the (nullable) 'hash' column. Its purpose is not visible
    # here -- presumably a password/session hash; confirm against
    # the User model in vispa.models.
    op.add_column('user', sa.Column('hash', sa.UnicodeText()))
def downgrade():
    # Revert: remove the 'hash' column added by upgrade().
    op.drop_column('user', 'hash')
\ No newline at end of file
import pickle
import logging
import logging.handlers
import logging.config
import SocketServer
import struct
class LogRecordStreamHandler(SocketServer.StreamRequestHandler):
    """Handler for a streaming logging request.

    Reads length-prefixed, pickled LogRecord dicts from the socket and
    logs each record using whatever logging policy is configured
    locally.
    """

    def handle(self):
        """
        Handle multiple requests - each expected to be a 4-byte length,
        followed by the LogRecord in pickle format. Logs the record
        according to whatever policy is configured locally.
        """
        while True:
            # The original read the length header with a single recv(4)
            # and bailed out on a short read, silently dropping data;
            # a stream socket may legitimately deliver fewer bytes.
            header = self._recv_exact(4)
            if header is None:
                break
            slen = struct.unpack('>L', header)[0]
            payload = self._recv_exact(slen)
            if payload is None:
                break
            obj = self.unPickle(payload)
            record = logging.makeLogRecord(obj)
            self.handleLogRecord(record)

    def _recv_exact(self, size):
        # Read exactly `size` bytes from the connection, looping over
        # short reads. Returns None when the peer closes the
        # connection before the full chunk arrived.
        data = b''
        while len(data) < size:
            more = self.connection.recv(size - len(data))
            if not more:
                return None
            data = data + more
        return data

    def unPickle(self, data):
        # SECURITY: pickle.loads executes arbitrary code supplied by
        # the sender; only run this receiver on a trusted network.
        return pickle.loads(data)

    def handleLogRecord(self, record):
        # if a name is specified, we use the named logger rather than the one
        # implied by the record.
        if self.server.logname is not None:
            name = self.server.logname
        else:
            name = record.name
        # NOTE(review): `name` is computed but not used -- every record
        # is deliberately(?) funnelled through the local "net" logger so
        # this receiver's config decides routing; confirm intent.
        logger = logging.getLogger("net")
        # N.B. EVERY record gets logged. This is because Logger.handle
        # is normally called AFTER logger-level filtering. If you want
        # to do filtering, do it at the client end to save wasting
        # cycles and network bandwidth!
        logger.handle(record)
class LogRecordSocketReceiver(SocketServer.ThreadingTCPServer):
    """
    Simple TCP socket-based logging receiver suitable for testing.
    """

    # Allow quick restarts without waiting for TIME_WAIT to expire.
    allow_reuse_address = 1

    def __init__(self, host='localhost',
                 port=50000,
                 handler=LogRecordStreamHandler):
        SocketServer.ThreadingTCPServer.__init__(self, (host, port), handler)
        # Set self.abort truthy (from another thread) to stop serving.
        self.abort = 0
        # Poll interval, in seconds, for the select() loop below.
        self.timeout = 1
        # When set, overrides the logger name of received records.
        self.logname = None

    def serve_until_stopped(self):
        """Accept and dispatch requests until self.abort becomes true."""
        import select
        stopped = 0
        while not stopped:
            # Wait up to self.timeout for an incoming connection so the
            # abort flag is re-checked at least once per interval.
            readable, _, _ = select.select([self.socket.fileno()],
                                           [], [],
                                           self.timeout)
            if readable:
                self.handle_request()
            stopped = self.abort
def main():
    # Configure local logging from conf/logging.conf; best-effort --
    # if the file is missing or invalid, warn and keep the library
    # defaults rather than aborting.
    logging_conf_filename = 'conf/logging.conf'
    try:
        logging.config.fileConfig(logging_conf_filename)
    except Exception as e:
        # Python 2 print statement; the caught exception itself is
        # not included in the warning.
        print "WARNING: Could not configure logging with '%s'." % logging_conf_filename
    # Listen on localhost:50000 (constructor defaults) and pump
    # records until the abort flag is set.
    tcpserver = LogRecordSocketReceiver()
    print('Starting Logserver...')
    tcpserver.serve_until_stopped()


if __name__ == '__main__':
    main()
#!/usr/bin/env python
import sys
import os
import pwd, grp
def drop_privileges(uid_name='nobody', gid_name='nogroup'):
    """Irreversibly drop root privileges to the given user and group.

    No-op when the process is not running as root. The order matters:
    supplementary groups and the gid must be changed while still root;
    the uid change comes last because it makes the others impossible.

    :param uid_name: name of the target user (default 'nobody')
    :param gid_name: name of the target group (default 'nogroup')
    :raises KeyError: if the user or group name does not exist
    """
    if os.getuid() != 0:
        # We're not root so, like, whatever dude
        return
    # Get the uid/gid from the name
    running_uid = pwd.getpwnam(uid_name).pw_uid
    running_gid = grp.getgrnam(gid_name).gr_gid
    # Remove supplementary group privileges
    os.setgroups([])
    # Set the new gid first, then the uid
    os.setgid(running_gid)
    os.setuid(running_uid)
    # Ensure a very conservative umask (owner-only access). Written as
    # 0o077 instead of the Python-2-only literal 077 so the module also
    # parses under Python 3 (0o syntax is valid from Python 2.6 on);
    # the previously saved old umask was never used, so it is dropped.
    os.umask(0o077)
''' setup null devive '''
# Pick the platform's null device, used as the default target for the
# redirected standard streams ('nul' on Windows, /dev/null elsewhere).
null_device = '/dev/null'
if getattr(sys, "getwindowsversion", None) is not None:
    null_device = 'nul'

''' Parse Options '''
from optparse import OptionParser
parser=OptionParser()
parser.add_option("-d", "--dir", dest="dir", help="Directory in which vispa is located")
parser.add_option("-i", "--input", dest="stdin", default=null_device, help="input stream")
parser.add_option("-o", "--output", dest="stdout", default=null_device, help="output stream")
parser.add_option("-e", "--error", dest="stderr", default=null_device, help="error stream")
parser.add_option("-u", "--user", dest="user", help="username")
parser.add_option("-g", "--group", dest="group", default='nogroup', help="group name")
parser.add_option("-l", "--listen", dest="listen", default='localhost', help="host to listen on")
parser.add_option("-b", "--base", dest="basedir", help="change to directory")
options, args = parser.parse_args()

''' redirect i/o '''
sys.stdout.flush()
sys.stderr.flush()
# Keep an unbuffered duplicate of the original stdout (fd 1) so the
# rpc server can still reach the launching console after redirection.
_stdout = os.fdopen(os.dup(1), 'w', 0)
# '-' means "leave this stream untouched"; otherwise the process'
# stdin/stdout/stderr are rebound to the given files (Python 2 file()
# builtin; stderr is opened unbuffered so errors appear immediately).
if options.stdin and options.stdin != '-':
    si = file(options.stdin, 'r')
    os.dup2(si.fileno(), sys.stdin.fileno())
if options.stdout and options.stdout != '-':
    so = file(options.stdout, 'a+')
    os.dup2(so.fileno(), sys.stdout.fileno())
if options.stderr and options.stderr != '-':
    se = file(options.stderr, 'a+', 0)
    os.dup2(se.fileno(), sys.stderr.fileno())

''' Add path '''
# Make the vispa package importable, change the working directory and
# drop root privileges BEFORE importing any application code.
if options.dir:
    sys.path.insert(0, os.path.abspath(options.dir))
if options.basedir:
    os.chdir(options.basedir)
if options.user:
    drop_privileges(options.user, options.group)

import pxl
# Console log level 3 -- semantics defined by pxl, not visible here.
pxl.core.LogDispatcher.instance().enableConsoleLogHandler(3)

''' Start rpc server process '''
# Blocks serving RPC requests; _stdout gives it the original console.
import vispa.plugins.rpc_main as rpc
rpc.serve_rpc_main(options.listen, stdout=_stdout)
#!/usr/bin/env python
"""
Daemon-Python (0.2)
Lightweight and no-nonsense POSIX daemon library
https://github.com/stackd/daemon-py
DOCUMENTATION
----------------------------------------
see README.md
LICENSE
----------------------------------------
MIT/X11, see LICENSE
DEPENDENCIES
----------------------------------------
Python 2.x.x
"""
import sys
import os
import time
import atexit
from signal import SIGTERM, SIGKILL
sys.path.append(os.path.normpath(os.path.abspath(os.path.curdir)) )
from vispa.server import Server
class Daemon(object):
    """ A generic daemon class for Python 2.x.x

    Usage: subclass the Daemon class and override the run() method
    """

    def __init__(self, pidfile, stdin='/dev/null', stdout='/dev/null', stderr='/dev/null'):
        # Paths the daemonized process will use for its standard
        # streams, plus the pidfile recording the daemon's pid.
        self.stdin = stdin
        self.stdout = stdout
        self.stderr = stderr
        self.pidfile = pidfile

    def daemonize(self):
        """UNIX double fork mechanism.
        See Stevens' "Advanced Programming in the UNIX Environment" for details (ISBN 0201563177).
        """
        try:
            pid = os.fork()
            if pid > 0:
                # exit first parent
                sys.exit(0)
        except OSError, e:
            sys.stderr.write("fork #1 failed: %d (%s)\n" % (e.errno, e.strerror))
            sys.exit(1)
        # decouple from parent environment
        #os.chdir("/")
        os.setsid()
        os.umask(0)
        # do second fork
        try:
            pid = os.fork()
            if pid > 0:
                # exit from second parent
                sys.exit(0)
        except OSError, e:
            sys.stderr.write("fork #2 failed: %d (%s)\n" % (e.errno, e.strerror))
            sys.exit(1)
        # redirect standard file descriptors
        # (Python 2 file() builtin; stderr is opened unbuffered)
        sys.stdout.flush()
        sys.stderr.flush()
        si = file(self.stdin, 'r')
        so = file(self.stdout, 'a+')
        se = file(self.stderr, 'a+', 0)
        os.dup2(si.fileno(), sys.stdin.fileno())
        os.dup2(so.fileno(), sys.stdout.fileno())
        os.dup2(se.fileno(), sys.stderr.fileno())
        # write pidfile; removed again on clean interpreter exit
        atexit.register(self.delpid)
        pid = str(os.getpid())
        file(self.pidfile,'w+').write("%s\n" % pid)

    def delpid(self):
        # atexit hook: remove the pidfile when the daemon exits.
        os.remove(self.pidfile)

    def start(self):
        """Start the daemon."""
        #Check for a pidfile to see if the daemon is already running.
        try:
            pf = file(self.pidfile,'r')
            pid = int(pf.read().strip())
            pf.close()
        except IOError:
            pid = None
        if pid:
            message = "pidfile %s already exists. %s running?\n"
            sys.stderr.write(message % (self.pidfile, self.__class__.__name__))
            sys.exit(1)
        # Start the daemon
        self.daemonize()
        self.run()

    def stop(self):
        """Stop the daemon."""
        #Get the pid from the pidfile.
        try:
            pf = file(self.pidfile,'r')
            pid = int(pf.read().strip())
            pf.close()
        except IOError:
            pid = None
        if not pid:
            message = "pidfile %s does not exist. %s not running?\n"
            sys.stderr.write(message % (self.pidfile, self.__class__.__name__))
            return # not an error in a restart
        # Try killing the daemon process:
        # first ask politely with SIGTERM (50 x 0.1s), then loop on
        # SIGKILL until os.kill raises because the process is gone.
        try:
            for i in range(50):
                os.kill(pid, SIGTERM)
                time.sleep(0.1)
            print "Try killing..."
            while 1:
                os.kill(pid, SIGKILL)
                time.sleep(0.1)
        except OSError, err:
            # "No such process" is the expected way out of the loops;
            # anything else is a real error.
            err = str(err)
            if err.find("No such process") > 0:
                if os.path.exists(self.pidfile):
                    os.remove(self.pidfile)
            else:
                print str(err)
                sys.exit(1)

    def restart(self):
        """Restart the daemon."""
        self.stop()
        self.start()

    def status(self):
        """Return the daemon state."""
        # Check for a pidfile to see if the daemon is already running
        try:
            pf = file(self.pidfile,'r')
            pid = int(pf.read().strip())
            pf.close()
        except IOError:
            pid = None
        if pid:
            message = "%s (%s) is running.\n"
            sys.stdout.write(message % (self.__class__.__name__, self.pidfile))
        elif not pid:
            message = "pidfile %s does not exist. %s not running.\n"
            sys.stdout.write(message % (self.pidfile, self.__class__.__name__))

    def run(self):
        """Override this method when you subclass Daemon.
        It will be called after the process has been daemonized by
        start() or restart().
        """
class VispaDaemon(Daemon):
    """Daemon that runs the Vispa Server out of a base directory.

    The pidfile (server.pid) and the stdout/stderr capture files
    (out.txt / err.txt) are all created inside that directory.
    """

    def __init__(self, basedir):
        pid_path = os.path.join(basedir, 'server.pid')
        out_path = os.path.join(basedir, 'out.txt')
        err_path = os.path.join(basedir, 'err.txt')
        Daemon.__init__(self, pid_path, stdout=out_path, stderr=err_path)
        self.basedir = basedir

    def run(self):
        # Invoked in the daemonized child process.
        Server(self.basedir).run()
if __name__ == "__main__":
curdir = os.path.normpath(os.path.abspath(os.path.curdir))
daemon = VispaDaemon(curdir)
if len(sys.argv) == 2:
if 'start' == sys.argv[1]:
daemon.start()
elif 'stop' == sys.argv[1]:
daemon.stop()
elif 'restart' == sys.argv[1]:
daemon.restart()
else:
print "Unknown command"
sys.exit(2)
sys.exit(0)
else:
print "usage: %s start|stop|restart" % sys.argv[0]
sys.exit(2)
[loggers]
keys=root,RPCmain
[handlers]
keys=net
[formatters]
keys=simpleFormatter
[logger_root]
level=ERROR
handlers=
[logger_RPCmain]
level=INFO
handlers=
qualname=RPCmain
[handler_net]
class=logging.handlers.SocketHandler
level=DEBUG
formatter=simpleFormatter
args=("localhost",50000)
[formatter_simpleFormatter]
format=%(asctime)s - %(name)s - %(levelname)s - %(message)s
datefmt=
[loggers]
keys=root,system,error,access,net
[handlers]
keys=consoleHandler,net,systemFileLog,errorFileLog,accessFileLog
[formatters]
keys=simpleFormatter
[logger_root]
level=ERROR
handlers=
[logger_net]
level=DEBUG
handlers=consoleHandler
qualname=net
[logger_system]
level=DEBUG
handlers=systemFileLog
qualname=system
[logger_error]
level=DEBUG
handlers=errorFileLog
qualname=error
[logger_access]
level=DEBUG
handlers=accessFileLog