In [1]:
"""
The Log manager

Should be placed at <jupyter notebook root>/axtools

__author__ = "Alex Xiao <http://www.alexxiao.me/>"
__date__ = "2017-02-08"
__version__ = "0.5"

"""


In [1]:
DEBUG=True
SQL_ERR_NoLogTable='no such table'   # error text used to detect a missing logs table
DAEMON_INTERVAL=3600                 # default: check every hour (3600 seconds)
DAEMON=True
DAEMON_STATUS='Init'
MAX_NUM_LOGS_RETAIN=50000            # purge trigger is twice this; roughly this many recent rows are kept
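
These module-level flags are meant to be tweaked before the threads are started. As an illustration only (the values below are hypothetical, not part of the module): the captured output further down reports sleeps of roughly 0.17 minutes, which suggests that test run used a 10-second interval rather than the hourly default.

# Example override for a quick test run (hypothetical values):
DAEMON_INTERVAL = 10        # check every 10 seconds instead of every hour
MAX_NUM_LOGS_RETAIN = 5     # the purge daemon acts once more than 10 rows exist
DEBUG = True                # echo daemon activity to stdout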

In [1]:
from DB import get_im_memory_share, Column
from tools import tryrun
import datetime,time
from threading import Thread

In [3]:
import queue
# Shared hand-off: log() producers push entries here; the Logger thread drains them.
Q=queue.Queue()

In [4]:
class Logger(Thread):
    
    def __init__(self,queue):
        Thread.__init__(self)
        self.Q=queue
        
        
    def run(self):
        # Consume log entries from the shared queue until DAEMON is cleared.
        cache=get_im_memory_share()
        self.log(cache,'Logger init...','System')
        while DAEMON:
            while not self.Q.empty():
                #Get the items from queue and do log
                log_type,info,service=self.Q.get()
                #print(info,service,log_type)
                self.log(cache,info,service,log_type)
            time.sleep(1)
        self.log(cache,'Logger is stopped','System') 
        if DEBUG:
            print('Logger is stopped')
    
    def warp_time(self,in_time):
        # Format a Unix timestamp as 'YYYY-MM-DD HH:MM:SS'
        return datetime.datetime.fromtimestamp(in_time).strftime('%Y-%m-%d %H:%M:%S')

    def log(self,cache,info,service=None,log_type='Info'):
        curTS=self.warp_time(time.time())
        collist=['time','type','info','service']
        # Resolve the service name: accept either a plain string or an object with a .name attribute.
        if service is None:
            servicename='Unknown'
        elif isinstance(service,str):
            servicename=service
        else:
            servicename=service.name
        inp=[(curTS,log_type,info,servicename)]

        rtn,rcode,rtype=cache.insert('logs',collist,inp)
        if rtype=='error':
            if SQL_ERR_NoLogTable in rtn.args[0]:
                # The logs table has not been created yet: create it, then retry the insert.
                if DEBUG:
                    print("Init, creating logs table")

                coldef=[Column(name='time',type='datetime',nullable=False),
                        Column(name='type',type='text',nullable=False),
                        Column(name='service',type='text',nullable=False),
                        Column(name='info',type='text',nullable=False)]
                cache.create_table('logs',coldef)
                cache.insert('logs',['time','type','service','info'],[(self.warp_time(time.time()),'Info','System','Table logs is created.')])
                # Re-insert the entry that triggered the table creation, keeping its original type and service.
                cache.insert('logs',collist,inp)
            else:
                if DEBUG:
                    print('Error:',rtn)
                # rtn holds the exception returned by the DB layer.
                raise ValueError(rtn.args)
                
def stop():
    global DAEMON
    DAEMON=False

def log(info,service=None,log_type='Info'):
    Q.put([log_type,info,service])
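
log() is the producer half of a plain queue-based hand-off: callers only push a [log_type, info, service] triple onto Q, and the Logger thread is the single consumer that writes to the shared in-memory database. The following self-contained sketch (standard library only, no DB module required, names are illustrative) shows the same pattern in isolation:

import queue, threading, time

q = queue.Queue()
running = True

def consumer():
    # Single consumer: drain the queue and 'persist' each entry (here it just prints).
    while running or not q.empty():
        while not q.empty():
            log_type, info, service = q.get()
            print(time.strftime('%Y-%m-%d %H:%M:%S'), log_type, service, info)
        time.sleep(0.1)

t = threading.Thread(target=consumer, daemon=True)
t.start()

q.put(['Info', 'hello from a producer', 'Demo'])   # same triple layout log() uses
time.sleep(0.5)
running = False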

In [5]:
def start_logger():
    global DAEMON
    logger=Logger(Q)
    DAEMON=True
    logger.daemon=True
    logger.start()
    return logger
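
Once start_logger() has been called, other cells only need the module-level helpers. A short usage sketch, assuming the DB module imported above is available in this environment:

logger = start_logger()                      # starts the consumer thread
log('Service started', 'MyService')          # queued with the default 'Info' type
log('Something went wrong', 'MyService', 'Error')
time.sleep(2)                                # give the thread a moment to flush the queue
stop()                                       # sets DAEMON=False; the thread exits after its next pass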

In [6]:
class log_daemon(Thread):
    """
    The demon thread that make sure log prcoess working
    """
    def run(self):
        self.cache=get_im_memory_share()

        while DAEMON:
            if DEBUG:
                print('Daemon is checking...')
            log('Log Daemon checking...','Log Daemon')
            global DAEMON_STATUS
            rtn,rcd,rtype=self.cache.run('select count(1) cnt from logs')
            if rtype=='data':
                DAEMON_STATUS='Checking'
                cnum=rtn[0].cnt
                # Purge only once the table holds more than twice the retention target.
                ENUM=(MAX_NUM_LOGS_RETAIN*2)
                if cnum>ENUM:
                    # Delete everything except the newest MAX_NUM_LOGS_RETAIN rows.
                    rtn,rcd,rtype=self.cache.run('select max(id) mid from logs')
                    smid=rtn[0].mid-MAX_NUM_LOGS_RETAIN
                    msg='Daemon is cleaning up log rows with id <= '+str(smid)

                    log(msg,'Log Daemon')
                    sql='delete from logs where id<='+str(smid)
                    if DEBUG:
                        print(msg)
                        print(sql)
                    self.cache.run(sql)
            else:
                DAEMON_STATUS='Error'
                log('[ERROR] Daemon failed, reason: '+rtn.args[0],'Log Daemon','Error')

            DAEMON_STATUS='Sleeping'

            if DEBUG:
                print('Daemon is going to sleep',DAEMON_INTERVAL/60,'minutes before next check.')
            log('Log Daemon sleeping, will be awake in '+str(DAEMON_INTERVAL/60)+' minutes','Log Daemon')
            time.sleep(DAEMON_INTERVAL)
        #End of process
        if DEBUG:
            print('Daemon is stopped')
        log('Log Daemon stopped','Log Daemon')
    
    
def start_daemon():
    global DAEMON
    DAEMON=True
    log('Log Daemon starting...','Log Daemon')
    t=log_daemon()
    t.daemon=True
    t.start()
    return t
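
The purge rule deserves spelling out: with MAX_NUM_LOGS_RETAIN = 50000 the daemon only acts once the table exceeds 100,000 rows, and it then deletes every row whose id is at most max(id) - 50000, so roughly the newest 50,000 entries survive. The helper below is a hypothetical restatement of that arithmetic, not part of the module (the captured output further down was clearly produced with much smaller test values):

def purge_threshold(row_count, max_id, retain=50000):
    # Same rule as log_daemon: purge only above 2x the retention target,
    # and delete every id up to max_id - retain.
    if row_count > retain * 2:
        return max_id - retain     # i.e. 'delete from logs where id<=' this value
    return None

print(purge_threshold(100001, 100001))   # 50001 -> newest ~50000 rows are kept
print(purge_threshold(90000, 90000))     # None  -> below the trigger, nothing purged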

In [9]:
def start(debug=False):
    global logger,daemon,DEBUG
    DEBUG=debug
    logger=start_logger()
    daemon=start_daemon()
#logger.join()
#daemon.join()


Init, creating logs table
-1 rows
-1 rows
1 rows
1 rows
1 rows
Demon is checking...
1 rows
Demon is going to sleep 0.16666666666666666 minutes before next check.
1 rows
1 rows
Demon is checking...
1 rows
Demon is going to sleep 0.16666666666666666 minutes before next check.
1 rows
1 rows
Demon is checking...
1 rows
1 rows
1 rows
Demon is going to sleep 0.16666666666666666 minutes before next check.
Demon is checking...
1 rows
Demon is going to sleep 0.16666666666666666 minutes before next check.
1 rows
1 rows
Demon is checking...
1 rows
1 rows
Demon is cleaning up logs id less than 6
delete from logs where id<=6
6 rows
Demon is going to sleep 0.16666666666666666 minutes before next check.
1 rows
1 rows
1 rows
Demon is checking...
1 rows
Demon is going to sleep 0.16666666666666666 minutes before next check.
1 rows
1 rows
Demon is checking...
1 rows
Demon is going to sleep 0.16666666666666666 minutes before next check.
1 rows
1 rows
Demon is checking...
1 rows
1 rows
Demon is cleaning up logs id less than 13
delete from logs where id<=13
7 rows
Demon is going to sleep 0.16666666666666666 minutes before next check.
1 rows
1 rows
1 rows
Demon is checking...
1 rows
Demon is going to sleep 0.16666666666666666 minutes before next check.
1 rows
1 rows
Demon is checking...
1 rows
Demon is going to sleep 0.16666666666666666 minutes before next check.
1 rows
1 rows
---------------------------------------------------------------------------
KeyboardInterrupt                         Traceback (most recent call last)
<ipython-input-9-1ba7ca0bef03> in <module>()
      1 logger=start()
      2 daemon=start_daemon()
----> 3 logger.join()
      4 daemon.join()

/usr/lib/python3.5/threading.py in join(self, timeout)
   1052 
   1053         if timeout is None:
-> 1054             self._wait_for_tstate_lock()
   1055         else:
   1056             # the behavior of a negative timeout isn't documented, but

/usr/lib/python3.5/threading.py in _wait_for_tstate_lock(self, block, timeout)
   1068         if lock is None:  # already determined that the C code is done
   1069             assert self._is_stopped
-> 1070         elif lock.acquire(block, timeout):
   1071             lock.release()
   1072             self._stop()

KeyboardInterrupt: 

In [ ]:
start(False)
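
For reference, a fuller session built on the helpers above could look like the sketch below. The select relies only on the cache.run() call that log_daemon already uses; the (rows, code, type) result shape is an assumption about the DB module, so treat it as illustrative:

start(False)                          # start the Logger thread and the purge daemon
log('Notebook session started', 'Notebook')
log('Model training kicked off', 'Trainer')

time.sleep(2)                         # let the Logger thread flush the queue

cache = get_im_memory_share()
rows, rcd, rtype = cache.run('select * from logs')
if rtype == 'data':                   # same convention the daemon checks for
    for row in rows:
        print(row)

stop()                                # both threads exit after their current sleep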