diff -uNr a/logotron/MANIFEST.TXT b/logotron/MANIFEST.TXT
--- a/logotron/MANIFEST.TXT 5a0fb740265d9cbed4638a8d65b1509db23094969ecf783d97ec77e11fc1e4962eb8976a9c6bc12da098293e8c1c9084c34b78a149bdcff8fded569e88c8144b
+++ b/logotron/MANIFEST.TXT 65b06852de68a2c839b687f0863e261cd387a25a94f10d8c84beb58da455057e6ffd0244a664c1cc64a6004cc4dd1a44b5440ebfcb83bdbaf62c605bee42737f
@@ -5,3 +5,4 @@
 589783 irssi2tmsr diana_coman "Converter of irssi logs to the tmsr format used by the logotron. Added authors in MANIFEST.TXT."
 590448 uniturds_etc asciilifeform "Phf's algo for uniturd digestion; cosmetic improvements to WWW displayer."
 590458 line_wraps asciilifeform "Trinque's method to force wrap in long lines; Removed some commented rubbish from reader.py."
+590714 znc2tmsr_etc lobbes "Converter of znc logs to the tmsr format used by the logotron. Small fixes to eat_dump.py."
diff -uNr a/logotron/eat_dump.py b/logotron/eat_dump.py
--- a/logotron/eat_dump.py 5614d6523b1512656953c12732db5daa56b49288251b879427a9b8e33da7db95847e441d2ad007896182c5acb27f0ed808b072a25c12b4789cf85cc186e68f68
+++ b/logotron/eat_dump.py e2818c8381e2a08187a2b3859fe79ac92e2694f14d9214c3fc008e52a48dbe2bb17b40601c30950d3035bbb892b21d2c83d1ab1a45bb595cd1e3ebaf26c97cba
@@ -62,9 +62,16 @@
     if speaker == "*":
         spl = payload.split(' ', 1)
         speaker = spl[0]
-        payload = spl[1]
+        try:
+            payload = spl[1]
+        except IndexError:
+            payload = ""
         self_speak = True
-
+
+    ## Handle uniturds using the phf algorithm
+    payload = payload.decode('latin-1')
+    payload = payload.encode('utf-8')
+
     ## Put in DB:
     try:
         exec_db('''insert into loglines (idx, t, chan, era, speaker, self, payload)
diff -uNr a/logotron/logconverters/znc2tmsr/convert.py b/logotron/logconverters/znc2tmsr/convert.py
--- a/logotron/logconverters/znc2tmsr/convert.py false
+++ b/logotron/logconverters/znc2tmsr/convert.py 32c82b3d22623bd85715e1b568bec222b708cf817f00558d41dab821c4b1769b7fa78465cb19671146c780041090ab6595233ba7dfa260c6b1b1d413ac158443
@@ -0,0 +1,58 @@
+#!/usr/bin/env python
+
+from os import listdir
+from os.path import isfile, join
+from datetime import datetime
+
+# Set the dir you'd like to eat, the suffix of each file, and the filename to shit into
+dir_to_eat = '/path/to/eat/dir/'
+dir_files_end_with = '.log'
+shit_into = '/path/to/shitfile.txt'
+
+# Set the top limit of the line index
+archive_top_inx = 999999
+
+# Leave these variables alone
+archive_bottom_inx = 0
+dir_data = [f for f in listdir(dir_to_eat) if isfile(join(dir_to_eat, f))]
+logline_array = []
+
+# Sort the dir_data in chronological order
+date_format = '%Y-%m-%d'
+dir_data_chrono = []
+for logfile in dir_data:
+    if dir_files_end_with in logfile:
+        logdate = logfile.replace('.log','')
+        dir_data_chrono.append(logdate)
+dir_data_chrono = sorted(dir_data_chrono, key=lambda d:datetime.strptime(d,date_format))
+
+# Shit lines into a single file
+def shit_lines(line_inx):
+    f = open(shit_into, 'a')
+    for line in logline_array:
+        indexed_line = ('%s;%s') % (line_inx,line)
+        f.write(indexed_line)
+        line_inx = line_inx + 1
+    f.close()
+
+# Eat each file in the dir
+for logfile in dir_data_chrono:
+    logfile_full_path = join(dir_to_eat,('%s.log' % logfile))
+    with open(logfile_full_path, 'r') as logfile_data:
+        for logline in logfile_data:
+            znc_timestamp = ('%s %s') % (logfile, logline[1:9])
+            znc_datetimestamp = datetime.strptime(znc_timestamp, '%Y-%m-%d %H:%M:%S')
+            epoch_datetimestamp = znc_datetimestamp.strftime('%s')
+            if '<' in logline[11:12]:
+                #normal line
+                speaker = logline[12:logline.find('>')]
+                logline_array.append(('%s;%s;%s') % (epoch_datetimestamp, speaker, logline[logline.find('>')+2:]))
+            elif '*' in logline[12:13]:
+                #connectolade; ignore
+                pass
+            else:
+                #action
+                logline_array.append(('%s;*;%s') % (epoch_datetimestamp, logline[13:]))
+archive_bottom_inx = archive_top_inx - (len(logline_array) - 1)
+shit_lines(archive_bottom_inx)
+logline_array = []
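
The eat_dump.py hunk above applies phf's uniturd treatment to each payload before it goes into the database: the raw byte string is decoded as latin-1, where every byte value maps to a code point, and then re-encoded as UTF-8, so whatever bytes the dump contained always land in the loglines table as valid UTF-8. A minimal sketch of the round trip, assuming Python 2 as used in eat_dump.py; the sample payload is made up:

    raw = 'caf\xe9'                   # hypothetical payload with a lone 0xE9 byte
    cooked = raw.decode('latin-1')    # u'caf\xe9' -- latin-1 decodes any byte value
    cooked = cooked.encode('utf-8')   # 'caf\xc3\xa9' -- now valid UTF-8
    print repr(cooked)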
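
The fixed-position slicing in convert.py assumes the stock znc log layout, where each line opens with a '[HH:MM:SS] ' stamp and the filename carries the date. A minimal sketch of how the three line shapes fall through those slices, on hypothetical sample lines (nicks and text are invented; Python 2, mirroring the converter's indices):

    samples = [
        '[02:13:45] <lobbes> hello world\n',       # normal line
        '[02:14:02] * lobbes waves\n',             # action
        '[02:14:10] *** Joins: somebody (host)\n', # connectolade, skipped
    ]
    for logline in samples:
        stamp = logline[1:9]                           # 'HH:MM:SS'
        if '<' in logline[11:12]:
            speaker = logline[12:logline.find('>')]    # text between '<' and '>'
            print '%s;%s;%s' % (stamp, speaker, logline[logline.find('>')+2:]),
        elif '*' in logline[12:13]:
            pass                                       # '***' server notices are dropped
        else:
            print '%s;*;%s' % (stamp, logline[13:]),   # '* nick action'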