#!/usr/bin/env python

from RECT import *

import sys, os
from clogger import *
from makeXml import *
import time
from decimal import *
import string
import traceback
from rectparams import *
import types

class TestResult(unittest.TestResult):
    """Holder for test result information.

    Test results are automatically managed by the TestCase and TestSuite
    classes, and do not need to be explicitly manipulated by writers of tests.

    Each instance holds the total number of tests run, and collections of
    failures and errors that occurred among those test runs. The collections
    contain tuples of (testcase, exceptioninfo), where exceptioninfo is the
    formatted traceback of the error that occurred.  Per-test wall-clock
    durations are additionally collected in self.timetest, in run order.
    """
    def __init__(self):
        self.failures = []      # (test, formatted traceback) per failure
        self.errors = []        # (test, formatted traceback) per error
        self.results = []       # (test, outcome) for every finished test
        self.testsRun = 0
        self.shouldStop = False  # was 0; stop() sets True, keep it boolean
        self.starttime = 0      # timestamp of the currently running test
        self.stoptime = 0
        self.timetest = []      # elapsed seconds for each finished test

    def startTest(self, test):
        "Called when the given test is about to be run"
        self.starttime = time.time()
        self.testsRun = self.testsRun + 1

    def stopTest(self, test):
        "Called when the given test has been run"
        pass

    def addError(self, test, err):
        """Called when an error has occurred. 'err' is a tuple of values as
        returned by sys.exc_info().
        """
        self.stoptime = time.time()
        self.timetest.append(self.stoptime - self.starttime)
        self.errors.append((test, self._exc_info_to_string(err, test)))
        self.results.append((test, self._exc_info(err, test)))

    def addFailure(self, test, err):
        """Called when a failure has occurred. 'err' is a tuple of values as
        returned by sys.exc_info()."""
        self.stoptime = time.time()
        self.timetest.append(self.stoptime - self.starttime)
        self.failures.append((test, self._exc_info_to_string(err, test)))
        self.results.append((test, self._exc_info(err, test)))

    def addSuccess(self, test):
        "Called when a test has completed successfully"
        self.stoptime = time.time()
        self.timetest.append(self.stoptime - self.starttime)
        self.results.append((test, "DONE!"))

    def wasSuccessful(self):
        "Tells whether or not this result was a success"
        return len(self.failures) == len(self.errors) == 0

    def stop(self):
        "Indicates that the tests should be aborted"
        self.shouldStop = True

    def _exc_info_to_string(self, err, test):
        """Converts a sys.exc_info()-style tuple of values into a string."""
        exctype, value, tb = err
        # Skip test runner traceback levels
        while tb and self._is_relevant_tb_level(tb):
            tb = tb.tb_next
        if exctype is test.failureException:
            # Skip assert*() traceback levels
            length = self._count_relevant_tb_levels(tb)
            return ''.join(traceback.format_exception(exctype, value, tb, length))
        return ''.join(traceback.format_exception(exctype, value, tb))

    def _exc_info(self, err, test):
        """Converts a sys.exc_info()-style tuple of values into a tuple,
        with the test-runner frames stripped from the traceback."""
        exctype, value, tb = err

        # Skip test runner traceback levels
        while tb and self._is_relevant_tb_level(tb):
            tb = tb.tb_next

        return (exctype, value, tb)

    def _is_relevant_tb_level(self, tb):
        # `in` replaces dict.has_key(), which was removed in Python 3.
        return '__unittest' in tb.tb_frame.f_globals

    def _count_relevant_tb_levels(self, tb):
        """Count traceback frames up to the first test-runner frame."""
        length = 0
        while tb and not self._is_relevant_tb_level(tb):
            length += 1
            tb = tb.tb_next
        return length

    def __repr__(self):
        # NOTE(review): _strclass is not defined in this file; presumably
        # provided by one of the star imports at the top — verify.
        return "<%s run=%i errors=%i failures=%i>" % \
               (_strclass(self.__class__), self.testsRun, len(self.errors),
                len(self.failures))

class _TextTestResult(TestResult):
    """A test result class that can print formatted text results to a stream.

    Used by TextTestRunner.
    """
    separator1 = '=' * 70
    separator2 = '-' * 70

    def __init__(self, stream, descriptions, verbosity):
        TestResult.__init__(self)
        self.stream = stream
        self.showAll = verbosity > 1
        self.dots = verbosity == 1
        self.descriptions = descriptions
        #self.testtime = ''

    def getDescription(self, test):
        if self.descriptions:
            return test.shortDescription() or str(test)
        else:
            return str(test)

    def testTime(self, test):
        return self.timetest

    def startTest(self, test):
        TestResult.startTest(self, test)
        if self.showAll:
            self.stream.write(self.getDescription(test))
            self.stream.write(" ... ")

    def addSuccess(self, test):
        TestResult.addSuccess(self, test)
        if self.showAll:
            self.stream.writeln("ok")
        elif self.dots:
            self.stream.write('.')

    def addError(self, test, err):
        TestResult.addError(self, test, err)
        if self.showAll:
            self.stream.writeln("ERROR")
        elif self.dots:
            self.stream.write('E')

    def addFailure(self, test, err):
        TestResult.addFailure(self, test, err)
        if self.showAll:
            self.stream.writeln("FAIL")
        elif self.dots:
            self.stream.write('F')

    def printErrors(self):
        if self.dots or self.showAll:
            self.stream.writeln()
        self.printErrorList('ERROR', self.errors)
        self.printErrorList('FAIL', self.failures)

    def printErrorList(self, flavour, errors):
        for test, err in errors:
            self.stream.writeln(self.separator1)
            self.stream.writeln("%s: %s" % (flavour,self.getDescription(test)))
            self.stream.writeln(self.separator2)
            self.stream.writeln("%s" % err)

    def printResults(self):
        for test, err in self.results:
            print test, err


class TextTestRunner:
    """A test runner class that displays results in textual form.

    It prints out the names of tests as they are run, errors as they
    occur, and a summary of the results at the end of the test run.
    """
    def __init__(self, stream=sys.stderr, descriptions=1, verbosity=1):
        # Wrap the raw stream so it gains a writeln() method.
        self.stream = unittest._WritelnDecorator(stream)
        self.descriptions = descriptions
        self.verbosity = verbosity

    def _makeResult(self):
        """Build the result object that collects outcomes for one run."""
        return _TextTestResult(self.stream, self.descriptions, self.verbosity)

    def run(self, test):
        "Run the given test case or test suite."
        result = self._makeResult()
        started = time.time()
        test(result)
        elapsed = time.time() - started

        result.printErrors()
        self.stream.writeln(result.separator2)

        count = result.testsRun
        plural = ""
        if count != 1:
            plural = "s"
        self.stream.writeln("Ran %d test%s in %.3fs" % (count, plural, elapsed))
        self.stream.writeln()

        if result.wasSuccessful():
            self.stream.writeln("OK")
        else:
            # Summarize as e.g. "FAILED (failures=2, errors=1)".
            summary = []
            if result.failures:
                summary.append("failures=%d" % len(result.failures))
            if result.errors:
                summary.append("errors=%d" % len(result.errors))
            self.stream.write("FAILED (")
            self.stream.write(", ".join(summary))
            self.stream.writeln(")")
        return result

def import_libs(dir):
    """ Imports the libs, returns a list of the libraries. 
    Pass in dir to scan """
    import glob

    library_list = []
    for path in glob.glob(os.path.join(dir, "*.py")):
        # Handles no-extension files, etc.
        filename = os.path.split(path)[1]
        module_name, ext = os.path.splitext(filename)
        if ext != '.py':  # Important, ignore .pyc/other files.
            continue
        library_list.append(__import__(module_name))
    return library_list

def filter_modules(modules, testnames):
    """Return the names of modules that define at least one wanted test class.

    A module qualifies when it has a class attribute that subclasses
    TestCase and whose name appears in *testnames*.  The module name is
    appended once per matching class, so a module defining several
    matching classes appears several times (existing behaviour, preserved).

    modules   -- list of imported module objects to inspect
    testnames -- container of test class names to look for
    """
    # types.ClassType (old-style classes) exists only on Python 2; fall
    # back to `type` so the isinstance() tuple stays valid on Python 3,
    # where the original raised AttributeError on first use.
    class_types = (type, getattr(types, 'ClassType', type))
    filtered_modules = []

    for module in modules:
        for name in dir(module):
            obj = getattr(module, name)
            # NOTE(review): TestCase is not imported in this file directly;
            # presumably it comes from one of the star imports — verify.
            if (isinstance(obj, class_types) and
                issubclass(obj, TestCase) and (name in testnames)):
                filtered_modules.append(module.__name__)

    return filtered_modules


def main():
    """Entry point: read the YAML config, build and run the test suites,
    then write an HTML-style report (CLogger) and an XML report (CMakexml).

    Command line: [cfgfile [cfgfile2]] — defaults to 'config.yaml'.

    Raises:
        RuntimeError: when more than two command line arguments are given.
    """
    import unittest
    if len(sys.argv) > 3:
        raise RuntimeError('Too many command line arguments!!!!!!!!!!')

    if len(sys.argv) == 3:
        cfgfile = sys.argv[1]
        cfgfile2 = sys.argv[2]
        config = initialize(cfgfile, cfgfile2)

    elif len(sys.argv) == 2:
        cfgfile = sys.argv[1]
        config = initialize(cfgfile)
    else:
        cfgfile = 'config.yaml'
        config = initialize(cfgfile)

    # Collect test names declared in the config plus their descriptions.
    tests_load_by_names = []
    testDescr = []
    for g in config.keys():

        if (getattr(config[g], '__class__').__name__ == 'list'):
            for h in config[g]:
                if (hasattr(h, "keys")):
                    # NOTE(review): this condition is always true — `is`
                    # never matches a fresh list literal, and the non-empty
                    # list ['slaves', 'shares'] is truthy.  The intent was
                    # probably sorted(h.keys()) == ['shares', 'slaves'];
                    # kept as-is to preserve current behaviour.
                    if h.keys() is ['shares', 'slaves'] or ['slaves', 'shares']:
                        # NOTE(review): g.split(".")[0] is always a str, so
                        # its class name can never be 'module' — the else
                        # branch always runs.  Kept as-is.
                        if (g.split(".")) and (getattr(g.split(".")[0], '__class__').__name__ == 'module'):
                            pass
                        else:
                            tests_load_by_names.append(g)
                        testDescr.append({g : h})

    curdir = os.path.abspath(os.curdir)
    if 'modules' not in config:
        # BUGFIX: was `self.config.get('path', curdir)` — `self` does not
        # exist in a module-level function and raised NameError whenever
        # this branch was taken.
        choose_dir = config.get('path', curdir)
        lib_list = import_libs(choose_dir)
        filtered_modules = filter_modules(lib_list, tests_load_by_names)

        if filtered_modules == []:
            # Fall back to the data directory (name supplied by a star
            # import at the top of the file).
            lib_list = import_libs(datadir)
            filtered_modules = filter_modules(lib_list, tests_load_by_names)
        suite = TestLoader(config).loadTestsFromNames(filtered_modules)
    else:
        suite = TestLoader(config).loadTests()

    result = []
    slaves = []
    # Describe each slave: its proxy handle plus an optional 'system' tag.
    for name in config['proxies']:
        if ('system' not in config['slaves'][name]):
            config['slaves'][name]['system'] = ''
        slaves.append({name:{'start': config['proxies'][name], 'system': config['slaves'][name]['system']}})

    test_config = []
    test_config.append({'shares': config['shares'], 'slaves': slaves, 'testDescr' : testDescr})
    xml_result = []
    for i in suite:

        startTime = time.time()
        xmltimeStart = time.strftime("%H:%M:%S", time.localtime())
        s = TextTestRunner(verbosity=2).run(i)

        test_result = []
        test_errors = []
        idtest = 1

        for y in i._tests:

            count1 = 1  # anchor counter for ERROR links in the HTML report
            count2 = 1  # anchor counter for FAIL links
            for z in y._tests:
                z_index = y._tests.index(z)
                test_mountOptions = []
                testResult = "<P style='color:green'>OK</P>"
                testResultXml = {"OK" : {}}

                testtime = s.testTime(z)
                timeStr = str(testtime)
                # Attach error/failure exception details to the XML record.
                for i_result in s.results:
                    if (z == i_result[0]):
                        for i_error in s.errors:
                            if (z == i_error[0]):
                                testResultXml = {"ERROR": {'exception' : {'code': str(i_result[1][0]), 'reason': str(i_result[1][1])}}}

                        for i_fail in s.failures:
                            if (z == i_fail[0]):
                                testResultXml = {"FAIL": {'exception' : {'code': str(i_result[1][0]), 'reason': str(i_result[1][1])}}}

                # HTML report links to the numbered error/failure anchors.
                for i_error in s.errors:
                    if (z == i_error[0]):
                        testResult = "<A href='#error_" + str(count1) + "'><P style='color:red'>ERROR</P></A>"
                        count1 += 1

                for i_fail in s.failures:
                    if (z == i_fail[0]):
                        testResult = "<A href='#fail_" + str(count2) + "'><P style='color:red'>FAIL</P></A>"
                        count2 += 1

                test_slaves = []
                for name in z.slave_names:
                    # Query the slave's system info once and reuse it (the
                    # original issued six identical sysinfo() calls per slave).
                    si = config['proxies'][name].sysinfo()
                    test_slaves.append({'description': str(config['proxies'][name]),
                                        'mountoptions': z.getOption(name, 'mountOptions', '').split(","),
                                        'sysinfo': {z.getOption(name, 'system', 'Windows') : {
                                            'Cifstype': str(si.cifstype),
                                            'Sysname': str(si.sysname),
                                            'Nodename': str(si.nodename),
                                            'Release': str(si.release),
                                            'Version': str(si.version),
                                            'Machine': str(si.machine)}}})

                test_shares = []
                for name in z.share_names:
                    if ('system' in config['shares'][name]):
                        test_shares.append({'description':config['shares'][name]['start'], 'system':config['shares'][name]['system']})
                    else:
                        test_shares.append({'description':config['shares'][name]['start'], 'system':'Windows'})

                for slave in z.slave_names:
                    test_mountOptions.append({slave : z.getOption(slave, 'mountOptions', '')})

                test = {'name':{'modulename':z.__module__, 'classname':z.tID, 'methodname':z._testMethodName}, 'description': {'shortdescr':z.shortDescription(), 'fulldescr':z.__doc__}}

                # Round the per-test duration to 3 significant digits, up.
                xmltimeTest = Context(prec=3, rounding=ROUND_UP).create_decimal(str(testtime[z_index]))

                xml_result.append({'timendate': {'testtime':xmltimeTest, 'suitestarttime': xmltimeStart}, 'shares': test_shares, 'slaves': test_slaves, 'testdescr': test, 'result': testResultXml, 'id' : str(idtest) })

                test_result.append({'tID' : z.tID, 'testResult' : testResult, 'description' : str(z.shortDescription()).split(" (s")[0], 'test method name': z._testMethodName, 'module name' : z.__module__, 'slaves' : [z.slave_names, test_mountOptions], 'shares' : z.share_names})

                idtest += 1
        stopTime = time.time()
        timeTaken = stopTime - startTime
        timeStr = str(timeTaken)
        test_errors.append({'errors':s.errors, 'failures': s.failures, 'tests_run': idtest-1, 'time_run': timeStr})

        result.append((test_result, test_errors))

    # Write the HTML-style report ...
    o_Log = CLogger()
    o_Log.WriteInfoInTemplate( capture_result = result, capture_config = test_config )

    # ... and the XML report.
    o_Xml = CMakexml()
    o_Xml.WriteInfoInXML (capture_result = xml_result)

if __name__ == '__main__':
    main()

