twisted.logger.FilteringLogObserver - Python examples

Here are examples of the Python API twisted.logger.FilteringLogObserver, taken from open source projects.

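For orientation, here is a minimal, self-contained sketch (not taken from any of the projects below) of the pattern the examples share: wrap an observer in a FilteringLogObserver with a LogLevelFilterPredicate, then begin logging. The "demo" namespace is illustrative.

import sys

from twisted.logger import (
    FilteringLogObserver, LogLevel, LogLevelFilterPredicate,
    Logger, globalLogBeginner, textFileLogObserver,
)

# Drop anything below info; pass everything else through to stdout.
predicate = LogLevelFilterPredicate(defaultLogLevel=LogLevel.info)
observer = FilteringLogObserver(textFileLogObserver(sys.stdout), [predicate])
globalLogBeginner.beginLoggingTo([observer])

log = Logger(namespace="demo")
log.debug("filtered out by the predicate")
log.info("written to stdout")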

Example 1: log.py (Apache License 2.0, author: apple)
    @classmethod
    def addFilteredObserver(cls, observer):
        log.addObserver(FilteringLogObserver(
            observer,
            [cls.filterPredicate]
        ))

Example 2: log.py (Apache License 2.0, author: apple)
    def beginLoggingTo(
        self, observers, discardBuffer=False, redirectStandardIO=True
    ):
        new_observers = []
        for observer in observers:
            new_observers.append(FilteringLogObserver(observer, [Logger.filterPredicate]))
        self.beginner.beginLoggingTo(new_observers, discardBuffer, redirectStandardIO)
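Both examples above wrap each observer with a project-specific filterPredicate that is not shown here. A predicate is any ILogFilterPredicate provider (or plain callable) mapping an event dict to a PredicateResult; a hypothetical sketch of that shape, with an invented namespace check:

from zope.interface import implementer

from twisted.logger import ILogFilterPredicate, PredicateResult


@implementer(ILogFilterPredicate)
class NamespacePredicate(object):
    """Illustrative only: keep events whose namespace matches a prefix."""

    def __init__(self, prefix):
        self.prefix = prefix

    def __call__(self, event):
        if event.get("log_namespace", "").startswith(self.prefix):
            return PredicateResult.maybe  # no opinion; next predicate decides
        return PredicateResult.no         # filter the event out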

Example 3: usage.py (Apache License 2.0, author: apple)
    def startLogging(self):
        logFile = self.options.get("logFile", sys.stderr)

        fileLogObserverFactory = self.options.get(
            "fileLogObserverFactory", textFileLogObserver
        )

        fileObserver = fileLogObserverFactory(logFile)

        logLevelPredicate = LogLevelFilterPredicate(
            defaultLogLevel=self.options.get("logLevel", LogLevel.info)
        )

        filteringObserver = FilteringLogObserver(
            fileObserver, [logLevelPredicate]
        )

        globalLogBeginner.beginLoggingTo([filteringObserver])
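Note that globalLogBeginner.beginLoggingTo is effectively a once-per-process call (later calls emit a warning event), and by default it also redirects sys.stdout and sys.stderr into the logging system, as the redirectStandardIO parameter in Example 2 hints.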

Example 4: logger.py (GNU General Public License v2.0, author: TheTorProject)
def setup_logging(log_level, log_name, log_directory=""):
    """
    Configure the logger to use the specified log file and log level
    """
    log_filter = LogLevelFilterPredicate()
    log_filter.setLogLevelForNamespace("bwscanner", LogLevel.levelWithName(log_level.lower()))

    # Set up logging
    log_file = DailyLogFile(log_name, log_directory)
    file_observer = FileLogObserver(log_file, log_event_format)
    console_observer = FileLogObserver(sys.stdout, log_event_format)

    file_filter_observer = FilteringLogObserver(file_observer, (log_filter,))
    console_filter_observer = FilteringLogObserver(console_observer, (log_filter,))

    globalLogPublisher.addObserver(file_filter_observer)
    globalLogPublisher.addObserver(console_filter_observer)
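Here both filtering observers share a single LogLevelFilterPredicate instance, so one setLogLevelForNamespace call adjusts file and console output alike; the predicates argument accepts any iterable, a tuple in this example and lists elsewhere.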

Example 5: _runner.py (MIT License, author: wistbean)
    def startLogging(self):
        """
        Start the L{twisted.logger} logging system.
        """
        logFile = self._logFile

        fileLogObserverFactory = self._fileLogObserverFactory

        fileLogObserver = fileLogObserverFactory(logFile)

        logLevelPredicate = LogLevelFilterPredicate(
            defaultLogLevel=self._defaultLogLevel
        )

        filteringObserver = FilteringLogObserver(
            fileLogObserver, [logLevelPredicate]
        )

        globalLogBeginner.beginLoggingTo([filteringObserver])
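This is the same pattern as Example 3: build a file observer from a factory, wrap it with a LogLevelFilterPredicate, and hand the single filtering observer to globalLogBeginner.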

Example 6: log.py (GNU General Public License v3.0, author: AnemoneLabs)
def get_filtering_observer(observer, log_level):
    return FilteringLogObserver(observer,
                                predicates=[LogLevelFilterPredicate(log_level),
                                            filter_unmessage_event])
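filter_unmessage_event is defined elsewhere in that project and is not shown. A plain function works as a predicate so long as it returns a PredicateResult; a hypothetical reconstruction (the "unmessage_hide" key is invented for illustration):

from twisted.logger import PredicateResult


def filter_unmessage_event(event):
    # Invented logic: drop events the application has flagged, and
    # express no opinion about the rest.
    if event.get("unmessage_hide", False):
        return PredicateResult.no
    return PredicateResult.maybe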

Example 7: upgrade.py (Apache License 2.0, author: apple)
def main(argv=sys.argv, stderr=sys.stderr, reactor=None):
    """
    Do the export.
    """
    from twistedcaldav.config import config
    if reactor is None:
        from twisted.internet import reactor

    options = UpgradeOptions()
    try:
        options.parseOptions(argv[1:])
    except UsageError as e:
        usage(e)

    try:
        output = options.openOutput()
    except IOError as e:
        stderr.write("Unable to open output file for writing: %s\n" % (e,))
        sys.exit(1)

    if options.merge:
        def setMerge(data):
            data.MergeUpgrades = True
        config.addPostUpdateHooks([setMerge])

    def makeService(store):
        return UpgraderService(store, options, output, reactor, config)

    def onlyUpgradeEvents(eventDict):
        text = formatEvent(eventDict)
        output.write(formatTime(eventDict.get("log_time", time.time())) + " " + text + "\n")
        output.flush()

    if not options["status"] and not options["check"]:
        # When doing an upgrade, always send L{LogLevel.warn} logging to the tool output
        log.observer.addObserver(FilteringLogObserver(
            onlyUpgradeEvents,
            [LogLevelFilterPredicate(defaultLogLevel=LogLevel.warn), ]
        ))

    def customServiceMaker():
        customService = CalDAVServiceMaker()
        customService.doPostImport = options["postprocess"]
        return customService

    def _patchConfig(config):
        config.FailIfUpgradeNeeded = options["status"] or options["check"]
        config.CheckExistingSchema = options["check"]
        if options["prefix"]:
            config.UpgradeHomePrefix = options["prefix"]
        if not options["status"] and not options["check"]:
            config.DefaultLogLevel = "debug"

    def _onShutdown():
        if not UpgraderService.started:
            print("Failed to start service.")

    utilityMain(options["config"], makeService, reactor, customServiceMaker, patchConfig=_patchConfig, onShutdown=_onShutdown, verbose=options["debug"])

Example 8: logger.py (GNU Affero General Public License v3.0, author: daq-tools)
def startLogging(settings, stream=None, level=LogLevel.debug):
    global predicate

    fileObserver = logObserver(stream)
    predicate    = LogLevelFilterPredicate(defaultLogLevel=level)

    if settings.options.debug_mqtt:
        predicate.setLogLevelForNamespace('kotori.daq.services.mig', LogLevel.debug)
        predicate.setLogLevelForNamespace('kotori.daq.application.mqttkit', LogLevel.debug)

    if settings.options.debug_mqtt_driver:
        predicate.setLogLevelForNamespace('kotori.daq.intercom.mqtt', LogLevel.debug)
        predicate.setLogLevelForNamespace('mqtt', LogLevel.debug)
        predicate.setLogLevelForNamespace('paho.mqtt', LogLevel.debug)
    else:
        predicate.setLogLevelForNamespace('kotori.daq.intercom.mqtt', LogLevel.info)
        predicate.setLogLevelForNamespace('mqtt', LogLevel.info)
        predicate.setLogLevelForNamespace('paho.mqtt', LogLevel.info)

    if settings.options.debug_influx:
        predicate.setLogLevelForNamespace('kotori.daq.storage.influx', LogLevel.debug)

    if settings.options.debug_io:
        predicate.setLogLevelForNamespace('kotori.io', LogLevel.debug)

    observers    = [ FilteringLogObserver(observer=fileObserver, predicates=[predicate]) ]
    globalLogBeginner.beginLoggingTo(observers)
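Because predicate is kept as a module global, the per-namespace levels can still be adjusted after beginLoggingTo has run; FilteringLogObserver consults its predicates for every event, so changes take effect immediately.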

Example 9: log.py (MIT License, author: Fluent-networks)
    def start(self, console, logfile, debug):
        """Configure and start logging based on user preferences
        
        Args:
            console (bool): Console logging enabled
            logfile (str): Logfile path
            debug (bool): Debugging flag
        """
        global predicate
        
        # Set logging level
        level = LogLevel.debug if debug else LogLevel.info
        predicate = LogLevelFilterPredicate(defaultLogLevel=level)
        
        # Log to console option
        if console:
            f = sys.stdout
        
        # Log to file option
        else:
            # Check the file is valid and can be opened in append mode
            if os.path.exists(logfile) and not os.path.isfile(logfile):
                print "Logfile %s is not a valid file. Exiting." % logfile
                return False
            try:
                f = open(logfile, 'a')
            except IOError:
                print "Can't open logfile %s. Exiting." % logfile
                return False
        
        # Set the observer
        observer = textFileLogObserver(f)
        observers = [FilteringLogObserver(observer=observer,
                                          predicates=[predicate])]
        # Begin logging
        globalLogBeginner.beginLoggingTo(observers)
        return True

Example 10: server.py (GNU General Public License v3.0, author: piqueserver)
    def __init__(self, interface: bytes, config_dict: Dict[str, Any]) -> None:
        # logfile path relative to config dir if not abs path
        log_filename = logfile.get()
        if log_filename.strip():  # catches empty filename
            if not os.path.isabs(log_filename):
                log_filename = os.path.join(config.config_dir, log_filename)
            ensure_dir_exists(log_filename)
            if logging_rotate_daily.get():
                logging_file = DailyLogFile(log_filename, '.')
            else:
                logging_file = open(log_filename, 'a')
            predicate = LogLevelFilterPredicate(
                LogLevel.levelWithName(loglevel.get()))
            observers = [
                FilteringLogObserver(
                    textFileLogObserver(sys.stderr), [predicate]),
                FilteringLogObserver(
                    textFileLogObserver(logging_file), [predicate])
            ]
            globalLogBeginner.beginLoggingTo(observers)
            log.info('piqueserver started on %s' % time.strftime('%c'))

        self.config = config_dict
        if random_rotation.get():
            self.map_rotator_type = random_choice_cycle
        else:
            self.map_rotator_type = itertools.cycle
        self.default_time_limit = default_time_limit.get()
        self.default_cap_limit = cap_limit.get()
        self.advance_on_win = int(advance_on_win.get())
        self.win_count = itertools.count(1)
        self.bans = NetworkDict()

        # attempt to load a saved bans list
        try:
            with open(os.path.join(config.config_dir, bans_file.get()), 'r') as f:
                self.bans.read_list(json.load(f))
            log.debug("loaded {count} bans", count=len(self.bans))
        except FileNotFoundError:
            log.debug("skip loading bans: file unavailable",
                      count=len(self.bans))
        except IOError as e:
            log.error('Could not read bans file ({}): {}'.format(bans_file.get(), e))
        except ValueError as e:
            log.error('Could not parse bans file ({}): {}'.format(bans_file.get(), e))

        self.hard_bans = set()  # possible DDoS'ers are added here
        self.player_memory = deque(maxlen=100)
        if len(self.name) > MAX_SERVER_NAME_SIZE:
            log.warn('(server name too long; it will be truncated to "%s")' % (
                self.name[:MAX_SERVER_NAME_SIZE]))
        self.respawn_time = respawn_time_option.get()
        self.respawn_waves = respawn_waves.get()

        # since AoS only supports CTF and TC at a protocol level, we need to get
        # the base game mode if we are using a custom game mode.
        game_mode_name = game_mode.get()
        if game_mode_name == 'ctf':
            self.game_mode = CTF_MODE
        elif game_mode.get() == 'tc':
            self.game_mode = TC_MODE
        elif self.game_mode not in [CTF_MODE, TC_MODE]:
            raise ValueError(
                'invalid game mode: custom game mode "{}" does not set '
                'protocol.game_mode to one of TC_MODE or CTF_MODE. Are '
                'you sure the thing you have specified is a game mode?'.format(
                    game_mode_name))

        self.game_mode_name = game_mode.get().split('.')[-1]
        self.team1_name = team1_name.get()[:9]
        self.team2_name = team2_name.get()[:9]
        self.team1_color = tuple(team1_color.get())
        self.team2_color = tuple(team2_color.get())
        self.friendly_fire = friendly_fire.get()
        self.friendly_fire_on_grief = friendly_fire_on_grief.get()
        self.friendly_fire_time = grief_friendly_fire_time.get()
        self.spade_teamkills_on_grief = spade_teamkills_on_grief.get()
        self.fall_damage = fall_damage.get()
        self.teamswitch_interval = teamswitch_interval.get()
        self.teamswitch_allowed = teamswitch_allowed.get()
        self.max_players = max_players.get()
        self.melee_damage = melee_damage.get()
        self.max_connections_per_ip = max_connections_per_ip.get()
        self.passwords = passwords.get()
        self.server_prefix = server_prefix.get()
        self.time_announcements = time_announcements.get()
        self.balanced_teams = balanced_teams.get()
        self.login_retries = login_retries.get()

        # voting configuration
        self.default_ban_time = default_ban_duration.get()

        self.speedhack_detect = speedhack_detect.get()
        self.rubberband_distance = rubberband_distance.get()
        if user_blocks_only.get():
            self.user_blocks = set()
        self.set_god_build = set_god_build.get()
        self.debug_log = debug_log_enabled.get()
        if self.debug_log:
            # TODO: make this configurable
            pyspades.debug.open_debug_log(
                os.path.join(config.config_dir, 'debug.log'))
        if ssh_enabled.get():
            from piqueserver.ssh import RemoteConsole
            self.remote_console = RemoteConsole(self)
        irc = irc_options.get()
        if irc.get('enabled', False):
            from piqueserver.irc import IRCRelay
            self.irc_relay = IRCRelay(self, irc)
        if status_server_enabled.get():
            from piqueserver.statusserver import StatusServer
            self.status_server = StatusServer(self)
            ensureDeferred(self.status_server.listen())
        if ban_publish.get():
            from piqueserver.banpublish import PublishServer
            self.ban_publish = PublishServer(self, ban_publish_port.get())
        if bans_config_urls.get():
            from piqueserver import bansubscribe
            self.ban_manager = bansubscribe.BanManager(self)
        self.start_time = time.time()
        self.end_calls = []
        # TODO: why is this here?
        create_console(self)

        for user_type, func_names in rights.get().items():
            for func_name in func_names:
                commands.add_rights(user_type, func_name)

        if everyone_is_admin.get():
            self.everyone_is_admin = True

        self.port = port_option.get()
        ServerProtocol.__init__(self, self.port, interface)
        self.host.intercept = self.receive_callback

        try:
            self.set_map_rotation(self.config['rotation'])
        except MapNotFound as e:
            log.critical('Invalid map in map rotation (%s), exiting.' % e.map)
            raise SystemExit

        map_load_d = self.advance_rotation()
        # discard the result of the map advance for now
        map_load_d.addCallback(lambda x: self._post_init())

        ip_getter = ip_getter_option.get()
        if ip_getter:
            ensureDeferred(as_deferred(self.get_external_ip(ip_getter)))

        self.new_release = None
        notify_new_releases = config.option(
            "release_notifications", default=True)
        if notify_new_releases.get():
            ensureDeferred(as_deferred(self.watch_for_releases()))

        self.vacuum_loop = LoopingCall(self.vacuum_bans)
        # Run the vacuum every 6 hours, and kick it off right now
        self.vacuum_loop.start(60 * 60 * 6, True)

        reactor.addSystemEventTrigger(
            'before', 'shutdown', lambda: ensureDeferred(self.shutdown()))