Scrapy-0.14.4/0000700000016101777760000000000011754532100013125 5ustar buildbotnogroupScrapy-0.14.4/setup.py0000600000016101777760000001251011754531743014654 0ustar buildbotnogroup# Scrapy setup.py script # # It doesn't depend on setuptools, but if setuptools is available it'll use # some of its features, like package dependencies. from __future__ import with_statement from distutils.command.install_data import install_data from distutils.command.install import INSTALL_SCHEMES from subprocess import Popen, PIPE import os import sys class osx_install_data(install_data): # On MacOS, the platform-specific lib dir is /System/Library/Framework/Python/.../ # which is wrong. Python 2.5 supplied with MacOS 10.5 has an Apple-specific fix # for this in distutils.command.install_data#306. It fixes install_lib but not # install_data, which is why we roll our own install_data class. def finalize_options(self): # By the time finalize_options is called, install.install_lib is set to the # fixed directory, so we set the installdir to install_lib. The # install_data class uses ('install_data', 'install_dir') instead. self.set_undefined_options('install', ('install_lib', 'install_dir')) install_data.finalize_options(self) if sys.platform == "darwin": cmdclasses = {'install_data': osx_install_data} else: cmdclasses = {'install_data': install_data} def fullsplit(path, result=None): """ Split a pathname into components (the opposite of os.path.join) in a platform-neutral way. """ if result is None: result = [] head, tail = os.path.split(path) if head == '': return [tail] + result if head == path: return result return fullsplit(head, [tail] + result) # Tell distutils to put the data_files in platform-specific installation # locations. See here for an explanation: # http://groups.google.com/group/comp.lang.python/browse_thread/thread/35ec7b2fed36eaec/2105ee4d9e8042cb for scheme in INSTALL_SCHEMES.values(): scheme['data'] = scheme['purelib'] # Compile the list of packages available, because distutils doesn't have # an easy way to do this. packages, data_files = [], [] root_dir = os.path.dirname(__file__) if root_dir != '': os.chdir(root_dir) def is_not_module(filename): return os.path.splitext(f)[1] not in ['.py', '.pyc', '.pyo'] for scrapy_dir in ['scrapy', 'scrapyd']: for dirpath, dirnames, filenames in os.walk(scrapy_dir): # Ignore dirnames that start with '.' for i, dirname in enumerate(dirnames): if dirname.startswith('.'): del dirnames[i] if '__init__.py' in filenames: packages.append('.'.join(fullsplit(dirpath))) data = [f for f in filenames if is_not_module(f)] if data: data_files.append([dirpath, [os.path.join(dirpath, f) for f in data]]) elif filenames: data_files.append([dirpath, [os.path.join(dirpath, f) for f in filenames]]) # Small hack for working with bdist_wininst. 
# See http://mail.python.org/pipermail/distutils-sig/2004-August/004134.html if len(sys.argv) > 1 and sys.argv[1] == 'bdist_wininst': for file_info in data_files: file_info[0] = '\\PURELIB\\%s' % file_info[0] scripts = ['bin/scrapy'] if os.name == 'nt': scripts.append('extras/scrapy.bat') if os.environ.get('SCRAPY_VERSION_FROM_HG'): rev = Popen(["hg", "tip", "--template", "{rev}"], stdout=PIPE).communicate()[0] with open('scrapy/__init__.py', 'a') as f: f.write("\n__version__ = '.'.join(map(str, version_info)) + '.%s'" % rev) elif os.environ.get('SCRAPY_VERSION_FROM_GIT'): rev = Popen("git log --oneline | wc -l", shell=True, stdout=PIPE).communicate()[0] with open('scrapy/__init__.py', 'a') as f: f.write("\n__version__ = '.'.join(map(str, version_info)) + '.%s'" % rev.strip()) version = __import__('scrapy').__version__ setup_args = { 'name': 'Scrapy', 'version': version, 'url': 'http://scrapy.org', 'description': 'A high-level Python Screen Scraping framework', 'long_description': 'Scrapy is a high level scraping and web crawling framework for writing spiders to crawl and parse web pages for all kinds of purposes, from information retrieval to monitoring or testing web sites.', 'author': 'Scrapy developers', 'maintainer': 'Pablo Hoffman', 'maintainer_email': 'pablo@pablohoffman.com', 'license': 'BSD', 'packages': packages, 'cmdclass': cmdclasses, 'data_files': data_files, 'scripts': scripts, 'classifiers': [ 'Programming Language :: Python', 'Programming Language :: Python :: 2.5', 'Programming Language :: Python :: 2.6', 'Programming Language :: Python :: 2.7', 'License :: OSI Approved :: BSD License', 'Operating System :: OS Independent', 'Development Status :: 5 - Production/Stable', 'Intended Audience :: Developers', 'Environment :: Console', 'Topic :: Software Development :: Libraries :: Application Frameworks', 'Topic :: Software Development :: Libraries :: Python Modules', 'Topic :: Internet :: WWW/HTTP', ] } try: from setuptools import setup except ImportError: from distutils.core import setup else: setup_args['install_requires'] = ['Twisted>=2.5', 'w3lib', 'pyOpenSSL'] if sys.version_info < (2, 6): setup_args['install_requires'] += ['simplejson'] try: import libxml2 except ImportError: setup_args['install_requires'] += ['lxml'] setup(**setup_args) Scrapy-0.14.4/MANIFEST.in0000600000016101777760000000102011754531743014672 0ustar buildbotnogroupinclude README include AUTHORS include INSTALL include LICENSE include MANIFEST.in include scrapy/mime.types include scrapyd/default_scrapyd.conf recursive-include scrapyd license.txt recursive-include scrapyd/tests *.egg recursive-include scrapy/templates * recursive-include scrapy/tests/sample_data * recursive-include scrapy license.txt recursive-include scrapy/tests *.egg recursive-include docs * prune docs/build recursive-include scripts * recursive-include examples * recursive-include extras * recursive-include bin * Scrapy-0.14.4/setup.cfg0000600000016101777760000000034211754532100014747 0ustar buildbotnogroup[bdist_rpm] install-script = extras/rpm-install.sh doc_files = docs examples extras AUTHORS INSTALL LICENSE README [egg_info] tag_build = tag_date = 0 tag_svn_revision = 0 [bdist_wininst] bitmap = extras/setup_wininst.bmp Scrapy-0.14.4/bin/0000700000016101777760000000000011754532077013712 5ustar buildbotnogroupScrapy-0.14.4/bin/runtests.sh0000700000016101777760000000237511754531743016146 0ustar buildbotnogroup#!/bin/sh # look for twisted trial command if type trial >/dev/null 2>&1; then trial="trial" elif [ -x 
/usr/lib/twisted/bin/trial ]; then trial="/usr/lib/twisted/bin/trial" elif [ -x /usr/lib64/twisted/bin/trial ]; then trial="/usr/lib64/twisted/bin/trial" else echo "Unable to run tests: trial command (included with Twisted) not found" exit 1 fi # use vsftpd (if available) for testing ftp feed storage if type vsftpd >/dev/null 2>&1; then vsftpd_conf=$(mktemp /tmp/vsftpd-XXXX) cat >$vsftpd_conf < Scrapyd

Scrapyd

Available projects: %(projects)s

How to schedule a spider?

To schedule a spider you need to use the API (this web UI is only for monitoring)

Example using curl:

curl http://localhost:6800/schedule.json -d project=default -d spider=somespider

For more information about the API, see the Scrapyd documentation
""" % vars

class ProcessMonitor(resource.Resource):

    def __init__(self, root):
        resource.Resource.__init__(self)
        self.root = root

    def render(self, txrequest):
        s = "<html><head><title>Scrapyd</title></head>"
        s += "<body>"
        s += "<h1>Process monitor</h1>"
        s += "<p><a href='..'>Go back</a></p>"
        s += "<table border='1'>"
        s += "<tr><th>Project</th><th>Spider</th><th>Job</th><th>PID</th><th>Runtime</th><th>Log</th></tr>"
        for p in self.root.launcher.processes.values():
            s += "<tr>"
            for a in ['project', 'spider', 'job', 'pid']:
                s += "<td>%s</td>" % getattr(p, a)
            s += "<td>%s</td>" % (datetime.now() - p.start_time)
            s += "<td><a href='/logs/%s/%s/%s.log'>Log</a></td>" % (p.project, p.spider, p.job)
            s += "</tr>"
        s += "</table>
" s += "" s += "" return s Scrapy-0.14.4/scrapyd/launcher.py0000600000016101777760000000574511754531743016776 0ustar buildbotnogroupimport sys, os from datetime import datetime from twisted.internet import reactor, defer, protocol, error from twisted.application.service import Service from twisted.python import log from scrapy.utils.py26 import cpu_count from scrapy.utils.python import stringify_dict from scrapyd.utils import get_crawl_args from .interfaces import IPoller, IEnvironment class Launcher(Service): name = 'launcher' def __init__(self, config, app): self.processes = {} self.max_proc = config.getint('max_proc', 0) if not self.max_proc: self.max_proc = cpu_count() * config.getint('max_proc_per_cpu', 4) self.runner = config.get('runner', 'scrapyd.runner') self.app = app def startService(self): for slot in range(self.max_proc): self._wait_for_project(slot) log.msg("%s started: max_proc=%r, runner=%r" % (self.parent.name, \ self.max_proc, self.runner), system="Launcher") def _wait_for_project(self, slot): poller = self.app.getComponent(IPoller) poller.next().addCallback(self._spawn_process, slot) def _spawn_process(self, message, slot): msg = stringify_dict(message, keys_only=False) project = msg['_project'] args = [sys.executable, '-m', self.runner, 'crawl'] args += get_crawl_args(msg) e = self.app.getComponent(IEnvironment) env = e.get_environment(msg, slot) env = stringify_dict(env, keys_only=False) pp = ScrapyProcessProtocol(slot, project, msg['_spider'], \ msg['_job'], env) pp.deferred.addBoth(self._process_finished, slot) reactor.spawnProcess(pp, sys.executable, args=args, env=env) self.processes[slot] = pp def _process_finished(self, _, slot): self.processes.pop(slot) self._wait_for_project(slot) class ScrapyProcessProtocol(protocol.ProcessProtocol): def __init__(self, slot, project, spider, job, env): self.slot = slot self.pid = None self.project = project self.spider = spider self.job = job self.start_time = datetime.now() self.env = env self.logfile = env['SCRAPY_LOG_FILE'] self.deferred = defer.Deferred() def outReceived(self, data): log.msg(data.rstrip(), system="Launcher,%d/stdout" % self.pid) def errReceived(self, data): log.msg(data.rstrip(), system="Launcher,%d/stderr" % self.pid) def connectionMade(self): self.pid = self.transport.pid self.log("Process started: ") def processEnded(self, status): if isinstance(status.value, error.ProcessDone): self.log("Process finished: ") else: self.log("Process died: exitstatus=%r " % status.value.exitCode) self.deferred.callback(self) def log(self, msg): msg += "project=%r spider=%r job=%r pid=%r log=%r" % (self.project, \ self.spider, self.job, self.pid, self.logfile) log.msg(msg, system="Launcher") Scrapy-0.14.4/scrapyd/tests/0000700000016101777760000000000011754532100015734 5ustar buildbotnogroupScrapy-0.14.4/scrapyd/tests/test_scheduler.py0000600000016101777760000000311111754531743021335 0ustar buildbotnogroupimport os from twisted.trial import unittest from zope.interface.verify import verifyObject from scrapyd.interfaces import ISpiderScheduler from scrapyd.config import Config from scrapyd.scheduler import SpiderScheduler from scrapyd.utils import get_spider_queues class SpiderSchedulerTest(unittest.TestCase): def setUp(self): d = self.mktemp() eggs_dir = self.eggs_dir = os.path.join(d, 'eggs') dbs_dir = os.path.join(d, 'dbs') os.mkdir(d) os.makedirs(eggs_dir) os.makedirs(dbs_dir) os.makedirs(os.path.join(eggs_dir, 'mybot1')) os.makedirs(os.path.join(eggs_dir, 'mybot2')) config = Config(values={'eggs_dir': eggs_dir, 
'dbs_dir': dbs_dir}) self.queues = get_spider_queues(config) self.sched = SpiderScheduler(config) def test_interface(self): verifyObject(ISpiderScheduler, self.sched) def test_list_update_projects(self): self.assertEqual(sorted(self.sched.list_projects()), sorted(['mybot1', 'mybot2'])) os.makedirs(os.path.join(self.eggs_dir, 'mybot3')) self.sched.update_projects() self.assertEqual(sorted(self.sched.list_projects()), sorted(['mybot1', 'mybot2', 'mybot3'])) def test_schedule(self): q = self.queues['mybot1'] self.failIf(q.count()) self.sched.schedule('mybot1', 'myspider1', a='b') self.sched.schedule('mybot2', 'myspider2', c='d') self.assertEqual(q.pop(), {'name': 'myspider1', 'a': 'b'}) q = self.queues['mybot2'] self.assertEqual(q.pop(), {'name': 'myspider2', 'c': 'd'}) Scrapy-0.14.4/scrapyd/tests/test_environ.py0000600000016101777760000000224511754531743021046 0ustar buildbotnogroupimport os from twisted.trial import unittest from zope.interface.verify import verifyObject from scrapyd.interfaces import IEnvironment from scrapyd.config import Config from scrapyd.environ import Environment class EnvironmentTest(unittest.TestCase): def setUp(self): d = self.mktemp() os.mkdir(d) config = Config(values={'eggs_dir': d, 'logs_dir': d}) config.cp.add_section('settings') config.cp.set('settings', 'newbot', 'newbot.settings') self.environ = Environment(config, initenv={}) def test_interface(self): verifyObject(IEnvironment, self.environ) def test_get_environment_with_eggfile(self): msg = {'_project': 'mybot', '_spider': 'myspider', '_job': 'ID'} slot = 3 env = self.environ.get_environment(msg, slot) self.assertEqual(env['SCRAPY_PROJECT'], 'mybot') self.assertEqual(env['SCRAPY_SLOT'], '3') self.assertEqual(env['SCRAPY_SPIDER'], 'myspider') self.assertEqual(env['SCRAPY_JOB'], 'ID') self.assert_(env['SCRAPY_LOG_FILE'].endswith(os.path.join('mybot', 'myspider', 'ID.log'))) self.failIf('SCRAPY_SETTINGS_MODULE' in env) Scrapy-0.14.4/scrapyd/tests/test_poller.py0000600000016101777760000000261011754531743020657 0ustar buildbotnogroupimport os from twisted.trial import unittest from twisted.internet.defer import Deferred from zope.interface.verify import verifyObject from scrapyd.interfaces import IPoller from scrapyd.config import Config from scrapyd.poller import QueuePoller from scrapyd.utils import get_spider_queues class QueuePollerTest(unittest.TestCase): def setUp(self): d = self.mktemp() eggs_dir = os.path.join(d, 'eggs') dbs_dir = os.path.join(d, 'dbs') os.makedirs(eggs_dir) os.makedirs(dbs_dir) os.makedirs(os.path.join(eggs_dir, 'mybot1')) os.makedirs(os.path.join(eggs_dir, 'mybot2')) config = Config(values={'eggs_dir': eggs_dir, 'dbs_dir': dbs_dir}) self.queues = get_spider_queues(config) self.poller = QueuePoller(config) def test_interface(self): verifyObject(IPoller, self.poller) def test_poll_next(self): self.queues['mybot1'].add('spider1') self.queues['mybot2'].add('spider2') d1 = self.poller.next() d2 = self.poller.next() self.failUnless(isinstance(d1, Deferred)) self.failIf(hasattr(d1, 'result')) self.poller.poll() self.queues['mybot1'].pop() self.poller.poll() self.failUnlessEqual(d1.result, {'_project': 'mybot1', '_spider': 'spider1'}) self.failUnlessEqual(d2.result, {'_project': 'mybot2', '_spider': 'spider2'}) Scrapy-0.14.4/scrapyd/tests/mybot.egg0000600000016101777760000001201111754531743017563 0ustar buildbotnogroupPKGЇ3=[Цд $EGG-INFO/entry_points.txt‹.N.J,ЈŒх*N-)ЩЬK/VАUШ­LЪ/бƒ pqPKGЇ3=“з2EGG-INFO/zip-safeуPKGЇ3=Јп‰Рˆ.EGG-INFO/SOURCES.txtuЯб Т0 …с{пeњ Ђ"‚“ ЏКуviXЃиЗw 
mybot/spiders/spider1.pycPK&Ї3=” ЛГDWЄm mybot/spiders/spider1.pyPK- =cŽmžјЄч mybot/spiders/__init__.pyPK7МScrapy-0.14.4/scrapyd/tests/test_spiderqueue.py0000600000016101777760000000367411754531743021730 0ustar buildbotnogroupfrom twisted.internet.defer import inlineCallbacks, maybeDeferred from twisted.trial import unittest from zope.interface.verify import verifyObject from scrapyd.interfaces import ISpiderQueue from scrapyd.spiderqueue import SqliteSpiderQueue class SpiderQueueTest(unittest.TestCase): """This test case can be used easily for testing other SpiderQueue's by just changing the _get_queue() method. It also supports queues with deferred methods. """ def setUp(self): self.q = self._get_queue() self.name = 'spider1' self.args = {'arg1': 'val1', 'arg2': 2} self.msg = self.args.copy() self.msg['name'] = self.name def _get_queue(self): return SqliteSpiderQueue(':memory:') def test_interface(self): verifyObject(ISpiderQueue, self.q) @inlineCallbacks def test_add_pop_count(self): c = yield maybeDeferred(self.q.count) self.assertEqual(c, 0) yield maybeDeferred(self.q.add, self.name, **self.args) c = yield maybeDeferred(self.q.count) self.assertEqual(c, 1) m = yield maybeDeferred(self.q.pop) self.assertEqual(m, self.msg) c = yield maybeDeferred(self.q.count) self.assertEqual(c, 0) @inlineCallbacks def test_list(self): l = yield maybeDeferred(self.q.list) self.assertEqual(l, []) yield maybeDeferred(self.q.add, self.name, **self.args) yield maybeDeferred(self.q.add, self.name, **self.args) l = yield maybeDeferred(self.q.list) self.assertEqual(l, [self.msg, self.msg]) @inlineCallbacks def test_clear(self): yield maybeDeferred(self.q.add, self.name, **self.args) yield maybeDeferred(self.q.add, self.name, **self.args) c = yield maybeDeferred(self.q.count) self.assertEqual(c, 2) yield maybeDeferred(self.q.clear) c = yield maybeDeferred(self.q.count) self.assertEqual(c, 0) Scrapy-0.14.4/scrapyd/tests/test_eggstorage.py0000600000016101777760000000245611754531743021521 0ustar buildbotnogroupfrom cStringIO import StringIO from twisted.trial import unittest from zope.interface.verify import verifyObject from scrapyd.interfaces import IEggStorage from scrapyd.config import Config from scrapyd.eggstorage import FilesystemEggStorage class EggStorageTest(unittest.TestCase): def setUp(self): d = self.mktemp() config = Config(values={'eggs_dir': d}) self.eggst = FilesystemEggStorage(config) def test_interface(self): verifyObject(IEggStorage, self.eggst) def test_put_get_list_delete(self): self.eggst.put(StringIO("egg01"), 'mybot', '01') self.eggst.put(StringIO("egg03"), 'mybot', '03') self.eggst.put(StringIO("egg02"), 'mybot', '02') self.assertEqual(self.eggst.list('mybot'), ['01', '02', '03']) self.assertEqual(self.eggst.list('mybot2'), []) v, f = self.eggst.get('mybot') self.assertEqual(v, "03") self.assertEqual(f.read(), "egg03") f.close() v, f = self.eggst.get('mybot', '02') self.assertEqual(v, "02") self.assertEqual(f.read(), "egg02") f.close() self.eggst.delete('mybot', '02') self.assertEqual(self.eggst.list('mybot'), ['01', '03']) self.eggst.delete('mybot') self.assertEqual(self.eggst.list('mybot'), []) Scrapy-0.14.4/scrapyd/tests/test_dont_load_settings.py0000600000016101777760000000117711754531743023254 0ustar buildbotnogroupimport sys import unittest class SettingsSafeModulesTest(unittest.TestCase): # these modules must not load scrapy.conf SETTINGS_SAFE_MODULES = [ 'scrapy.utils.project', 'scrapy.utils.conf', 'scrapyd.interfaces', 'scrapyd.eggutils', ] def 
test_modules_that_shouldnt_load_settings(self): sys.modules.pop('scrapy.conf', None) for m in self.SETTINGS_SAFE_MODULES: __import__(m) assert 'scrapy.conf' not in sys.modules, \ "Module %r must not cause the scrapy.conf module to be loaded" % m if __name__ == "__main__": unittest.main() Scrapy-0.14.4/scrapyd/tests/__init__.py0000600000016101777760000000000011754531743020051 0ustar buildbotnogroupScrapy-0.14.4/scrapyd/tests/test_utils.py0000600000016101777760000000367211754531743020533 0ustar buildbotnogroupfrom __future__ import with_statement import os from cStringIO import StringIO from twisted.trial import unittest from scrapy.utils.py26 import get_data from scrapyd.interfaces import IEggStorage from scrapyd.utils import get_crawl_args, get_spider_list from scrapyd import get_application __package__ = 'scrapyd.tests' # required for compatibility with python 2.5 class UtilsTest(unittest.TestCase): def test_get_crawl_args(self): msg = {'_project': 'lolo', '_spider': 'lala'} self.assertEqual(get_crawl_args(msg), ['lala']) msg = {'_project': 'lolo', '_spider': 'lala', 'arg1': u'val1'} cargs = get_crawl_args(msg) self.assertEqual(cargs, ['lala', '-a', 'arg1=val1']) assert all(isinstance(x, str) for x in cargs), cargs def test_get_crawl_args_with_settings(self): msg = {'_project': 'lolo', '_spider': 'lala', 'arg1': u'val1', 'settings': {'ONE': 'two'}} cargs = get_crawl_args(msg) self.assertEqual(cargs, ['lala', '-a', 'arg1=val1', '-s', 'ONE=two']) assert all(isinstance(x, str) for x in cargs), cargs class GetSpiderListTest(unittest.TestCase): def test_get_spider_list(self): path = os.path.abspath(self.mktemp()) j = os.path.join eggs_dir = j(path, 'eggs') os.makedirs(eggs_dir) dbs_dir = j(path, 'dbs') os.makedirs(dbs_dir) logs_dir = j(path, 'logs') os.makedirs(logs_dir) os.chdir(path) with open('scrapyd.conf', 'w') as f: f.write("[scrapyd]\n") f.write("eggs_dir = %s\n" % eggs_dir) f.write("dbs_dir = %s\n" % dbs_dir) f.write("logs_dir = %s\n" % logs_dir) app = get_application() eggstorage = app.getComponent(IEggStorage) eggfile = StringIO(get_data(__package__, 'mybot.egg')) eggstorage.put(eggfile, 'mybot', 'r1') self.assertEqual(sorted(get_spider_list('mybot')), ['spider1', 'spider2']) Scrapy-0.14.4/scrapyd/tests/test_sqlite.py0000600000016101777760000001121011754531743020657 0ustar buildbotnogroupimport unittest from datetime import datetime from decimal import Decimal from scrapy.http import Request from scrapyd.sqlite import SqlitePriorityQueue, JsonSqlitePriorityQueue, \ PickleSqlitePriorityQueue, SqliteDict, JsonSqliteDict, PickleSqliteDict class SqliteDictTest(unittest.TestCase): dict_class = SqliteDict test_dict = {'hello': 'world', 'int': 1, 'float': 1.5} def test_basic_types(self): test = self.test_dict d = self.dict_class() d.update(test) self.failUnlessEqual(d.items(), test.items()) d.clear() self.failIf(d.items()) def test_in(self): d = self.dict_class() self.assertFalse('test' in d) d['test'] = 123 self.assertTrue('test' in d) def test_keyerror(self): d = self.dict_class() self.assertRaises(KeyError, d.__getitem__, 'test') def test_replace(self): d = self.dict_class() self.assertEqual(d.get('test'), None) d['test'] = 123 self.assertEqual(d.get('test'), 123) d['test'] = 456 self.assertEqual(d.get('test'), 456) class JsonSqliteDictTest(SqliteDictTest): dict_class = JsonSqliteDict test_dict = SqliteDictTest.test_dict.copy() test_dict.update({'list': ['a', 'world'], 'dict': {'some': 'dict'}}) class PickleSqliteDictTest(JsonSqliteDictTest): dict_class = PickleSqliteDict test_dict = 
JsonSqliteDictTest.test_dict.copy() test_dict.update({'decimal': Decimal("10"), 'datetime': datetime.now()}) def test_request_persistance(self): r1 = Request("http://www.example.com", body="some") d = self.dict_class() d['request'] = r1 r2 = d['request'] self.failUnless(isinstance(r2, Request)) self.failUnlessEqual(r1.url, r2.url) self.failUnlessEqual(r1.body, r2.body) class SqlitePriorityQueueTest(unittest.TestCase): queue_class = SqlitePriorityQueue supported_values = ["bytes", u"\xa3", 123, 1.2, True] def setUp(self): self.q = self.queue_class() def test_empty(self): self.failUnless(self.q.pop() is None) def test_one(self): msg = "a message" self.q.put(msg) self.failIf("_id" in msg) self.failUnlessEqual(self.q.pop(), msg) self.failUnless(self.q.pop() is None) def test_multiple(self): msg1 = "first message" msg2 = "second message" self.q.put(msg1) self.q.put(msg2) out = [] out.append(self.q.pop()) out.append(self.q.pop()) self.failUnless(msg1 in out) self.failUnless(msg2 in out) self.failUnless(self.q.pop() is None) def test_priority(self): msg1 = "message 1" msg2 = "message 2" msg3 = "message 3" msg4 = "message 4" self.q.put(msg1, priority=1.0) self.q.put(msg2, priority=5.0) self.q.put(msg3, priority=3.0) self.q.put(msg4, priority=2.0) self.failUnlessEqual(self.q.pop(), msg2) self.failUnlessEqual(self.q.pop(), msg3) self.failUnlessEqual(self.q.pop(), msg4) self.failUnlessEqual(self.q.pop(), msg1) def test_iter_len_clear(self): self.failUnlessEqual(len(self.q), 0) self.failUnlessEqual(list(self.q), []) msg1 = "message 1" msg2 = "message 2" msg3 = "message 3" msg4 = "message 4" self.q.put(msg1, priority=1.0) self.q.put(msg2, priority=5.0) self.q.put(msg3, priority=3.0) self.q.put(msg4, priority=2.0) self.failUnlessEqual(len(self.q), 4) self.failUnlessEqual(list(self.q), \ [(msg2, 5.0), (msg3, 3.0), (msg4, 2.0), (msg1, 1.0)]) self.q.clear() self.failUnlessEqual(len(self.q), 0) self.failUnlessEqual(list(self.q), []) def test_types(self): for x in self.supported_values: self.q.put(x) self.failUnlessEqual(self.q.pop(), x) class JsonSqlitePriorityQueueTest(SqlitePriorityQueueTest): queue_class = JsonSqlitePriorityQueue supported_values = SqlitePriorityQueueTest.supported_values + [ ["a", "list", 1], {"a": "dict"}, ] class PickleSqlitePriorityQueueTest(JsonSqlitePriorityQueueTest): queue_class = PickleSqlitePriorityQueue supported_values = JsonSqlitePriorityQueueTest.supported_values + [ Decimal("10"), datetime.now(), ] def test_request_persistance(self): r1 = Request("http://www.example.com", body="some") self.q.put(r1) r2 = self.q.pop() self.failUnless(isinstance(r2, Request)) self.failUnlessEqual(r1.url, r2.url) self.failUnlessEqual(r1.body, r2.body) Scrapy-0.14.4/scrapyd/spiderqueue.py0000600000016101777760000000127011754531743017515 0ustar buildbotnogroupfrom zope.interface import implements from scrapyd.interfaces import ISpiderQueue from scrapyd.sqlite import JsonSqlitePriorityQueue class SqliteSpiderQueue(object): implements(ISpiderQueue) def __init__(self, database=None, table='spider_queue'): self.q = JsonSqlitePriorityQueue(database, table) def add(self, name, **spider_args): d = spider_args.copy() d['name'] = name priority = float(d.pop('priority', 0)) self.q.put(d, priority) def pop(self): return self.q.pop() def count(self): return len(self.q) def list(self): return [x[0] for x in self.q] def clear(self): self.q.clear() Scrapy-0.14.4/Scrapy.egg-info/0000700000016101777760000000000011754532077016075 5ustar 
buildbotnogroupScrapy-0.14.4/Scrapy.egg-info/top_level.txt0000644000016101777760000000001711754532077020637 0ustar buildbotnogroupscrapy scrapyd Scrapy-0.14.4/Scrapy.egg-info/dependency_links.txt0000644000016101777760000000000111754532077022155 0ustar buildbotnogroup Scrapy-0.14.4/Scrapy.egg-info/SOURCES.txt0000644000016101777760000003232311754532077017776 0ustar buildbotnogroupAUTHORS INSTALL LICENSE MANIFEST.in README setup.cfg setup.py Scrapy.egg-info/PKG-INFO Scrapy.egg-info/SOURCES.txt Scrapy.egg-info/dependency_links.txt Scrapy.egg-info/requires.txt Scrapy.egg-info/top_level.txt bin/runtests.bat bin/runtests.sh bin/scrapy bin/scrapyd docs/Makefile docs/README docs/conf.py docs/contributing.rst docs/faq.rst docs/index.rst docs/versioning.rst docs/_ext/scrapydocs.py docs/_static/scrapydoc.css docs/_static/selectors-sample1.html docs/experimental/djangoitems.rst docs/experimental/index.rst docs/intro/examples.rst docs/intro/install.rst docs/intro/overview.rst docs/intro/tutorial.rst docs/topics/architecture.rst docs/topics/commands.rst docs/topics/downloader-middleware.rst docs/topics/email.rst docs/topics/exceptions.rst docs/topics/exporters.rst docs/topics/extensions.rst docs/topics/feed-exports.rst docs/topics/firebug.rst docs/topics/firefox.rst docs/topics/images.rst docs/topics/item-pipeline.rst docs/topics/items.rst docs/topics/jobs.rst docs/topics/leaks.rst docs/topics/link-extractors.rst docs/topics/loaders.rst docs/topics/logging.rst docs/topics/request-response.rst docs/topics/scrapyd.rst docs/topics/selectors.rst docs/topics/settings.rst docs/topics/shell.rst docs/topics/signals.rst docs/topics/spider-middleware.rst docs/topics/spiders.rst docs/topics/stats.rst docs/topics/telnetconsole.rst docs/topics/ubuntu.rst docs/topics/webservice.rst docs/topics/_images/firebug1.png docs/topics/_images/firebug2.png docs/topics/_images/firebug3.png docs/topics/_images/scrapy_architecture.odg docs/topics/_images/scrapy_architecture.png extras/coverage-report.sh extras/makedeb.py extras/rpm-install.sh extras/scrapy-ws.py extras/scrapy.1 extras/scrapy.bat extras/scrapy_bash_completion extras/scrapyd.tac extras/setup_wininst.bmp extras/test-scrapyd.sh scrapy/__init__.py scrapy/cmdline.py scrapy/command.py scrapy/conf.py scrapy/crawler.py scrapy/dupefilter.py scrapy/exceptions.py scrapy/extension.py scrapy/interfaces.py scrapy/item.py scrapy/link.py scrapy/linkextractor.py scrapy/log.py scrapy/logformatter.py scrapy/mail.py scrapy/middleware.py scrapy/mime.types scrapy/project.py scrapy/resolver.py scrapy/responsetypes.py scrapy/shell.py scrapy/signals.py scrapy/spider.py scrapy/spidermanager.py scrapy/squeue.py scrapy/stats.py scrapy/statscol.py scrapy/telnet.py scrapy/webservice.py scrapy/commands/__init__.py scrapy/commands/crawl.py scrapy/commands/deploy.py scrapy/commands/edit.py scrapy/commands/fetch.py scrapy/commands/genspider.py scrapy/commands/list.py scrapy/commands/parse.py scrapy/commands/runspider.py scrapy/commands/server.py scrapy/commands/settings.py scrapy/commands/shell.py scrapy/commands/startproject.py scrapy/commands/version.py scrapy/commands/view.py scrapy/contrib/__init__.py scrapy/contrib/closespider.py scrapy/contrib/corestats.py scrapy/contrib/debug.py scrapy/contrib/feedexport.py scrapy/contrib/httpcache.py scrapy/contrib/logstats.py scrapy/contrib/memdebug.py scrapy/contrib/memusage.py scrapy/contrib/spiderstate.py scrapy/contrib/statsmailer.py scrapy/contrib/throttle.py scrapy/contrib/downloadermiddleware/__init__.py 
scrapy/contrib/downloadermiddleware/chunked.py scrapy/contrib/downloadermiddleware/cookies.py scrapy/contrib/downloadermiddleware/defaultheaders.py scrapy/contrib/downloadermiddleware/downloadtimeout.py scrapy/contrib/downloadermiddleware/httpauth.py scrapy/contrib/downloadermiddleware/httpcache.py scrapy/contrib/downloadermiddleware/httpcompression.py scrapy/contrib/downloadermiddleware/httpproxy.py scrapy/contrib/downloadermiddleware/redirect.py scrapy/contrib/downloadermiddleware/retry.py scrapy/contrib/downloadermiddleware/robotstxt.py scrapy/contrib/downloadermiddleware/stats.py scrapy/contrib/downloadermiddleware/useragent.py scrapy/contrib/exporter/__init__.py scrapy/contrib/exporter/jsonlines.py scrapy/contrib/linkextractors/__init__.py scrapy/contrib/linkextractors/htmlparser.py scrapy/contrib/linkextractors/image.py scrapy/contrib/linkextractors/lxmlhtml.py scrapy/contrib/linkextractors/regex.py scrapy/contrib/linkextractors/sgml.py scrapy/contrib/loader/__init__.py scrapy/contrib/loader/common.py scrapy/contrib/loader/processor.py scrapy/contrib/pipeline/__init__.py scrapy/contrib/pipeline/images.py scrapy/contrib/pipeline/media.py scrapy/contrib/spidermiddleware/__init__.py scrapy/contrib/spidermiddleware/depth.py scrapy/contrib/spidermiddleware/httperror.py scrapy/contrib/spidermiddleware/offsite.py scrapy/contrib/spidermiddleware/referer.py scrapy/contrib/spidermiddleware/urllength.py scrapy/contrib/spiders/__init__.py scrapy/contrib/spiders/crawl.py scrapy/contrib/spiders/feed.py scrapy/contrib/spiders/init.py scrapy/contrib/spiders/sitemap.py scrapy/contrib/webservice/__init__.py scrapy/contrib/webservice/crawler.py scrapy/contrib/webservice/enginestatus.py scrapy/contrib/webservice/stats.py scrapy/contrib_exp/__init__.py scrapy/contrib_exp/djangoitem.py scrapy/contrib_exp/iterators.py scrapy/contrib_exp/downloadermiddleware/__init__.py scrapy/contrib_exp/downloadermiddleware/decompression.py scrapy/core/__init__.py scrapy/core/engine.py scrapy/core/scheduler.py scrapy/core/scraper.py scrapy/core/spidermw.py scrapy/core/downloader/__init__.py scrapy/core/downloader/middleware.py scrapy/core/downloader/webclient.py scrapy/core/downloader/handlers/__init__.py scrapy/core/downloader/handlers/file.py scrapy/core/downloader/handlers/http.py scrapy/core/downloader/handlers/s3.py scrapy/http/__init__.py scrapy/http/common.py scrapy/http/cookies.py scrapy/http/headers.py scrapy/http/request/__init__.py scrapy/http/request/form.py scrapy/http/request/rpc.py scrapy/http/response/__init__.py scrapy/http/response/dammit.py scrapy/http/response/html.py scrapy/http/response/text.py scrapy/http/response/xml.py scrapy/selector/__init__.py scrapy/selector/document.py scrapy/selector/dummysel.py scrapy/selector/factories.py scrapy/selector/libxml2sel.py scrapy/selector/list.py scrapy/selector/lxmlsel.py scrapy/settings/__init__.py scrapy/settings/default_settings.py scrapy/templates/project/scrapy.cfg scrapy/templates/project/module/__init__.py scrapy/templates/project/module/items.py.tmpl scrapy/templates/project/module/pipelines.py.tmpl scrapy/templates/project/module/settings.py.tmpl scrapy/templates/project/module/spiders/__init__.py scrapy/templates/spiders/basic.tmpl scrapy/templates/spiders/crawl.tmpl scrapy/templates/spiders/csvfeed.tmpl scrapy/templates/spiders/xmlfeed.tmpl scrapy/tests/__init__.py scrapy/tests/test_clientform.py scrapy/tests/test_command_fetch.py scrapy/tests/test_command_shell.py scrapy/tests/test_command_version.py scrapy/tests/test_commands.py 
scrapy/tests/test_contrib_exporter.py scrapy/tests/test_contrib_feedexport.py scrapy/tests/test_contrib_linkextractors.py scrapy/tests/test_contrib_loader.py scrapy/tests/test_contrib_spiderstate.py scrapy/tests/test_dependencies.py scrapy/tests/test_downloader_handlers.py scrapy/tests/test_downloadermiddleware_cookies.py scrapy/tests/test_downloadermiddleware_decompression.py scrapy/tests/test_downloadermiddleware_defaultheaders.py scrapy/tests/test_downloadermiddleware_downloadtimeout.py scrapy/tests/test_downloadermiddleware_httpauth.py scrapy/tests/test_downloadermiddleware_httpcache.py scrapy/tests/test_downloadermiddleware_httpcompression.py scrapy/tests/test_downloadermiddleware_httpproxy.py scrapy/tests/test_downloadermiddleware_redirect.py scrapy/tests/test_downloadermiddleware_retry.py scrapy/tests/test_downloadermiddleware_stats.py scrapy/tests/test_downloadermiddleware_useragent.py scrapy/tests/test_dupefilter.py scrapy/tests/test_engine.py scrapy/tests/test_http_cookies.py scrapy/tests/test_http_headers.py scrapy/tests/test_http_request.py scrapy/tests/test_http_response.py scrapy/tests/test_item.py scrapy/tests/test_libxml2.py scrapy/tests/test_link.py scrapy/tests/test_log.py scrapy/tests/test_logformatter.py scrapy/tests/test_mail.py scrapy/tests/test_middleware.py scrapy/tests/test_pipeline_images.py scrapy/tests/test_pipeline_media.py scrapy/tests/test_responsetypes.py scrapy/tests/test_selector.py scrapy/tests/test_selector_dummy.py scrapy/tests/test_selector_libxml2.py scrapy/tests/test_selector_lxml.py scrapy/tests/test_settings.py scrapy/tests/test_spider.py scrapy/tests/test_spidermiddleware_depth.py scrapy/tests/test_spidermiddleware_httperror.py scrapy/tests/test_spidermiddleware_offsite.py scrapy/tests/test_spidermiddleware_referer.py scrapy/tests/test_spidermiddleware_urllength.py scrapy/tests/test_squeue.py scrapy/tests/test_stats.py scrapy/tests/test_urlparse_monkeypatches.py scrapy/tests/test_utils_conf.py scrapy/tests/test_utils_datatypes.py scrapy/tests/test_utils_defer.py scrapy/tests/test_utils_encoding.py scrapy/tests/test_utils_gz.py scrapy/tests/test_utils_http.py scrapy/tests/test_utils_httpobj.py scrapy/tests/test_utils_iterators.py scrapy/tests/test_utils_jsonrpc.py scrapy/tests/test_utils_memory.py scrapy/tests/test_utils_pqueue.py scrapy/tests/test_utils_python.py scrapy/tests/test_utils_queue.py scrapy/tests/test_utils_reqser.py scrapy/tests/test_utils_request.py scrapy/tests/test_utils_response.py scrapy/tests/test_utils_serialize.py scrapy/tests/test_utils_signal.py scrapy/tests/test_utils_sitemap.py scrapy/tests/test_utils_spider.py scrapy/tests/test_utils_template.py scrapy/tests/test_utils_url.py scrapy/tests/test_webclient.py scrapy/tests/sample_data/compressed/feed-sample1.tar scrapy/tests/sample_data/compressed/feed-sample1.xml scrapy/tests/sample_data/compressed/feed-sample1.xml.bz2 scrapy/tests/sample_data/compressed/feed-sample1.xml.gz scrapy/tests/sample_data/compressed/feed-sample1.zip scrapy/tests/sample_data/compressed/html-gzip.bin scrapy/tests/sample_data/compressed/html-rawdeflate.bin scrapy/tests/sample_data/compressed/html-zlibdeflate.bin scrapy/tests/sample_data/compressed/truncated-crc-error.gz scrapy/tests/sample_data/feeds/feed-sample1.xml scrapy/tests/sample_data/feeds/feed-sample2.xml scrapy/tests/sample_data/feeds/feed-sample3.csv scrapy/tests/sample_data/feeds/feed-sample4.csv scrapy/tests/sample_data/feeds/feed-sample5.csv scrapy/tests/sample_data/link_extractor/image_linkextractor.html 
scrapy/tests/sample_data/link_extractor/linkextractor_latin1.html scrapy/tests/sample_data/link_extractor/linkextractor_noenc.html scrapy/tests/sample_data/link_extractor/sgml_linkextractor.html scrapy/tests/sample_data/test_site/index.html scrapy/tests/sample_data/test_site/item1.html scrapy/tests/sample_data/test_site/item2.html scrapy/tests/test_cmdline/__init__.py scrapy/tests/test_cmdline/extensions.py scrapy/tests/test_cmdline/settings.py scrapy/tests/test_djangoitem/__init__.py scrapy/tests/test_djangoitem/models.py scrapy/tests/test_djangoitem/settings.py scrapy/tests/test_spidermanager/__init__.py scrapy/tests/test_spidermanager/test_spiders/__init__.py scrapy/tests/test_spidermanager/test_spiders/spider0.py scrapy/tests/test_spidermanager/test_spiders/spider1.py scrapy/tests/test_spidermanager/test_spiders/spider2.py scrapy/tests/test_spidermanager/test_spiders/spider3.py scrapy/tests/test_utils_misc/__init__.py scrapy/tests/test_utils_misc/test.egg scrapy/tests/test_utils_misc/test_walk_modules/__init__.py scrapy/tests/test_utils_misc/test_walk_modules/mod1.py scrapy/tests/test_utils_misc/test_walk_modules/mod/__init__.py scrapy/tests/test_utils_misc/test_walk_modules/mod/mod0.py scrapy/utils/__init__.py scrapy/utils/conf.py scrapy/utils/console.py scrapy/utils/datatypes.py scrapy/utils/decorator.py scrapy/utils/defer.py scrapy/utils/deprecate.py scrapy/utils/display.py scrapy/utils/encoding.py scrapy/utils/engine.py scrapy/utils/ftp.py scrapy/utils/gz.py scrapy/utils/http.py scrapy/utils/httpobj.py scrapy/utils/iterators.py scrapy/utils/job.py scrapy/utils/jsonrpc.py scrapy/utils/markup.py scrapy/utils/memory.py scrapy/utils/misc.py scrapy/utils/multipart.py scrapy/utils/ossignal.py scrapy/utils/pqueue.py scrapy/utils/project.py scrapy/utils/py26.py scrapy/utils/py27.py scrapy/utils/python.py scrapy/utils/queue.py scrapy/utils/reactor.py scrapy/utils/reqser.py scrapy/utils/request.py scrapy/utils/response.py scrapy/utils/serialize.py scrapy/utils/signal.py scrapy/utils/sitemap.py scrapy/utils/spider.py scrapy/utils/template.py scrapy/utils/test.py scrapy/utils/testproc.py scrapy/utils/testsite.py scrapy/utils/trackref.py scrapy/utils/txweb.py scrapy/utils/url.py scrapy/xlib/BeautifulSoup.py scrapy/xlib/ClientForm.py scrapy/xlib/__init__.py scrapy/xlib/lsprofcalltree.py scrapy/xlib/ordereddict.py scrapy/xlib/twisted_250_monkeypatches.py scrapy/xlib/urlparse_monkeypatches.py scrapy/xlib/pydispatch/__init__.py scrapy/xlib/pydispatch/dispatcher.py scrapy/xlib/pydispatch/errors.py scrapy/xlib/pydispatch/license.txt scrapy/xlib/pydispatch/robust.py scrapy/xlib/pydispatch/robustapply.py scrapy/xlib/pydispatch/saferef.py scrapyd/__init__.py scrapyd/app.py scrapyd/config.py scrapyd/default_scrapyd.conf scrapyd/eggstorage.py scrapyd/eggutils.py scrapyd/environ.py scrapyd/interfaces.py scrapyd/launcher.py scrapyd/poller.py scrapyd/runner.py scrapyd/scheduler.py scrapyd/script.py scrapyd/spiderqueue.py scrapyd/sqlite.py scrapyd/utils.py scrapyd/webservice.py scrapyd/website.py scrapyd/tests/__init__.py scrapyd/tests/mybot.egg scrapyd/tests/test_dont_load_settings.py scrapyd/tests/test_eggstorage.py scrapyd/tests/test_environ.py scrapyd/tests/test_poller.py scrapyd/tests/test_scheduler.py scrapyd/tests/test_spiderqueue.py scrapyd/tests/test_sqlite.py scrapyd/tests/test_utils.pyScrapy-0.14.4/Scrapy.egg-info/requires.txt0000644000016101777760000000003411754532077020504 0ustar buildbotnogroupTwisted>=2.5 w3lib 
pyOpenSSLScrapy-0.14.4/Scrapy.egg-info/PKG-INFO0000644000016101777760000000204211754532077017202 0ustar buildbotnogroupMetadata-Version: 1.0 Name: Scrapy Version: 0.14.4 Summary: A high-level Python Screen Scraping framework Home-page: http://scrapy.org Author: Pablo Hoffman Author-email: pablo@pablohoffman.com License: BSD Description: Scrapy is a high level scraping and web crawling framework for writing spiders to crawl and parse web pages for all kinds of purposes, from information retrieval to monitoring or testing web sites. Platform: UNKNOWN Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 2.5 Classifier: Programming Language :: Python :: 2.6 Classifier: Programming Language :: Python :: 2.7 Classifier: License :: OSI Approved :: BSD License Classifier: Operating System :: OS Independent Classifier: Development Status :: 5 - Production/Stable Classifier: Intended Audience :: Developers Classifier: Environment :: Console Classifier: Topic :: Software Development :: Libraries :: Application Frameworks Classifier: Topic :: Software Development :: Libraries :: Python Modules Classifier: Topic :: Internet :: WWW/HTTP Scrapy-0.14.4/AUTHORS0000600000016101777760000000133611754531743014216 0ustar buildbotnogroupScrapy was brought to life by Shane Evans while hacking a scraping framework prototype for Mydeco (mydeco.com). It soon became maintained, extended and improved by Insophia (insophia.com), with the initial sponsorship of Mydeco to bootstrap the project. Here is the list of the primary authors & contributors: * Pablo Hoffman * Daniel GraУБa * Martin Olveyra * Gabriel GarcУ­a * Michael Cetrulo * Artem Bogomyagkov * Damian Canabal * Andres Moreira * Ismael Carnales * MatУ­as Aguirre * German Hoffmann * Anibal Pacheco * Bruno Deferrari * Shane Evans * Ezequiel Rivero * Patrick Mezard * Rolando Espinoza * Ping Yin * Lucian Ursu * Shuaib Khan * Didier Deshommes * Vikas Dhiman * Jochen Maes * Darian Moody Scrapy-0.14.4/INSTALL0000600000016101777760000000022011754531743014166 0ustar buildbotnogroupFor information about installing Scrapy see: * docs/intro/install.rst (local file) * http://doc.scrapy.org/intro/install.html (online version) Scrapy-0.14.4/LICENSE0000600000016101777760000000276111754531743014156 0ustar buildbotnogroupCopyright (c) Scrapy developers. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of Scrapy nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. Scrapy-0.14.4/scrapy/0000700000016101777760000000000011754532077014443 5ustar buildbotnogroupScrapy-0.14.4/scrapy/interfaces.py0000600000016101777760000000101511754531743017136 0ustar buildbotnogroupfrom zope.interface import Interface class ISpiderManager(Interface): def create(spider_name, **spider_args): """Returns a new Spider instance for the given spider name, and using the given spider arguments. If the spider name is not found, it must raise a KeyError.""" def list(): """Return a list with the names of all spiders available in the project""" def find_by_request(request): """Returns the list of spiders names that can handle the given request""" Scrapy-0.14.4/scrapy/middleware.py0000600000016101777760000000507311754531743017140 0ustar buildbotnogroupfrom collections import defaultdict from scrapy import log from scrapy.exceptions import NotConfigured from scrapy.utils.misc import load_object from scrapy.utils.defer import process_parallel, process_chain, process_chain_both class MiddlewareManager(object): """Base class for implementing middleware managers""" component_name = 'foo middleware' def __init__(self, *middlewares): self.middlewares = middlewares self.methods = defaultdict(list) for mw in middlewares: self._add_middleware(mw) @classmethod def _get_mwlist_from_settings(cls, settings): raise NotImplementedError @classmethod def from_settings(cls, settings, crawler=None): mwlist = cls._get_mwlist_from_settings(settings) middlewares = [] for clspath in mwlist: try: mwcls = load_object(clspath) if crawler and hasattr(mwcls, 'from_crawler'): mw = mwcls.from_crawler(crawler) elif hasattr(mwcls, 'from_settings'): mw = mwcls.from_settings(settings) else: mw = mwcls() middlewares.append(mw) except NotConfigured, e: if e.args: clsname = clspath.split('.')[-1] log.msg("Disabled %s: %s" % (clsname, e.args[0]), log.WARNING) enabled = [x.__class__.__name__ for x in middlewares] log.msg("Enabled %ss: %s" % (cls.component_name, ", ".join(enabled)), \ level=log.DEBUG) return cls(*middlewares) @classmethod def from_crawler(cls, crawler): return cls.from_settings(crawler.settings, crawler) def _add_middleware(self, mw): if hasattr(mw, 'open_spider'): self.methods['open_spider'].append(mw.open_spider) if hasattr(mw, 'close_spider'): self.methods['close_spider'].insert(0, mw.close_spider) def _process_parallel(self, methodname, obj, *args): return process_parallel(self.methods[methodname], obj, *args) def _process_chain(self, methodname, obj, *args): return process_chain(self.methods[methodname], obj, *args) def _process_chain_both(self, cb_methodname, eb_methodname, obj, *args): return process_chain_both(self.methods[cb_methodname], \ self.methods[eb_methodname], obj, *args) def open_spider(self, spider): return self._process_parallel('open_spider', spider) def close_spider(self, spider): return self._process_parallel('close_spider', spider) Scrapy-0.14.4/scrapy/contrib_exp/0000700000016101777760000000000011754532077016757 5ustar 
buildbotnogroupScrapy-0.14.4/scrapy/contrib_exp/downloadermiddleware/0000700000016101777760000000000011754532077023153 5ustar buildbotnogroupScrapy-0.14.4/scrapy/contrib_exp/downloadermiddleware/decompression.py0000600000016101777760000000472411754531743026407 0ustar buildbotnogroup""" This module implements the DecompressionMiddleware which tries to recognise and extract the potentially compressed responses that may arrive. """ import bz2 import gzip import zipfile import tarfile from cStringIO import StringIO from tempfile import mktemp from scrapy import log from scrapy.http import Response from scrapy.responsetypes import responsetypes class DecompressionMiddleware(object): """ This middleware tries to recognise and extract the possibly compressed responses that may arrive. """ def __init__(self): self._formats = { 'tar': self._is_tar, 'zip': self._is_zip, 'gz': self._is_gzip, 'bz2': self._is_bzip2 } def _is_tar(self, response): archive = StringIO(response.body) try: tar_file = tarfile.open(name=mktemp(), fileobj=archive) except tarfile.ReadError: return body = tar_file.extractfile(tar_file.members[0]).read() respcls = responsetypes.from_args(filename=tar_file.members[0].name, body=body) return response.replace(body=body, cls=respcls) def _is_zip(self, response): archive = StringIO(response.body) try: zip_file = zipfile.ZipFile(archive) except zipfile.BadZipfile: return namelist = zip_file.namelist() body = zip_file.read(namelist[0]) respcls = responsetypes.from_args(filename=namelist[0], body=body) return response.replace(body=body, cls=respcls) def _is_gzip(self, response): archive = StringIO(response.body) try: body = gzip.GzipFile(fileobj=archive).read() except IOError: return respcls = responsetypes.from_args(body=body) return response.replace(body=body, cls=respcls) def _is_bzip2(self, response): try: body = bz2.decompress(response.body) except IOError: return respcls = responsetypes.from_args(body=body) return response.replace(body=body, cls=respcls) def process_response(self, request, response, spider): if not response.body: return response for fmt, func in self._formats.iteritems(): new_response = func(response) if new_response: log.msg('Decompressed response with format: %s' % \ fmt, log.DEBUG, spider=spider) return new_response return response Scrapy-0.14.4/scrapy/contrib_exp/downloadermiddleware/__init__.py0000600000016101777760000000000011754531743025253 0ustar buildbotnogroupScrapy-0.14.4/scrapy/contrib_exp/djangoitem.py0000600000016101777760000000173611754531743021462 0ustar buildbotnogroupfrom scrapy.item import Field, Item, ItemMeta class DjangoItemMeta(ItemMeta): def __new__(mcs, class_name, bases, attrs): cls = super(DjangoItemMeta, mcs).__new__(mcs, class_name, bases, attrs) cls.fields = cls.fields.copy() if cls.django_model: cls._model_fields = [] cls._model_meta = cls.django_model._meta for model_field in cls._model_meta.fields: if model_field.auto_created == False: if model_field.name not in cls.fields: cls.fields[model_field.name] = Field() cls._model_fields.append(model_field.name) return cls class DjangoItem(Item): __metaclass__ = DjangoItemMeta django_model = None def save(self, commit=True): modelargs = dict((f, self.get(f, None)) for f in self._model_fields) model = self.django_model(**modelargs) if commit: model.save() return model Scrapy-0.14.4/scrapy/contrib_exp/__init__.py0000600000016101777760000000044411754531743021073 0ustar buildbotnogroup""" This module contains experimental code that may go into scrapy.contrib in the future, but it's not yet 
stable enough to go there (either API stable or functionality stable). Subscribe to Scrapy developers mailing list or join the IRC channel if you want to discuss about this code. """ Scrapy-0.14.4/scrapy/contrib_exp/iterators.py0000600000016101777760000000254711754531743021356 0ustar buildbotnogroupfrom scrapy.http import Response from scrapy.selector import XmlXPathSelector def xmliter_lxml(obj, nodename, namespace=None): from lxml import etree reader = _StreamReader(obj) tag = '{%s}%s' % (namespace, nodename) if namespace else nodename iterable = etree.iterparse(reader, tag=tag, encoding=reader.encoding) selxpath = '//' + ('x:%s' % nodename if namespace else nodename) for _, node in iterable: nodetext = etree.tostring(node) node.clear() xs = XmlXPathSelector(text=nodetext) if namespace: xs.register_namespace('x', namespace) yield xs.select(selxpath)[0] class _StreamReader(object): def __init__(self, obj): self._ptr = 0 if isinstance(obj, Response): self._text, self.encoding = obj.body, obj.encoding else: self._text, self.encoding = obj, 'utf-8' self._is_unicode = isinstance(self._text, unicode) def read(self, n=65535): self.read = self._read_unicode if self._is_unicode else self._read_string return self.read(n).lstrip() def _read_string(self, n=65535): s, e = self._ptr, self._ptr + n self._ptr = e return self._text[s:e] def _read_unicode(self, n=65535): s, e = self._ptr, self._ptr + n self._ptr = e return self._text[s:e].encode('utf-8') Scrapy-0.14.4/scrapy/linkextractor.py0000600000016101777760000000112211754531743017703 0ustar buildbotnogroup""" Common code and definitions used by Link extractors (located in scrapy.contrib.linkextractor). """ # common file extensions that are not followed if they occur in links IGNORED_EXTENSIONS = [ # images 'mng', 'pct', 'bmp', 'gif', 'jpg', 'jpeg', 'png', 'pst', 'psp', 'tif', 'tiff', 'ai', 'drw', 'dxf', 'eps', 'ps', 'svg', # audio 'mp3', 'wma', 'ogg', 'wav', 'ra', 'aac', 'mid', 'au', 'aiff', # video '3gp', 'asf', 'asx', 'avi', 'mov', 'mp4', 'mpg', 'qt', 'rm', 'swf', 'wmv', 'm4a', # other 'css', 'pdf', 'doc', 'exe', 'bin', 'rss', 'zip', 'rar', ] Scrapy-0.14.4/scrapy/http/0000700000016101777760000000000011754532077015422 5ustar buildbotnogroupScrapy-0.14.4/scrapy/http/cookies.py0000600000016101777760000000624411754531743017437 0ustar buildbotnogroupfrom cookielib import CookieJar as _CookieJar, DefaultCookiePolicy from scrapy.utils.httpobj import urlparse_cached class CookieJar(object): def __init__(self, policy=None): self.jar = _CookieJar(policy or DefaultCookiePolicy()) self.jar._cookies_lock = _DummyLock() def extract_cookies(self, response, request): wreq = WrappedRequest(request) wrsp = WrappedResponse(response) return self.jar.extract_cookies(wrsp, wreq) def add_cookie_header(self, request): wreq = WrappedRequest(request) self.jar.add_cookie_header(wreq) @property def _cookies(self): return self.jar._cookies def clear_session_cookies(self, *args, **kwargs): return self.jar.clear_session_cookies(*args, **kwargs) def clear(self): return self.jar.clear() def __iter__(self): return iter(self.jar) def __len__(self): return len(self.jar) def set_policy(self, pol): return self.jar.set_policy(pol) def make_cookies(self, response, request): wreq = WrappedRequest(request) wrsp = WrappedResponse(response) return self.jar.make_cookies(wrsp, wreq) def set_cookie(self, cookie): self.jar.set_cookie(cookie) def set_cookie_if_ok(self, cookie, request): self.jar.set_cookie_if_ok(cookie, WrappedRequest(request)) class _DummyLock(object): def acquire(self): pass 
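    # Note: _DummyLock stands in for the threading lock that the stdlib CookieJar keeps
    # around its cookie store (CookieJar.__init__ above replaces jar._cookies_lock with an
    # instance of this class). Scrapy drives requests from a single thread under the
    # Twisted reactor, so a no-op acquire()/release() is sufficient here.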
def release(self): pass class WrappedRequest(object): """Wraps a scrapy Request class with methods defined by urllib2.Request class to interact with CookieJar class see http://docs.python.org/library/urllib2.html#urllib2.Request """ def __init__(self, request): self.request = request def get_full_url(self): return self.request.url def get_host(self): return urlparse_cached(self.request).netloc def get_type(self): return urlparse_cached(self.request).scheme def is_unverifiable(self): """Unverifiable should indicate whether the request is unverifiable, as defined by RFC 2965. It defaults to False. An unverifiable request is one whose URL the user did not have the option to approve. For example, if the request is for an image in an HTML document, and the user had no option to approve the automatic fetching of the image, this should be true. """ return self.request.meta.get('is_unverifiable', False) def get_origin_req_host(self): return urlparse_cached(self.request).hostname def has_header(self, name): return name in self.request.headers def get_header(self, name, default=None): return self.request.headers.get(name, default) def header_items(self): return self.request.headers.items() def add_unredirected_header(self, name, value): self.request.headers.appendlist(name, value) #print 'add_unredirected_header', self.request.headers class WrappedResponse(object): def __init__(self, response): self.response = response def info(self): return self def getheaders(self, name): return self.response.headers.getlist(name) Scrapy-0.14.4/scrapy/http/response/0000700000016101777760000000000011754532077017260 5ustar buildbotnogroupScrapy-0.14.4/scrapy/http/response/xml.py0000600000016101777760000000134711754531743020440 0ustar buildbotnogroup""" This module implements the XmlResponse class which adds encoding discovering through XML encoding declarations to the TextResponse class. See documentation in docs/topics/request-response.rst """ import re from scrapy.http.response.text import TextResponse from scrapy.utils.python import memoizemethod_noargs class XmlResponse(TextResponse): _template = r'''%s\s*=\s*["']?\s*%s\s*["']?''' _encoding_re = _template % ('encoding', r'(?P[\w-]+)') XMLDECL_RE = re.compile(r'<\?xml\s.*?%s' % _encoding_re, re.I) @memoizemethod_noargs def _body_declared_encoding(self): chunk = self.body[:5000] match = self.XMLDECL_RE.search(chunk) return match.group('charset') if match else None Scrapy-0.14.4/scrapy/http/response/dammit.py0000600000016101777760000002651111754531743021113 0ustar buildbotnogroup""" This module contains a fork of the UnicodeDammit class from BeautifulSoup, that expliclty disabled any usage of chardet library. The UnicodeDammit class is used as a last resource for detecting the encoding of a response. """ import re import codecs chardet = None # we don't want to use chardet since it's very slow, class UnicodeDammit: """A class for detecting the encoding of a *ML document and converting it to a Unicode string. If the source encoding is windows-1252, can replace MS smart quotes with their HTML or XML equivalents.""" # This dictionary maps commonly seen values for "charset" in HTML # meta tags to the corresponding Python codec names. It only covers # values that aren't in Python's aliases and can't be determined # by the heuristics in find_codec. 
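    # For example, "macintosh" resolves to Python's "mac-roman" codec through this table;
    # charsets not listed here go through find_codec() below, which retries the name with
    # "-" removed and then with "-" replaced by "_", falling back to the raw charset string
    # if no codec matches.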
CHARSET_ALIASES = { "macintosh" : "mac-roman", "x-sjis" : "shift-jis" } def __init__(self, markup, overrideEncodings=[], smartQuotesTo='xml', isHTML=False): self.declaredHTMLEncoding = None self.markup, documentEncoding, sniffedEncoding = \ self._detectEncoding(markup, isHTML) self.smartQuotesTo = smartQuotesTo self.triedEncodings = [] if markup == '' or isinstance(markup, unicode): self.originalEncoding = None self.unicode = unicode(markup) return u = None for proposedEncoding in overrideEncodings: u = self._convertFrom(proposedEncoding) if u: break if not u: for proposedEncoding in (documentEncoding, sniffedEncoding): u = self._convertFrom(proposedEncoding) if u: break # If no luck and we have auto-detection library, try that: if not u and chardet and not isinstance(self.markup, unicode): u = self._convertFrom(chardet.detect(self.markup)['encoding']) # As a last resort, try utf-8 and windows-1252: if not u: for proposed_encoding in ("utf-8", "windows-1252"): u = self._convertFrom(proposed_encoding) if u: break self.unicode = u if not u: self.originalEncoding = None def _subMSChar(self, orig): """Changes a MS smart quote character to an XML or HTML entity.""" sub = self.MS_CHARS.get(orig) if isinstance(sub, tuple): if self.smartQuotesTo == 'xml': sub = '&#x%s;' % sub[1] else: sub = '&%s;' % sub[0] return sub def _convertFrom(self, proposed): proposed = self.find_codec(proposed) if not proposed or proposed in self.triedEncodings: return None self.triedEncodings.append(proposed) markup = self.markup # Convert smart quotes to HTML if coming from an encoding # that might have them. if self.smartQuotesTo and proposed.lower() in("windows-1252", "iso-8859-1", "iso-8859-2"): markup = re.compile("([\x80-\x9f])").sub \ (lambda(x): self._subMSChar(x.group(1)), markup) try: # print "Trying to convert document to %s" % proposed u = self._toUnicode(markup, proposed) self.markup = u self.originalEncoding = proposed except Exception, e: # print "That didn't work!" # print e return None #print "Correct encoding: %s" % proposed return self.markup def _toUnicode(self, data, encoding): '''Given a string and its encoding, decodes the string into Unicode. 
%encoding is a string recognized by encodings.aliases''' # strip Byte Order Mark (if present) if (len(data) >= 4) and (data[:2] == '\xfe\xff') \ and (data[2:4] != '\x00\x00'): encoding = 'utf-16be' data = data[2:] elif (len(data) >= 4) and (data[:2] == '\xff\xfe') \ and (data[2:4] != '\x00\x00'): encoding = 'utf-16le' data = data[2:] elif data[:3] == '\xef\xbb\xbf': encoding = 'utf-8' data = data[3:] elif data[:4] == '\x00\x00\xfe\xff': encoding = 'utf-32be' data = data[4:] elif data[:4] == '\xff\xfe\x00\x00': encoding = 'utf-32le' data = data[4:] newdata = unicode(data, encoding) return newdata def _detectEncoding(self, xml_data, isHTML=False): """Given a document, tries to detect its XML encoding.""" xml_encoding = sniffed_xml_encoding = None try: if xml_data[:4] == '\x4c\x6f\xa7\x94': # EBCDIC xml_data = self._ebcdic_to_ascii(xml_data) elif xml_data[:4] == '\x00\x3c\x00\x3f': # UTF-16BE sniffed_xml_encoding = 'utf-16be' xml_data = unicode(xml_data, 'utf-16be').encode('utf-8') elif (len(xml_data) >= 4) and (xml_data[:2] == '\xfe\xff') \ and (xml_data[2:4] != '\x00\x00'): # UTF-16BE with BOM sniffed_xml_encoding = 'utf-16be' xml_data = unicode(xml_data[2:], 'utf-16be').encode('utf-8') elif xml_data[:4] == '\x3c\x00\x3f\x00': # UTF-16LE sniffed_xml_encoding = 'utf-16le' xml_data = unicode(xml_data, 'utf-16le').encode('utf-8') elif (len(xml_data) >= 4) and (xml_data[:2] == '\xff\xfe') and \ (xml_data[2:4] != '\x00\x00'): # UTF-16LE with BOM sniffed_xml_encoding = 'utf-16le' xml_data = unicode(xml_data[2:], 'utf-16le').encode('utf-8') elif xml_data[:4] == '\x00\x00\x00\x3c': # UTF-32BE sniffed_xml_encoding = 'utf-32be' xml_data = unicode(xml_data, 'utf-32be').encode('utf-8') elif xml_data[:4] == '\x3c\x00\x00\x00': # UTF-32LE sniffed_xml_encoding = 'utf-32le' xml_data = unicode(xml_data, 'utf-32le').encode('utf-8') elif xml_data[:4] == '\x00\x00\xfe\xff': # UTF-32BE with BOM sniffed_xml_encoding = 'utf-32be' xml_data = unicode(xml_data[4:], 'utf-32be').encode('utf-8') elif xml_data[:4] == '\xff\xfe\x00\x00': # UTF-32LE with BOM sniffed_xml_encoding = 'utf-32le' xml_data = unicode(xml_data[4:], 'utf-32le').encode('utf-8') elif xml_data[:3] == '\xef\xbb\xbf': # UTF-8 with BOM sniffed_xml_encoding = 'utf-8' xml_data = unicode(xml_data[3:], 'utf-8').encode('utf-8') else: sniffed_xml_encoding = 'ascii' pass except: xml_encoding_match = None xml_encoding_match = re.compile( '^<\?.*encoding=[\'"](.*?)[\'"].*\?>').match(xml_data) if not xml_encoding_match and isHTML: regexp = re.compile('<\s*meta[^>]+charset=([^>]*?)[;\'">]', re.I) xml_encoding_match = regexp.search(xml_data) if xml_encoding_match is not None: xml_encoding = xml_encoding_match.groups()[0].lower() if isHTML: self.declaredHTMLEncoding = xml_encoding if sniffed_xml_encoding and \ (xml_encoding in ('iso-10646-ucs-2', 'ucs-2', 'csunicode', 'iso-10646-ucs-4', 'ucs-4', 'csucs4', 'utf-16', 'utf-32', 'utf_16', 'utf_32', 'utf16', 'u16')): xml_encoding = sniffed_xml_encoding return xml_data, xml_encoding, sniffed_xml_encoding def find_codec(self, charset): return self._codec(self.CHARSET_ALIASES.get(charset, charset)) \ or (charset and self._codec(charset.replace("-", ""))) \ or (charset and self._codec(charset.replace("-", "_"))) \ or charset def _codec(self, charset): if not charset: return charset codec = None try: codecs.lookup(charset) codec = charset except (LookupError, ValueError): pass return codec EBCDIC_TO_ASCII_MAP = None def _ebcdic_to_ascii(self, s): c = self.__class__ if not c.EBCDIC_TO_ASCII_MAP: emap = 
(0,1,2,3,156,9,134,127,151,141,142,11,12,13,14,15, 16,17,18,19,157,133,8,135,24,25,146,143,28,29,30,31, 128,129,130,131,132,10,23,27,136,137,138,139,140,5,6,7, 144,145,22,147,148,149,150,4,152,153,154,155,20,21,158,26, 32,160,161,162,163,164,165,166,167,168,91,46,60,40,43,33, 38,169,170,171,172,173,174,175,176,177,93,36,42,41,59,94, 45,47,178,179,180,181,182,183,184,185,124,44,37,95,62,63, 186,187,188,189,190,191,192,193,194,96,58,35,64,39,61,34, 195,97,98,99,100,101,102,103,104,105,196,197,198,199,200, 201,202,106,107,108,109,110,111,112,113,114,203,204,205, 206,207,208,209,126,115,116,117,118,119,120,121,122,210, 211,212,213,214,215,216,217,218,219,220,221,222,223,224, 225,226,227,228,229,230,231,123,65,66,67,68,69,70,71,72, 73,232,233,234,235,236,237,125,74,75,76,77,78,79,80,81, 82,238,239,240,241,242,243,92,159,83,84,85,86,87,88,89, 90,244,245,246,247,248,249,48,49,50,51,52,53,54,55,56,57, 250,251,252,253,254,255) import string c.EBCDIC_TO_ASCII_MAP = string.maketrans( \ ''.join(map(chr, range(256))), ''.join(map(chr, emap))) return s.translate(c.EBCDIC_TO_ASCII_MAP) MS_CHARS = { '\x80' : ('euro', '20AC'), '\x81' : ' ', '\x82' : ('sbquo', '201A'), '\x83' : ('fnof', '192'), '\x84' : ('bdquo', '201E'), '\x85' : ('hellip', '2026'), '\x86' : ('dagger', '2020'), '\x87' : ('Dagger', '2021'), '\x88' : ('circ', '2C6'), '\x89' : ('permil', '2030'), '\x8A' : ('Scaron', '160'), '\x8B' : ('lsaquo', '2039'), '\x8C' : ('OElig', '152'), '\x8D' : '?', '\x8E' : ('#x17D', '17D'), '\x8F' : '?', '\x90' : '?', '\x91' : ('lsquo', '2018'), '\x92' : ('rsquo', '2019'), '\x93' : ('ldquo', '201C'), '\x94' : ('rdquo', '201D'), '\x95' : ('bull', '2022'), '\x96' : ('ndash', '2013'), '\x97' : ('mdash', '2014'), '\x98' : ('tilde', '2DC'), '\x99' : ('trade', '2122'), '\x9a' : ('scaron', '161'), '\x9b' : ('rsaquo', '203A'), '\x9c' : ('oelig', '153'), '\x9d' : '?', '\x9e' : ('#x17E', '17E'), '\x9f' : ('Yuml', ''),} ####################################################################### Scrapy-0.14.4/scrapy/http/response/html.py0000600000016101777760000000201211754531743020572 0ustar buildbotnogroup""" This module implements the HtmlResponse class which adds encoding discovering through HTML encoding declarations to the TextResponse class. See documentation in docs/topics/request-response.rst """ import re from scrapy.http.response.text import TextResponse from scrapy.utils.python import memoizemethod_noargs class HtmlResponse(TextResponse): _template = r'''%s\s*=\s*["']?\s*%s\s*["']?''' _httpequiv_re = _template % ('http-equiv', 'Content-Type') _content_re = _template % ('content', r'(?P[^;]+);\s*charset=(?P[\w-]+)') _content2_re = _template % ('charset', r'(?P[\w-]+)') METATAG_RE = re.compile(r'" % (self.status, self.url) __repr__ = __str__ def copy(self): """Return a copy of this Response""" return self.replace() def replace(self, *args, **kwargs): """Create a new Response with the same attributes except for those given new values. """ for x in ['url', 'status', 'headers', 'body', 'request', 'flags']: kwargs.setdefault(x, getattr(self, x)) cls = kwargs.pop('cls', self.__class__) return cls(*args, **kwargs) Scrapy-0.14.4/scrapy/http/response/text.py0000600000016101777760000000704711754531743020627 0ustar buildbotnogroup""" This module implements the TextResponse class which adds encoding handling and discovering (through HTTP headers) to base Response class. 
See documentation in docs/topics/request-response.rst """ import re import codecs from scrapy.http.response.dammit import UnicodeDammit from scrapy.http.response import Response from scrapy.utils.python import memoizemethod_noargs from scrapy.utils.encoding import encoding_exists, resolve_encoding from scrapy.conf import settings # Python decoder doesn't follow unicode standard when handling # bad utf-8 encoded strings. see http://bugs.python.org/issue8271 codecs.register_error('scrapy_replace', lambda exc: (u'\ufffd', exc.start+1)) class TextResponse(Response): _DEFAULT_ENCODING = settings['DEFAULT_RESPONSE_ENCODING'] _ENCODING_RE = re.compile(r'charset=([\w-]+)', re.I) def __init__(self, *args, **kwargs): self._encoding = kwargs.pop('encoding', None) self._cached_benc = None self._cached_ubody = None super(TextResponse, self).__init__(*args, **kwargs) def _set_url(self, url): if isinstance(url, unicode): if self.encoding is None: raise TypeError('Cannot convert unicode url - %s has no encoding' % type(self).__name__) self._url = url.encode(self.encoding) else: super(TextResponse, self)._set_url(url) def _set_body(self, body): self._body = '' if isinstance(body, unicode): if self.encoding is None: raise TypeError('Cannot convert unicode body - %s has no encoding' % type(self).__name__) self._body = body.encode(self._encoding) else: super(TextResponse, self)._set_body(body) def replace(self, *args, **kwargs): kwargs.setdefault('encoding', self.encoding) return Response.replace(self, *args, **kwargs) @property def encoding(self): return self._get_encoding(infer=True) def _get_encoding(self, infer=False): enc = self._declared_encoding() if enc and not encoding_exists(enc): enc = None if not enc and infer: enc = self._body_inferred_encoding() if not enc: enc = self._DEFAULT_ENCODING return resolve_encoding(enc) def _declared_encoding(self): return self._encoding or self._headers_encoding() \ or self._body_declared_encoding() def body_as_unicode(self): """Return body as unicode""" if self._cached_ubody is None: self._cached_ubody = self.body.decode(self.encoding, 'scrapy_replace') return self._cached_ubody @memoizemethod_noargs def _headers_encoding(self): content_type = self.headers.get('Content-Type') if content_type: m = self._ENCODING_RE.search(content_type) if m: encoding = m.group(1) if encoding_exists(encoding): return encoding def _body_inferred_encoding(self): if self._cached_benc is None: enc = self._get_encoding() dammit = UnicodeDammit(self.body, [enc]) benc = dammit.originalEncoding self._cached_benc = benc # UnicodeDammit is buggy decoding utf-16 if self._cached_ubody is None and benc != 'utf-16': self._cached_ubody = dammit.unicode return self._cached_benc def _body_declared_encoding(self): # implemented in subclasses (XmlResponse, HtmlResponse) return None Scrapy-0.14.4/scrapy/http/headers.py0000600000016101777760000000366311754531743017420 0ustar buildbotnogroupfrom w3lib.http import headers_dict_to_raw from scrapy.utils.datatypes import CaselessDict class Headers(CaselessDict): """Case insensitive http headers dictionary""" def __init__(self, seq=None, encoding='utf-8'): self.encoding = encoding super(Headers, self).__init__(seq) def normkey(self, key): """Headers must not be unicode""" if isinstance(key, unicode): return key.title().encode(self.encoding) return key.title() def normvalue(self, value): """Headers must not be unicode""" if not hasattr(value, '__iter__'): value = [value] return [x.encode(self.encoding) if isinstance(x, unicode) else x \ for x in value] def 
__getitem__(self, key): try: return super(Headers, self).__getitem__(key)[-1] except IndexError: return None def get(self, key, def_val=None): try: return super(Headers, self).get(key, def_val)[-1] except IndexError: return None def getlist(self, key, def_val=None): try: return super(Headers, self).__getitem__(key) except KeyError: if def_val is not None: return self.normvalue(def_val) return [] def setlist(self, key, list_): self[key] = list_ def setlistdefault(self, key, default_list=()): return self.setdefault(key, default_list) def appendlist(self, key, value): lst = self.getlist(key) lst.extend(self.normvalue(value)) self[key] = lst def items(self): return list(self.iteritems()) def iteritems(self): return ((k, self.getlist(k)) for k in self.keys()) def values(self): return [self[k] for k in self.keys()] def to_string(self): return headers_dict_to_raw(self) def __copy__(self): return self.__class__(self) copy = __copy__ Scrapy-0.14.4/scrapy/http/__init__.py0000600000016101777760000000104111754531743017530 0ustar buildbotnogroup""" Module containing all HTTP related classes Use this module (instead of the more specific ones) when importing Headers, Request and Response outside this module. """ from scrapy.http.headers import Headers from scrapy.http.request import Request from scrapy.http.request.form import FormRequest from scrapy.http.request.rpc import XmlRpcRequest from scrapy.http.response import Response from scrapy.http.response.html import HtmlResponse from scrapy.http.response.xml import XmlResponse from scrapy.http.response.text import TextResponse Scrapy-0.14.4/scrapy/http/request/0000700000016101777760000000000011754532077017112 5ustar buildbotnogroupScrapy-0.14.4/scrapy/http/request/rpc.py0000600000016101777760000000206511754531743020254 0ustar buildbotnogroup""" This module implements the XmlRpcRequest class which is a more convenient class (than Request) to generate xml-rpc requests. See documentation in docs/topics/request-response.rst """ import xmlrpclib from scrapy.http.request import Request from scrapy.utils.python import get_func_args DUMPS_ARGS = get_func_args(xmlrpclib.dumps) class XmlRpcRequest(Request): def __init__(self, *args, **kwargs): encoding = kwargs.get('encoding', None) if 'body' not in kwargs and 'params' in kwargs: kw = dict((k, kwargs.pop(k)) for k in DUMPS_ARGS if k in kwargs) kwargs['body'] = xmlrpclib.dumps(**kw) # spec defines that requests must use POST method kwargs.setdefault('method', 'POST') # xmlrpc queries multiple times over the same url kwargs.setdefault('dont_filter', True) # restore encoding if encoding is not None: kwargs['encoding'] = encoding super(XmlRpcRequest, self).__init__(*args, **kwargs) self.headers.setdefault('Content-Type', 'text/xml') Scrapy-0.14.4/scrapy/http/request/form.py0000600000016101777760000000532211754531743020432 0ustar buildbotnogroup""" This module implements the FormRequest class which is a more convenient class (than Request) to generate Requests based on form data.
See documentation in docs/topics/request-response.rst """ import urllib from cStringIO import StringIO from scrapy.xlib.ClientForm import ParseFile from scrapy.http.request import Request from scrapy.utils.python import unicode_to_str def _unicode_to_str(string, encoding): if hasattr(string, '__iter__'): return [unicode_to_str(k, encoding) for k in string] else: return unicode_to_str(string, encoding) class FormRequest(Request): def __init__(self, *args, **kwargs): formdata = kwargs.pop('formdata', None) super(FormRequest, self).__init__(*args, **kwargs) if formdata: items = formdata.iteritems() if isinstance(formdata, dict) else formdata query = [(unicode_to_str(k, self.encoding), _unicode_to_str(v, self.encoding)) for k, v in items] self.method = 'POST' self._set_body(urllib.urlencode(query, doseq=1)) self.headers['Content-Type'] = 'application/x-www-form-urlencoded' @classmethod def from_response(cls, response, formname=None, formnumber=0, formdata=None, clickdata=None, dont_click=False, **kwargs): encoding = getattr(response, 'encoding', 'utf-8') forms = ParseFile(StringIO(response.body), response.url, encoding=encoding, backwards_compat=False) if not forms: raise ValueError("No
<form> element found in %s" % response) form = None if formname: for f in forms: if f.name == formname: form = f break if not form: try: form = forms[formnumber] except IndexError: raise IndexError("Form number %d not found in %s" % (formnumber, response)) if formdata: # remove all existing fields with the same name before, so that # formdata fields can properly override existing ones, # which is the desired behaviour form.controls = [c for c in form.controls if c.name not in formdata] for k, v in formdata.iteritems(): for v2 in v if hasattr(v, '__iter__') else [v]: form.new_control('text', k, {'value': v2}) if dont_click: url, body, headers = form._switch_click('request_data') else: url, body, headers = form.click_request_data(**(clickdata or {})) kwargs.setdefault('headers', {}).update(headers) return cls(url, method=form.method, body=body, **kwargs) Scrapy-0.14.4/scrapy/http/request/__init__.py0000600000016101777760000000662511754531743021225 0ustar buildbotnogroup""" This module implements the Request class which is used to represent HTTP requests in Scrapy. See documentation in docs/topics/request-response.rst """ import copy from w3lib.url import safe_url_string from scrapy.http.headers import Headers from scrapy.utils.trackref import object_ref from scrapy.utils.decorator import deprecated from scrapy.utils.url import escape_ajax from scrapy.http.common import deprecated_setter class Request(object_ref): def __init__(self, url, callback=None, method='GET', headers=None, body=None, cookies=None, meta=None, encoding='utf-8', priority=0, dont_filter=False, errback=None): self._encoding = encoding # this one has to be set first self.method = str(method).upper() self._set_url(url) self._set_body(body) assert isinstance(priority, int), "Request priority not an integer: %r" % priority self.priority = priority assert callback or not errback, "Cannot use errback without a callback" self.callback = callback self.errback = errback self.cookies = cookies or {} self.headers = Headers(headers or {}, encoding=encoding) self.dont_filter = dont_filter self._meta = dict(meta) if meta else None @property def meta(self): if self._meta is None: self._meta = {} return self._meta def _get_url(self): return self._url def _set_url(self, url): if isinstance(url, str): self._url = escape_ajax(safe_url_string(url)) elif isinstance(url, unicode): if self.encoding is None: raise TypeError('Cannot convert unicode url - %s has no encoding' % type(self).__name__) self._set_url(url.encode(self.encoding)) else: raise TypeError('Request url must be str or unicode, got %s:' % type(url).__name__) if ':' not in self._url: raise ValueError('Missing scheme in request url: %s' % self._url) url = property(_get_url, deprecated_setter(_set_url, 'url')) def _get_body(self): return self._body def _set_body(self, body): if isinstance(body, str): self._body = body elif isinstance(body, unicode): if self.encoding is None: raise TypeError('Cannot convert unicode body - %s has no encoding' % type(self).__name__) self._body = body.encode(self.encoding) elif body is None: self._body = '' else: raise TypeError("Request body must be either str or unicode. 
Got: '%s'" % type(body).__name__) body = property(_get_body, deprecated_setter(_set_body, 'body')) @property def encoding(self): return self._encoding def __str__(self): return "<%s %s>" % (self.method, self.url) __repr__ = __str__ def copy(self): """Return a copy of this Request""" return self.replace() def replace(self, *args, **kwargs): """Create a new Request with the same attributes except for those given new values. """ for x in ['url', 'method', 'headers', 'body', 'cookies', 'meta', \ 'encoding', 'priority', 'dont_filter', 'callback', 'errback']: kwargs.setdefault(x, getattr(self, x)) cls = kwargs.pop('cls', self.__class__) return cls(*args, **kwargs) Scrapy-0.14.4/scrapy/http/common.py0000600000016101777760000000060611754531743017267 0ustar buildbotnogroupimport warnings from scrapy.exceptions import ScrapyDeprecationWarning def deprecated_setter(setter, attrname): def newsetter(self, value): c = self.__class__.__name__ warnings.warn("Don't modify %s.%s attribute, use %s.replace() instead" % \ (c, attrname, c), ScrapyDeprecationWarning, stacklevel=2) return setter(self, value) return newsetter Scrapy-0.14.4/scrapy/log.py0000600000016101777760000000775211754531743015612 0ustar buildbotnogroup""" Scrapy logging facility See documentation in docs/topics/logging.rst """ import sys import logging import warnings from twisted.python import log import scrapy from scrapy.conf import settings from scrapy.utils.python import unicode_to_str from scrapy.utils.misc import load_object from scrapy.exceptions import ScrapyDeprecationWarning # Logging levels DEBUG = logging.DEBUG INFO = logging.INFO WARNING = logging.WARNING ERROR = logging.ERROR CRITICAL = logging.CRITICAL SILENT = CRITICAL + 1 level_names = { logging.DEBUG: "DEBUG", logging.INFO: "INFO", logging.WARNING: "WARNING", logging.ERROR: "ERROR", logging.CRITICAL: "CRITICAL", SILENT: "SILENT", } started = False class ScrapyFileLogObserver(log.FileLogObserver): def __init__(self, f, level=INFO, encoding='utf-8'): self.level = level self.encoding = encoding log.FileLogObserver.__init__(self, f) def emit(self, eventDict): ev = _adapt_eventdict(eventDict, self.level, self.encoding) if ev is not None: log.FileLogObserver.emit(self, ev) def _adapt_eventdict(eventDict, log_level=INFO, encoding='utf-8', prepend_level=True): """Adapt Twisted log eventDict making it suitable for logging with a Scrapy log observer. It may return None to indicate that the event should be ignored by a Scrapy log observer. `log_level` is the minimum level being logged, and `encoding` is the log encoding. 
""" ev = eventDict.copy() if ev['isError']: ev.setdefault('logLevel', ERROR) # ignore non-error messages from outside scrapy if ev.get('system') != 'scrapy' and not ev['isError']: return level = ev.get('logLevel') if level < log_level: return spider = ev.get('spider') if spider: ev['system'] = spider.name message = ev.get('message') lvlname = level_names.get(level, 'NOLEVEL') if message: message = [unicode_to_str(x, encoding) for x in message] if prepend_level: message[0] = "%s: %s" % (lvlname, message[0]) ev['message'] = message why = ev.get('why') if why: why = unicode_to_str(why, encoding) if prepend_level: why = "%s: %s" % (lvlname, why) ev['why'] = why return ev def _get_log_level(level_name_or_id=None): if level_name_or_id is None: lvlname = settings['LOG_LEVEL'] return globals()[lvlname] elif isinstance(level_name_or_id, int): return level_name_or_id elif isinstance(level_name_or_id, basestring): return globals()[level_name_or_id] else: raise ValueError("Unknown log level: %r" % level_name_or_id) def start(logfile=None, loglevel=None, logstdout=None): global started if started or not settings.getbool('LOG_ENABLED'): return started = True if log.defaultObserver: # check twisted log not already started loglevel = _get_log_level(loglevel) logfile = logfile or settings['LOG_FILE'] file = open(logfile, 'a') if logfile else sys.stderr if logstdout is None: logstdout = settings.getbool('LOG_STDOUT') sflo = ScrapyFileLogObserver(file, loglevel, settings['LOG_ENCODING']) _oldshowwarning = warnings.showwarning log.startLoggingWithObserver(sflo.emit, setStdout=logstdout) # restore warnings, wrongly silenced by Twisted warnings.showwarning = _oldshowwarning msg("Scrapy %s started (bot: %s)" % (scrapy.__version__, \ settings['BOT_NAME'])) def msg(message, level=INFO, **kw): if 'component' in kw: warnings.warn("Argument `component` of scrapy.log.msg() is deprecated", \ ScrapyDeprecationWarning, stacklevel=2) kw.setdefault('system', 'scrapy') kw['logLevel'] = level log.msg(message, **kw) def err(_stuff=None, _why=None, **kw): kw.setdefault('system', 'scrapy') kw['logLevel'] = kw.pop('level', ERROR) log.err(_stuff, _why, **kw) formatter = load_object(settings['LOG_FORMATTER'])() Scrapy-0.14.4/scrapy/item.py0000600000016101777760000000366011754531743015761 0ustar buildbotnogroup""" Scrapy Item See documentation in docs/topics/item.rst """ from pprint import pformat from UserDict import DictMixin from scrapy.utils.trackref import object_ref class BaseItem(object_ref): """Base class for all scraped items.""" pass class Field(dict): """Container of field metadata""" class ItemMeta(type): def __new__(mcs, class_name, bases, attrs): fields = {} new_attrs = {} for n, v in attrs.iteritems(): if isinstance(v, Field): fields[n] = v else: new_attrs[n] = v cls = type.__new__(mcs, class_name, bases, new_attrs) cls.fields = cls.fields.copy() cls.fields.update(fields) return cls class DictItem(DictMixin, BaseItem): fields = {} def __init__(self, *args, **kwargs): self._values = {} if args or kwargs: # avoid creating dict for most common case for k, v in dict(*args, **kwargs).iteritems(): self[k] = v def __getitem__(self, key): return self._values[key] def __setitem__(self, key, value): if key in self.fields: self._values[key] = value else: raise KeyError("%s does not support field: %s" % \ (self.__class__.__name__, key)) def __delitem__(self, key): del self._values[key] def __getattr__(self, name): if name in self.fields: raise AttributeError("Use item[%r] to get field value" % name) raise AttributeError(name) 
def __setattr__(self, name, value): if not name.startswith('_'): raise AttributeError("Use item[%r] = %r to set field value" % \ (name, value)) super(DictItem, self).__setattr__(name, value) def keys(self): return self._values.keys() def __repr__(self): return pformat(dict(self)) class Item(DictItem): __metaclass__ = ItemMeta Scrapy-0.14.4/scrapy/commands/0000700000016101777760000000000011754532077016244 5ustar buildbotnogroupScrapy-0.14.4/scrapy/commands/genspider.py0000600000016101777760000001044611754531743020604 0ustar buildbotnogroupimport os import shutil import string from os.path import join, dirname, abspath, exists, splitext import scrapy from scrapy.command import ScrapyCommand from scrapy.conf import settings from scrapy.utils.template import render_templatefile, string_camelcase from scrapy.exceptions import UsageError def sanitize_module_name(module_name): """Sanitize the given module name, by replacing dashes and points with underscores and prefixing it with a letter if it doesn't start with one """ module_name = module_name.replace('-', '_').replace('.', '_') if module_name[0] not in string.ascii_letters: module_name = "a" + module_name return module_name _templates_base_dir = settings['TEMPLATES_DIR'] or join(scrapy.__path__[0], \ 'templates') class Command(ScrapyCommand): requires_project = True default_settings = {'LOG_ENABLED': False} templates_dir = join(_templates_base_dir, 'spiders') def syntax(self): return "[options] " def short_desc(self): return "Generate new spider using pre-defined templates" def add_options(self, parser): ScrapyCommand.add_options(self, parser) parser.add_option("-l", "--list", dest="list", action="store_true", help="List available templates") parser.add_option("-e", "--edit", dest="edit", action="store_true", help="Edit spider after creating it") parser.add_option("-d", "--dump", dest="dump", metavar="TEMPLATE", help="Dump template to standard output") parser.add_option("-t", "--template", dest="template", default="crawl", help="Uses a custom template.") parser.add_option("--force", dest="force", action="store_true", help="If the spider already exists, overwrite it with the template") def run(self, args, opts): if opts.list: self._list_templates() return if opts.dump: template_file = self._find_template(opts.dump) if template_file: print open(template_file, 'r').read() return if len(args) != 2: raise UsageError() name, domain = args[0:2] module = sanitize_module_name(name) try: spider = self.crawler.spiders.create(name) except KeyError: pass else: # if spider already exists and not --force then halt if not opts.force: print "Spider %r already exists in module:" % name print " %s" % spider.__module__ return template_file = self._find_template(opts.template) if template_file: self._genspider(module, name, domain, opts.template, template_file) if opts.edit: self.exitcode = os.system('scrapy edit "%s"' % name) def _genspider(self, module, name, domain, template_name, template_file): """Generate the spider module, based on the given template""" tvars = { 'project_name': settings.get('BOT_NAME'), 'ProjectName': string_camelcase(settings.get('BOT_NAME')), 'module': module, 'name': name, 'domain': domain, 'classname': '%sSpider' % ''.join([s.capitalize() \ for s in module.split('_')]) } spiders_module = __import__(settings['NEWSPIDER_MODULE'], {}, {}, ['']) spiders_dir = abspath(dirname(spiders_module.__file__)) spider_file = "%s.py" % join(spiders_dir, module) shutil.copyfile(template_file, spider_file) render_templatefile(spider_file, **tvars) 
print "Created spider %r using template %r in module:" % (name, \ template_name) print " %s.%s" % (spiders_module.__name__, module) def _find_template(self, template): template_file = join(self.templates_dir, '%s.tmpl' % template) if exists(template_file): return template_file print "Unable to find template: %s\n" % template print 'Use "scrapy genspider --list" to see all available templates.' def _list_templates(self): print "Available templates:" for filename in sorted(os.listdir(self.templates_dir)): if filename.endswith('.tmpl'): print " %s" % splitext(filename)[0] Scrapy-0.14.4/scrapy/commands/parse.py0000600000016101777760000001057711754531743017743 0ustar buildbotnogroupfrom w3lib.url import is_url from scrapy.command import ScrapyCommand from scrapy.http import Request from scrapy.item import BaseItem from scrapy.utils import display from scrapy.utils.spider import iterate_spider_output, create_spider_for_request from scrapy.exceptions import UsageError from scrapy import log class Command(ScrapyCommand): requires_project = True def syntax(self): return "[options] " def short_desc(self): return "Parse URL (using its spider) and print the results" def add_options(self, parser): ScrapyCommand.add_options(self, parser) parser.add_option("--spider", dest="spider", default=None, \ help="use this spider without looking for one") parser.add_option("--nolinks", dest="nolinks", action="store_true", \ help="don't show links to follow (extracted requests)") parser.add_option("--noitems", dest="noitems", action="store_true", \ help="don't show scraped items") parser.add_option("--nocolour", dest="nocolour", action="store_true", \ help="avoid using pygments to colorize the output") parser.add_option("-r", "--rules", dest="rules", action="store_true", \ help="use CrawlSpider rules to discover the callback") parser.add_option("-c", "--callback", dest="callback", \ help="use this callback for parsing, instead looking for a callback") def pipeline_process(self, item, spider, opts): return item def run_callback(self, spider, response, callback, opts): cb = callback if callable(callback) else getattr(spider, callback, None) if not cb: log.msg('Cannot find callback %r in spider: %s' % (callback, spider.name)) return (), () items, requests = [], [] for x in iterate_spider_output(cb(response)): if isinstance(x, BaseItem): items.append(x) elif isinstance(x, Request): requests.append(x) return items, requests def get_callback_from_rules(self, spider, response): if getattr(spider, 'rules', None): for rule in spider.rules: if rule.link_extractor.matches(response.url) and rule.callback: return rule.callback else: log.msg("No CrawlSpider rules found in spider %r, please specify " "a callback to use for parsing" % spider.name, log.ERROR) def print_results(self, items, requests, cb_name, opts): if not opts.noitems: print "# Scraped Items - callback: %s" % cb_name, "-"*60 display.pprint([dict(x) for x in items], colorize=not opts.nocolour) if not opts.nolinks: print "# Requests - callback: %s" % cb_name, "-"*68 display.pprint(requests, colorize=not opts.nocolour) def get_spider(self, request, opts): if opts.spider: try: return self.crawler.spiders.create(opts.spider) except KeyError: log.msg('Unable to find spider: %s' % opts.spider, log.ERROR) else: spider = create_spider_for_request(self.crawler.spiders, request) if spider: return spider log.msg('Unable to find spider for: %s' % request, log.ERROR) def get_response_and_spider(self, url, opts): responses = [] # to collect downloaded responses request = 
Request(url, callback=responses.append) spider = self.get_spider(request, opts) if not spider: return None, None self.crawler.crawl(spider, [request]) self.crawler.start() if not responses: log.msg('No response downloaded for: %s' % request, log.ERROR, \ spider=spider) return None, None return responses[0], spider def run(self, args, opts): if not len(args) == 1 or not is_url(args[0]): raise UsageError() response, spider = self.get_response_and_spider(args[0], opts) if not response: return callback = None if opts.callback: callback = opts.callback elif opts.rules: callback = self.get_callback_from_rules(spider, response) items, requests = self.run_callback(spider, response, callback or 'parse', \ opts) self.print_results(items, requests, callback, opts) Scrapy-0.14.4/scrapy/commands/edit.py0000600000016101777760000000170711754531743017551 0ustar buildbotnogroupimport sys, os from scrapy.command import ScrapyCommand from scrapy.exceptions import UsageError class Command(ScrapyCommand): requires_project = True default_settings = {'LOG_ENABLED': False} def syntax(self): return "<spider>" def short_desc(self): return "Edit spider" def long_desc(self): return "Edit a spider using the editor defined in EDITOR setting" def _err(self, msg): sys.stderr.write(msg + os.linesep) self.exitcode = 1 def run(self, args, opts): if len(args) != 1: raise UsageError() editor = self.crawler.settings['EDITOR'] try: spider = self.crawler.spiders.create(args[0]) except KeyError: return self._err("Spider not found: %s" % args[0]) sfile = sys.modules[spider.__module__].__file__ sfile = sfile.replace('.pyc', '.py') self.exitcode = os.system('%s "%s"' % (editor, sfile)) Scrapy-0.14.4/scrapy/commands/list.py0000600000016101777760000000047011754531743017573 0ustar buildbotnogroupfrom scrapy.command import ScrapyCommand class Command(ScrapyCommand): requires_project = True default_settings = {'LOG_ENABLED': False} def short_desc(self): return "List available spiders" def run(self, args, opts): for s in self.crawler.spiders.list(): print s Scrapy-0.14.4/scrapy/commands/settings.py0000600000016101777760000000266411754531743020467 0ustar buildbotnogroupfrom scrapy.command import ScrapyCommand class Command(ScrapyCommand): requires_project = False default_settings = {'LOG_ENABLED': False} def syntax(self): return "[options]" def short_desc(self): return "Get settings values" def add_options(self, parser): ScrapyCommand.add_options(self, parser) parser.add_option("--get", dest="get", metavar="SETTING", \ help="print raw setting value") parser.add_option("--getbool", dest="getbool", metavar="SETTING", \ help="print setting value, interpreted as a boolean") parser.add_option("--getint", dest="getint", metavar="SETTING", \ help="print setting value, interpreted as an integer") parser.add_option("--getfloat", dest="getfloat", metavar="SETTING", \ help="print setting value, interpreted as a float") parser.add_option("--getlist", dest="getlist", metavar="SETTING", \ help="print setting value, interpreted as a list") def run(self, args, opts): settings = self.crawler.settings if opts.get: print settings.get(opts.get) elif opts.getbool: print settings.getbool(opts.getbool) elif opts.getint: print settings.getint(opts.getint) elif opts.getfloat: print settings.getfloat(opts.getfloat) elif opts.getlist: print settings.getlist(opts.getlist) Scrapy-0.14.4/scrapy/commands/version.py0000600000016101777760000000152211754531743020304 0ustar buildbotnogroupimport sys import platform import twisted import scrapy from scrapy.command import
ScrapyCommand class Command(ScrapyCommand): def syntax(self): return "[-v]" def short_desc(self): return "Print Scrapy version" def add_options(self, parser): ScrapyCommand.add_options(self, parser) parser.add_option("--verbose", "-v", dest="verbose", action="store_true", help="also display twisted/python/platform info (useful for bug reports)") def run(self, args, opts): if opts.verbose: print "Scrapy : %s" % scrapy.__version__ print "Twisted : %s" % twisted.version.short() print "Python : %s" % sys.version.replace("\n", "- ") print "Platform: %s" % platform.platform() else: print "Scrapy %s" % scrapy.__version__ Scrapy-0.14.4/scrapy/commands/server.py0000600000016101777760000000120111754531743020117 0ustar buildbotnogroupfrom __future__ import absolute_import from scrapy.command import ScrapyCommand from scrapy.exceptions import UsageError class Command(ScrapyCommand): requires_project = True def short_desc(self): return "Start Scrapyd server for this project" def long_desc(self): return "Start Scrapyd server for this project, which can be referred " \ "from the JSON API with the name 'default'" def run(self, args, opts): try: from scrapyd.script import execute execute() except ImportError: raise UsageError("Scrapyd is not available in this system") Scrapy-0.14.4/scrapy/commands/startproject.py0000600000016101777760000000334711754531743021352 0ustar buildbotnogroupimport sys import string import re import shutil from os.path import join, exists import scrapy from scrapy.command import ScrapyCommand from scrapy.utils.template import render_templatefile, string_camelcase from scrapy.utils.py26 import ignore_patterns, copytree from scrapy.exceptions import UsageError TEMPLATES_PATH = join(scrapy.__path__[0], 'templates', 'project') TEMPLATES_TO_RENDER = ( ('scrapy.cfg',), ('${project_name}', 'settings.py.tmpl'), ('${project_name}', 'items.py.tmpl'), ('${project_name}', 'pipelines.py.tmpl'), ) IGNORE = ignore_patterns('*.pyc', '.svn') class Command(ScrapyCommand): requires_project = False def syntax(self): return "" def short_desc(self): return "Create new project" def run(self, args, opts): if len(args) != 1: raise UsageError() project_name = args[0] if not re.search(r'^[_a-zA-Z]\w*$', project_name): print 'Error: Project names must begin with a letter and contain only\n' \ 'letters, numbers and underscores' sys.exit(1) elif exists(project_name): print "Error: directory %r already exists" % project_name sys.exit(1) moduletpl = join(TEMPLATES_PATH, 'module') copytree(moduletpl, join(project_name, project_name), ignore=IGNORE) shutil.copy(join(TEMPLATES_PATH, 'scrapy.cfg'), project_name) for paths in TEMPLATES_TO_RENDER: path = join(*paths) tplfile = join(project_name, string.Template(path).substitute(project_name=project_name)) render_templatefile(tplfile, project_name=project_name, ProjectName=string_camelcase(project_name)) Scrapy-0.14.4/scrapy/commands/deploy.py0000600000016101777760000001553111754531743020120 0ustar buildbotnogroupfrom __future__ import with_statement import sys import os import glob import tempfile import shutil import time import urllib2 import netrc from urlparse import urlparse, urljoin from subprocess import Popen, PIPE, check_call from w3lib.form import encode_multipart from scrapy.command import ScrapyCommand from scrapy.exceptions import UsageError from scrapy.utils.py26 import json from scrapy.utils.http import basic_auth_header from scrapy.utils.conf import get_config, closest_scrapy_cfg _SETUP_PY_TEMPLATE = \ """# Automatically created by: scrapy deploy from 
setuptools import setup, find_packages setup( name = 'project', version = '1.0', packages = find_packages(), entry_points = {'scrapy': ['settings = %(settings)s']}, ) """ class Command(ScrapyCommand): requires_project = True def syntax(self): return "[options] [ [target] | -l | -L ]" def short_desc(self): return "Deploy project in Scrapyd target" def long_desc(self): return "Deploy the current project into the given Scrapyd server " \ "(known as target)" def add_options(self, parser): ScrapyCommand.add_options(self, parser) parser.add_option("-p", "--project", help="the project name in the target") parser.add_option("-v", "--version", help="the version to deploy. Defaults to current timestamp") parser.add_option("-l", "--list-targets", action="store_true", \ help="list available targets") parser.add_option("-L", "--list-projects", metavar="TARGET", \ help="list available projects on TARGET") parser.add_option("--egg", metavar="FILE", help="use the given egg, instead of building it") parser.add_option("--build-egg", metavar="FILE", help="only build the egg, don't deploy it") def run(self, args, opts): try: import setuptools except ImportError: raise UsageError("setuptools not installed") if opts.list_targets: for name, target in _get_targets().items(): print "%-20s %s" % (name, target['url']) return if opts.list_projects: target = _get_target(opts.list_projects) req = urllib2.Request(_url(target, 'listprojects.json')) _add_auth_header(req, target) f = urllib2.urlopen(req) projects = json.loads(f.read())['projects'] print os.linesep.join(projects) return tmpdir = None if opts.build_egg: # build egg only egg, tmpdir = _build_egg() _log("Writing egg to %s" % opts.build_egg) shutil.copyfile(egg, opts.build_egg) else: # buld egg and deploy target_name = _get_target_name(args) target = _get_target(target_name) project = _get_project(target, opts) version = _get_version(target, opts) if opts.egg: _log("Using egg: %s" % opts.egg) egg = opts.egg else: _log("Building egg of %s-%s" % (project, version)) egg, tmpdir = _build_egg() _upload_egg(target, egg, project, version) if tmpdir: shutil.rmtree(tmpdir) def _log(message): sys.stderr.write(message + os.linesep) def _get_target_name(args): if len(args) > 1: raise UsageError("Too many arguments: %s" % ' '.join(args)) elif args: return args[0] elif len(args) < 1: return 'default' def _get_project(target, opts): project = opts.project or target.get('project') if not project: raise UsageError("Missing project") return project def _get_option(section, option, default=None): cfg = get_config() return cfg.get(section, option) if cfg.has_option(section, option) \ else default def _get_targets(): cfg = get_config() baset = dict(cfg.items('deploy')) if cfg.has_section('deploy') else {} targets = {} if 'url' in baset: targets['default'] = baset for x in cfg.sections(): if x.startswith('deploy:'): t = baset.copy() t.update(cfg.items(x)) targets[x[7:]] = t return targets def _get_target(name): try: return _get_targets()[name] except KeyError: raise UsageError("Unknown target: %s" % name) def _url(target, action): return urljoin(target['url'], action) def _get_version(target, opts): version = opts.version or target.get('version') if version == 'HG': p = Popen(['hg', 'tip', '--template', '{rev}'], stdout=PIPE) return 'r%s' % p.communicate()[0] elif version == 'GIT': p = Popen(['git', 'rev-parse', 'HEAD'], stdout=PIPE) return '%s' % p.communicate()[0].strip('\n') elif version: return version else: return str(int(time.time())) def _upload_egg(target, eggpath, project, 
version): with open(eggpath, 'rb') as f: eggdata = f.read() data = { 'project': project, 'version': version, 'egg': ('project.egg', eggdata), } body, boundary = encode_multipart(data) url = _url(target, 'addversion.json') headers = { 'Content-Type': 'multipart/form-data; boundary=%s' % boundary, 'Content-Length': str(len(body)), } req = urllib2.Request(url, body, headers) _add_auth_header(req, target) _log("Deploying %s-%s to %s" % (project, version, url)) _http_post(req) def _add_auth_header(request, target): if 'username' in target: u, p = target.get('username'), target.get('password', '') request.add_header('Authorization', basic_auth_header(u, p)) else: # try netrc try: host = urlparse(target['url']).hostname a = netrc.netrc().authenticators(host) request.add_header('Authorization', basic_auth_header(a[0], a[2])) except (netrc.NetrcParseError, IOError, TypeError): pass def _http_post(request): try: f = urllib2.urlopen(request) _log("Server response (%s):" % f.code) print f.read() except urllib2.HTTPError, e: _log("Deploy failed (%s):" % e.code) print e.read() except urllib2.URLError, e: _log("Deploy failed: %s" % e) def _build_egg(): closest = closest_scrapy_cfg() os.chdir(os.path.dirname(closest)) if not os.path.exists('setup.py'): settings = get_config().get('settings', 'default') _create_default_setup_py(settings=settings) d = tempfile.mkdtemp() f = tempfile.TemporaryFile(dir=d) check_call([sys.executable, 'setup.py', 'clean', '-a', 'bdist_egg', '-d', d], stdout=f) egg = glob.glob(os.path.join(d, '*.egg'))[0] return egg, d def _create_default_setup_py(**kwargs): with open('setup.py', 'w') as f: f.write(_SETUP_PY_TEMPLATE % kwargs) Scrapy-0.14.4/scrapy/commands/fetch.py0000600000016101777760000000371211754531743017713 0ustar buildbotnogroupfrom w3lib.url import is_url from scrapy.command import ScrapyCommand from scrapy.http import Request from scrapy.spider import BaseSpider from scrapy.exceptions import UsageError from scrapy.utils.spider import create_spider_for_request class Command(ScrapyCommand): requires_project = False def syntax(self): return "[options] " def short_desc(self): return "Fetch a URL using the Scrapy downloader" def long_desc(self): return "Fetch a URL using the Scrapy downloader and print its content " \ "to stdout. 
You may want to use --nolog to disable logging" def add_options(self, parser): ScrapyCommand.add_options(self, parser) parser.add_option("--spider", dest="spider", help="use this spider") parser.add_option("--headers", dest="headers", action="store_true", \ help="print response HTTP headers instead of body") def _print_headers(self, headers, prefix): for key, values in headers.items(): for value in values: print '%s %s: %s' % (prefix, key, value) def _print_response(self, response, opts): if opts.headers: self._print_headers(response.request.headers, '>') print '>' self._print_headers(response.headers, '<') else: print response.body def run(self, args, opts): if len(args) != 1 or not is_url(args[0]): raise UsageError() cb = lambda x: self._print_response(x, opts) request = Request(args[0], callback=cb, dont_filter=True) request.meta['handle_httpstatus_all'] = True spider = None if opts.spider: spider = self.crawler.spiders.create(opts.spider) else: spider = create_spider_for_request(self.crawler.spiders, request, \ default_spider=BaseSpider('default')) self.crawler.crawl(spider, [request]) self.crawler.start() Scrapy-0.14.4/scrapy/commands/runspider.py0000600000016101777760000000412411754531743020633 0ustar buildbotnogroupimport sys import os from scrapy.utils.spider import iter_spider_classes from scrapy.command import ScrapyCommand from scrapy.exceptions import UsageError from scrapy.utils.conf import arglist_to_dict def _import_file(filepath): abspath = os.path.abspath(filepath) dirname, file = os.path.split(abspath) fname, fext = os.path.splitext(file) if fext != '.py': raise ValueError("Not a Python source file: %s" % abspath) if dirname: sys.path = [dirname] + sys.path try: module = __import__(fname, {}, {}, ['']) finally: if dirname: sys.path.pop(0) return module class Command(ScrapyCommand): requires_project = False def syntax(self): return "[options] " def short_desc(self): return "Run a self-contained spider (without creating a project)" def long_desc(self): return "Run the spider defined in the given file" def add_options(self, parser): ScrapyCommand.add_options(self, parser) parser.add_option("-a", dest="spargs", action="append", default=[], metavar="NAME=VALUE", \ help="set spider argument (may be repeated)") def process_options(self, args, opts): ScrapyCommand.process_options(self, args, opts) try: opts.spargs = arglist_to_dict(opts.spargs) except ValueError: raise UsageError("Invalid -a value, use -a NAME=VALUE", print_help=False) def run(self, args, opts): if len(args) != 1: raise UsageError() filename = args[0] if not os.path.exists(filename): raise UsageError("File not found: %s\n" % filename) try: module = _import_file(filename) except (ImportError, ValueError), e: raise UsageError("Unable to load %r: %s\n" % (filename, e)) spclasses = list(iter_spider_classes(module)) if not spclasses: raise UsageError("No spider found in file: %s\n" % filename) spider = spclasses.pop()(**opts.spargs) self.crawler.crawl(spider) self.crawler.start() Scrapy-0.14.4/scrapy/commands/crawl.py0000600000016101777760000000343411754531743017733 0ustar buildbotnogroupfrom scrapy.command import ScrapyCommand from scrapy.utils.conf import arglist_to_dict from scrapy.exceptions import UsageError class Command(ScrapyCommand): requires_project = True def syntax(self): return "[options] " def short_desc(self): return "Start crawling from a spider or URL" def add_options(self, parser): ScrapyCommand.add_options(self, parser) parser.add_option("-a", dest="spargs", action="append", default=[], 
metavar="NAME=VALUE", \ help="set spider argument (may be repeated)") parser.add_option("-o", "--output", metavar="FILE", \ help="dump scraped items into FILE (use - for stdout)") parser.add_option("-t", "--output-format", metavar="FORMAT", default="jsonlines", \ help="format to use for dumping items with -o (default: %default)") def process_options(self, args, opts): ScrapyCommand.process_options(self, args, opts) try: opts.spargs = arglist_to_dict(opts.spargs) except ValueError: raise UsageError("Invalid -a value, use -a NAME=VALUE", print_help=False) if opts.output: if opts.output == '-': self.settings.overrides['FEED_URI'] = 'stdout:' else: self.settings.overrides['FEED_URI'] = opts.output self.settings.overrides['FEED_FORMAT'] = opts.output_format def run(self, args, opts): if len(args) < 1: raise UsageError() elif len(args) > 1: raise UsageError("running 'scrapy crawl' with more than one spider is no longer supported") for spname in args: spider = self.crawler.spiders.create(spname, **opts.spargs) self.crawler.crawl(spider) self.crawler.start() Scrapy-0.14.4/scrapy/commands/__init__.py0000600000016101777760000000000011754531743020344 0ustar buildbotnogroupScrapy-0.14.4/scrapy/commands/view.py0000600000016101777760000000116511754531743017574 0ustar buildbotnogroupfrom scrapy.command import ScrapyCommand from scrapy.commands import fetch from scrapy.utils.response import open_in_browser class Command(fetch.Command): def short_desc(self): return "Open URL in browser, as seen by Scrapy" def long_desc(self): return "Fetch a URL using the Scrapy downloader and show its " \ "contents in a browser" def add_options(self, parser): ScrapyCommand.add_options(self, parser) parser.add_option("--spider", dest="spider", help="use this spider") def _print_response(self, response, opts): open_in_browser(response) Scrapy-0.14.4/scrapy/commands/shell.py0000600000016101777760000000241011754531743017723 0ustar buildbotnogroup""" Scrapy Shell See documentation in docs/topics/shell.rst """ from scrapy.command import ScrapyCommand from scrapy.shell import Shell from scrapy import log class Command(ScrapyCommand): requires_project = False default_settings = {'KEEP_ALIVE': True, 'LOGSTATS_INTERVAL': 0} def syntax(self): return "[url|file]" def short_desc(self): return "Interactive scraping console" def long_desc(self): return "Interactive console for scraping the given url" def add_options(self, parser): ScrapyCommand.add_options(self, parser) parser.add_option("-c", dest="code", help="evaluate the code in the shell, print the result and exit") def update_vars(self, vars): """You can use this function to update the Scrapy objects that will be available in the shell """ pass def run(self, args, opts): url = args[0] if args else None shell = Shell(self.crawler, update_vars=self.update_vars, inthread=True, \ code=opts.code) def err(f): log.err(f, "Shell error") self.exitcode = 1 d = shell.start(url=url) d.addErrback(err) d.addBoth(lambda _: self.crawler.stop()) self.crawler.start() Scrapy-0.14.4/scrapy/dupefilter.py0000600000016101777760000000217411754531743017165 0ustar buildbotnogroupimport os from scrapy.utils.request import request_fingerprint from scrapy.utils.job import job_dir class BaseDupeFilter(object): @classmethod def from_settings(cls, settings): return cls() def request_seen(self, request): return False def open(self): # can return deferred pass def close(self, reason): # can return a deferred pass class RFPDupeFilter(BaseDupeFilter): """Request Fingerprint duplicates filter""" def __init__(self, 
path=None): self.file = None self.fingerprints = set() if path: self.file = open(os.path.join(path, 'requests.seen'), 'a+') self.fingerprints.update(x.rstrip() for x in self.file) @classmethod def from_settings(cls, settings): return cls(job_dir(settings)) def request_seen(self, request): fp = request_fingerprint(request) if fp in self.fingerprints: return True self.fingerprints.add(fp) if self.file: self.file.write(fp + os.linesep) def close(self, reason): if self.file: self.file.close() Scrapy-0.14.4/scrapy/project.py0000600000016101777760000000047311754531743016470 0ustar buildbotnogroup""" --------- WARNING: THIS MODULE IS DEPRECATED ----------- This module is deprecated. If you want to get the Scrapy crawler from your extension, middleware or pipeline implement the `from_crawler` class method. For example: @classmethod def from_crawler(cls, crawler): return cls(crawler) """ Scrapy-0.14.4/scrapy/mime.types0000600000016101777760000004765011754531743016475 0ustar buildbotnogroup############################################################################### # # MIME-TYPES and the extensions that represent them # ############################################################################### application/activemessage application/andrew-inset ez application/annodex anx application/applefile application/atom+xml atom application/atomcat+xml atomcat application/atomserv+xml atomsrv application/atomicmail application/batch-SMTP application/beep+xml application/bbolin lin application/cals-1840 application/cap cap pcap application/commonground application/cu-seeme cu application/cybercash application/davmount+xml davmount application/dca-rft application/dec-dx application/docbook+xml application/dsptype tsp application/dvcs application/ecmascript es application/edi-consent application/edi-x12 application/edifact application/eshop application/font-tdpfr application/futuresplash spl application/ghostview application/hta hta application/http application/hyperstudio application/iges application/index application/index.cmd application/index.obj application/index.response application/index.vnd application/iotp application/ipp application/isup application/java-archive jar application/java-serialized-object ser application/java-vm class application/javascript js application/m3g m3g application/mac-binhex40 hqx application/mac-compactpro cpt application/macwriteii application/marc application/mathematica nb nbp application/ms-tnef application/msaccess mdb application/msword doc dot application/news-message-id application/news-transmission application/ocsp-request application/ocsp-response application/octet-stream bin application/oda oda application/ogg ogx application/parityfec application/pdf pdf application/pgp-encrypted application/pgp-keys key application/pgp-signature pgp application/pics-rules prf application/pkcs10 application/pkcs7-mime application/pkcs7-signature application/pkix-cert application/pkix-crl application/pkixcmp application/postscript ps ai eps espi epsf eps2 eps3 application/prs.alvestrand.titrax-sheet application/prs.cww application/prs.nprend application/qsig application/rar rar application/rdf+xml rdf application/remote-printing application/riscos application/rss+xml rss application/rtf rtf application/sdp application/set-payment application/set-payment-initiation application/set-registration application/set-registration-initiation application/sgml application/sgml-open-catalog application/sieve application/slate application/smil smi smil application/timestamp-query 
application/timestamp-reply application/vemmi application/whoispp-query application/whoispp-response application/wita application/x400-bp application/xhtml+xml xhtml xht application/xml xml xsl xsd application/xml-dtd application/xml-external-parsed-entity application/xspf+xml xspf application/zip zip application/vnd.3M.Post-it-Notes application/vnd.accpac.simply.aso application/vnd.accpac.simply.imp application/vnd.acucobol application/vnd.aether.imp application/vnd.anser-web-certificate-issue-initiation application/vnd.anser-web-funds-transfer-initiation application/vnd.audiograph application/vnd.bmi application/vnd.businessobjects application/vnd.canon-cpdl application/vnd.canon-lips application/vnd.cinderella cdy application/vnd.claymore application/vnd.commerce-battelle application/vnd.commonspace application/vnd.comsocaller application/vnd.contact.cmsg application/vnd.cosmocaller application/vnd.ctc-posml application/vnd.cups-postscript application/vnd.cups-raster application/vnd.cups-raw application/vnd.cybank application/vnd.dna application/vnd.dpgraph application/vnd.dxr application/vnd.ecdis-update application/vnd.ecowin.chart application/vnd.ecowin.filerequest application/vnd.ecowin.fileupdate application/vnd.ecowin.series application/vnd.ecowin.seriesrequest application/vnd.ecowin.seriesupdate application/vnd.enliven application/vnd.epson.esf application/vnd.epson.msf application/vnd.epson.quickanime application/vnd.epson.salt application/vnd.epson.ssf application/vnd.ericsson.quickcall application/vnd.eudora.data application/vnd.fdf application/vnd.ffsns application/vnd.flographit application/vnd.framemaker application/vnd.fsc.weblaunch application/vnd.fujitsu.oasys application/vnd.fujitsu.oasys2 application/vnd.fujitsu.oasys3 application/vnd.fujitsu.oasysgp application/vnd.fujitsu.oasysprs application/vnd.fujixerox.ddd application/vnd.fujixerox.docuworks application/vnd.fujixerox.docuworks.binder application/vnd.fut-misnet application/vnd.google-earth.kml+xml kml application/vnd.google-earth.kmz kmz application/vnd.grafeq application/vnd.groove-account application/vnd.groove-identity-message application/vnd.groove-injector application/vnd.groove-tool-message application/vnd.groove-tool-template application/vnd.groove-vcard application/vnd.hhe.lesson-player application/vnd.hp-HPGL application/vnd.hp-PCL application/vnd.hp-PCLXL application/vnd.hp-hpid application/vnd.hp-hps application/vnd.httphone application/vnd.hzn-3d-crossword application/vnd.ibm.MiniPay application/vnd.ibm.afplinedata application/vnd.ibm.modcap application/vnd.informix-visionary application/vnd.intercon.formnet application/vnd.intertrust.digibox application/vnd.intertrust.nncp application/vnd.intu.qbo application/vnd.intu.qfx application/vnd.irepository.package+xml application/vnd.is-xpr application/vnd.japannet-directory-service application/vnd.japannet-jpnstore-wakeup application/vnd.japannet-payment-wakeup application/vnd.japannet-registration application/vnd.japannet-registration-wakeup application/vnd.japannet-setstore-wakeup application/vnd.japannet-verification application/vnd.japannet-verification-wakeup application/vnd.koan application/vnd.lotus-1-2-3 application/vnd.lotus-approach application/vnd.lotus-freelance application/vnd.lotus-notes application/vnd.lotus-organizer application/vnd.lotus-screencam application/vnd.lotus-wordpro application/vnd.mcd application/vnd.mediastation.cdkey application/vnd.meridian-slingshot application/vnd.mif application/vnd.minisoft-hp3000-save 
application/vnd.mitsubishi.misty-guard.trustweb application/vnd.mobius.daf application/vnd.mobius.dis application/vnd.mobius.msl application/vnd.mobius.plc application/vnd.mobius.txf application/vnd.motorola.flexsuite application/vnd.motorola.flexsuite.adsi application/vnd.motorola.flexsuite.fis application/vnd.motorola.flexsuite.gotap application/vnd.motorola.flexsuite.kmr application/vnd.motorola.flexsuite.ttc application/vnd.motorola.flexsuite.wem application/vnd.mozilla.xul+xml xul application/vnd.ms-artgalry application/vnd.ms-asf application/vnd.ms-excel xls xlb xlt application/vnd.ms-lrm application/vnd.ms-pki.seccat cat application/vnd.ms-pki.stl stl application/vnd.ms-powerpoint ppt pps application/vnd.ms-project application/vnd.ms-tnef application/vnd.ms-works application/vnd.mseq application/vnd.msign application/vnd.music-niff application/vnd.musician application/vnd.netfpx application/vnd.noblenet-directory application/vnd.noblenet-sealer application/vnd.noblenet-web application/vnd.novadigm.EDM application/vnd.novadigm.EDX application/vnd.novadigm.EXT application/vnd.oasis.opendocument.chart odc application/vnd.oasis.opendocument.database odb application/vnd.oasis.opendocument.formula odf application/vnd.oasis.opendocument.graphics odg application/vnd.oasis.opendocument.graphics-template otg application/vnd.oasis.opendocument.image odi application/vnd.oasis.opendocument.presentation odp application/vnd.oasis.opendocument.presentation-template otp application/vnd.oasis.opendocument.spreadsheet ods application/vnd.oasis.opendocument.spreadsheet-template ots application/vnd.oasis.opendocument.text odt application/vnd.oasis.opendocument.text-master odm application/vnd.oasis.opendocument.text-template ott application/vnd.oasis.opendocument.text-web oth application/vnd.osa.netdeploy application/vnd.palm application/vnd.pg.format application/vnd.pg.osasli application/vnd.powerbuilder6 application/vnd.powerbuilder6-s application/vnd.powerbuilder7 application/vnd.powerbuilder7-s application/vnd.powerbuilder75 application/vnd.powerbuilder75-s application/vnd.previewsystems.box application/vnd.publishare-delta-tree application/vnd.pvi.ptid1 application/vnd.pwg-xhtml-print+xml application/vnd.rapid application/vnd.rim.cod cod application/vnd.s3sms application/vnd.seemail application/vnd.shana.informed.formdata application/vnd.shana.informed.formtemplate application/vnd.shana.informed.interchange application/vnd.shana.informed.package application/vnd.smaf mmf application/vnd.sss-cod application/vnd.sss-dtf application/vnd.sss-ntf application/vnd.stardivision.calc sdc application/vnd.stardivision.chart sds application/vnd.stardivision.draw sda application/vnd.stardivision.impress sdd application/vnd.stardivision.math sdf application/vnd.stardivision.writer sdw application/vnd.stardivision.writer-global sgl application/vnd.street-stream application/vnd.sun.xml.calc sxc application/vnd.sun.xml.calc.template stc application/vnd.sun.xml.draw sxd application/vnd.sun.xml.draw.template std application/vnd.sun.xml.impress sxi application/vnd.sun.xml.impress.template sti application/vnd.sun.xml.math sxm application/vnd.sun.xml.writer sxw application/vnd.sun.xml.writer.global sxg application/vnd.sun.xml.writer.template stw application/vnd.svd application/vnd.swiftview-ics application/vnd.symbian.install sis application/vnd.triscape.mxs application/vnd.trueapp application/vnd.truedoc application/vnd.tve-trigger application/vnd.ufdl application/vnd.uplanet.alert application/vnd.uplanet.alert-wbxml 
application/vnd.uplanet.bearer-choice application/vnd.uplanet.bearer-choice-wbxml application/vnd.uplanet.cacheop application/vnd.uplanet.cacheop-wbxml application/vnd.uplanet.channel application/vnd.uplanet.channel-wbxml application/vnd.uplanet.list application/vnd.uplanet.list-wbxml application/vnd.uplanet.listcmd application/vnd.uplanet.listcmd-wbxml application/vnd.uplanet.signal application/vnd.vcx application/vnd.vectorworks application/vnd.vidsoft.vidconference application/vnd.visio vsd application/vnd.vividence.scriptfile application/vnd.wap.sic application/vnd.wap.slc application/vnd.wap.wbxml wbxml application/vnd.wap.wmlc wmlc application/vnd.wap.wmlscriptc wmlsc application/vnd.webturbo application/vnd.wordperfect wpd application/vnd.wordperfect5.1 wp5 application/vnd.wrq-hp3000-labelled application/vnd.wt.stf application/vnd.xara application/vnd.xfdl application/vnd.yellowriver-custom-menu application/x-123 wk application/x-7z-compressed 7z application/x-abiword abw application/x-apple-diskimage dmg application/x-bcpio bcpio application/x-bittorrent torrent application/x-cab cab application/x-cbr cbr application/x-cbz cbz application/x-cdf cdf cda application/x-cdlink vcd application/x-chess-pgn pgn application/x-core application/x-cpio cpio application/x-csh csh application/x-debian-package deb udeb application/x-director dcr dir dxr application/x-dms dms application/x-doom wad application/x-dvi dvi application/x-httpd-eruby rhtml application/x-executable application/x-font pfa pfb gsf pcf pcf.Z application/x-freemind mm application/x-futuresplash spl application/x-gnumeric gnumeric application/x-go-sgf sgf application/x-graphing-calculator gcf application/x-gtar gtar tgz taz application/x-hdf hdf application/x-httpd-php phtml pht php application/x-httpd-php-source phps application/x-httpd-php3 php3 application/x-httpd-php3-preprocessed php3p application/x-httpd-php4 php4 application/x-ica ica application/x-info info application/x-internet-signup ins isp application/x-iphone iii application/x-iso9660-image iso application/x-jam jam application/x-java-applet application/x-java-bean application/x-java-jnlp-file jnlp application/x-jmol jmz application/x-kchart chrt application/x-kdelnk application/x-killustrator kil application/x-koan skp skd skt skm application/x-kpresenter kpr kpt application/x-kspread ksp application/x-kword kwd kwt application/x-latex latex application/x-lha lha application/x-lyx lyx application/x-lzh lzh application/x-lzx lzx application/x-maker frm maker frame fm fb book fbdoc application/x-mif mif application/x-ms-wmd wmd application/x-ms-wmz wmz application/x-msdos-program com exe bat dll application/x-msi msi application/x-netcdf nc application/x-ns-proxy-autoconfig pac dat application/x-nwc nwc application/x-object o application/x-oz-application oza application/x-pkcs7-certreqresp p7r application/x-pkcs7-crl crl application/x-python-code pyc pyo application/x-qgis qgs shp shx application/x-quicktimeplayer qtl application/x-redhat-package-manager rpm application/x-ruby rb application/x-rx application/x-sh sh application/x-shar shar application/x-shellscript application/x-shockwave-flash swf swfl application/x-stuffit sit sitx application/x-sv4cpio sv4cpio application/x-sv4crc sv4crc application/x-tar tar application/x-tcl tcl application/x-tex-gf gf application/x-tex-pk pk application/x-texinfo texinfo texi application/x-trash ~ % bak old sik application/x-troff t tr roff application/x-troff-man man application/x-troff-me me application/x-troff-ms ms 
application/x-ustar ustar application/x-videolan application/x-wais-source src application/x-wingz wz application/x-x509-ca-cert crt application/x-xcf xcf application/x-xfig fig application/x-xpinstall xpi audio/32kadpcm audio/3gpp audio/amr amr audio/amr-wb awb audio/amr amr audio/amr-wb awb audio/annodex axa audio/basic au snd audio/flac flac audio/g.722.1 audio/l16 audio/midi mid midi kar audio/mp4a-latm audio/mpa-robust audio/mpeg mpga mpega mp2 mp3 m4a audio/mpegurl m3u audio/ogg oga ogg spx audio/parityfec audio/prs.sid sid audio/telephone-event audio/tone audio/vnd.cisco.nse audio/vnd.cns.anp1 audio/vnd.cns.inf1 audio/vnd.digital-winds audio/vnd.everad.plj audio/vnd.lucent.voice audio/vnd.nortel.vbk audio/vnd.nuera.ecelp4800 audio/vnd.nuera.ecelp7470 audio/vnd.nuera.ecelp9600 audio/vnd.octel.sbc audio/vnd.qcelp audio/vnd.rhetorex.32kadpcm audio/vnd.vmx.cvsd audio/x-aiff aif aiff aifc audio/x-gsm gsm audio/x-mpegurl m3u audio/x-ms-wma wma audio/x-ms-wax wax audio/x-pn-realaudio-plugin audio/x-pn-realaudio ra rm ram audio/x-realaudio ra audio/x-scpls pls audio/x-sd2 sd2 audio/x-wav wav chemical/x-alchemy alc chemical/x-cache cac cache chemical/x-cache-csf csf chemical/x-cactvs-binary cbin cascii ctab chemical/x-cdx cdx chemical/x-cerius cer chemical/x-chem3d c3d chemical/x-chemdraw chm chemical/x-cif cif chemical/x-cmdf cmdf chemical/x-cml cml chemical/x-compass cpa chemical/x-crossfire bsd chemical/x-csml csml csm chemical/x-ctx ctx chemical/x-cxf cxf cef #chemical/x-daylight-smiles smi chemical/x-embl-dl-nucleotide emb embl chemical/x-galactic-spc spc chemical/x-gamess-input inp gam gamin chemical/x-gaussian-checkpoint fch fchk chemical/x-gaussian-cube cub chemical/x-gaussian-input gau gjc gjf chemical/x-gaussian-log gal chemical/x-gcg8-sequence gcg chemical/x-genbank gen chemical/x-hin hin chemical/x-isostar istr ist chemical/x-jcamp-dx jdx dx chemical/x-kinemage kin chemical/x-macmolecule mcm chemical/x-macromodel-input mmd mmod chemical/x-mdl-molfile mol chemical/x-mdl-rdfile rd chemical/x-mdl-rxnfile rxn chemical/x-mdl-sdfile sd sdf chemical/x-mdl-tgf tgf #chemical/x-mif mif chemical/x-mmcif mcif chemical/x-mol2 mol2 chemical/x-molconn-Z b chemical/x-mopac-graph gpt chemical/x-mopac-input mop mopcrt mpc zmt chemical/x-mopac-out moo chemical/x-mopac-vib mvb chemical/x-ncbi-asn1 asn chemical/x-ncbi-asn1-ascii prt ent chemical/x-ncbi-asn1-binary val aso chemical/x-ncbi-asn1-spec asn chemical/x-pdb pdb ent chemical/x-rosdal ros chemical/x-swissprot sw chemical/x-vamas-iso14976 vms chemical/x-vmd vmd chemical/x-xtel xtel chemical/x-xyz xyz image/cgm image/g3fax image/gif gif image/ief ief image/jpeg jpeg jpg jpe image/naplps image/pcx pcx image/png png image/prs.btif image/prs.pti image/svg+xml svg svgz image/tiff tiff tif image/vnd.cns.inf2 image/vnd.djvu djvu djv image/vnd.dwg image/vnd.dxf image/vnd.fastbidsheet image/vnd.fpx image/vnd.fst image/vnd.fujixerox.edmics-mmr image/vnd.fujixerox.edmics-rlc image/vnd.mix image/vnd.net-fpx image/vnd.svf image/vnd.wap.wbmp wbmp image/vnd.xiff image/x-cmu-raster ras image/x-coreldraw cdr image/x-coreldrawpattern pat image/x-coreldrawtemplate cdt image/x-corelphotopaint cpt image/x-icon ico image/x-jg art image/x-jng jng image/x-ms-bmp bmp image/x-photoshop psd image/x-portable-anymap pnm image/x-portable-bitmap pbm image/x-portable-graymap pgm image/x-portable-pixmap ppm image/x-rgb rgb image/x-xbitmap xbm image/x-xpixmap xpm image/x-xwindowdump xwd inode/chardevice inode/blockdevice inode/directory-locked inode/directory inode/fifo 
inode/socket message/delivery-status message/disposition-notification message/external-body message/http message/s-http message/news message/partial message/rfc822 eml model/iges igs iges model/mesh msh mesh silo model/vnd.dwf model/vnd.flatland.3dml model/vnd.gdl model/vnd.gs-gdl model/vnd.gtw model/vnd.mts model/vnd.vtu model/vrml wrl vrml multipart/alternative multipart/appledouble multipart/byteranges multipart/digest multipart/encrypted multipart/form-data multipart/header-set multipart/mixed multipart/parallel multipart/related multipart/report multipart/signed multipart/voice-message text/calendar ics icz text/css css text/csv csv text/directory text/english text/enriched text/h323 323 text/html html htm shtml text/iuls uls text/mathml mml text/parityfec text/plain asc txt text pot brf text/prs.lines.tag text/rfc822-headers text/richtext rtx text/rtf text/scriptlet sct wsc text/t140 text/texmacs tm ts text/tab-separated-values tsv text/uri-list text/vnd.abc text/vnd.curl text/vnd.DMClientScript text/vnd.flatland.3dml text/vnd.fly text/vnd.fmi.flexstor text/vnd.in3d.3dml text/vnd.in3d.spot text/vnd.IPTC.NewsML text/vnd.IPTC.NITF text/vnd.latex-z text/vnd.motorola.reflex text/vnd.ms-mediapackage text/vnd.sun.j2me.app-descriptor jad text/vnd.wap.si text/vnd.wap.sl text/vnd.wap.wml wml text/vnd.wap.wmlscript wmls text/x-bibtex bib text/x-boo boo text/x-c++hdr h++ hpp hxx hh text/x-c++src c++ cpp cxx cc text/x-chdr h text/x-component htc text/x-crontab text/x-csh csh text/x-csrc c text/x-dsrc d text/x-diff diff patch text/x-haskell hs text/x-java java text/x-literate-haskell lhs text/x-makefile text/x-moc moc text/x-pascal p pas text/x-pcs-gcd gcd text/x-perl pl pm text/x-python py text/x-scala scala text/x-server-parsed-html text/x-setext etx text/x-sh sh text/x-tcl tcl tk text/x-tex tex ltx sty cls text/x-vcalendar vcs text/x-vcard vcf video/3gpp 3gp video/annodex axv video/dl dl video/dv dif dv video/fli fli video/gl gl video/mpeg mpeg mpg mpe video/mp4 mp4 video/quicktime qt mov video/mp4v-es video/ogg ogv video/parityfec video/pointer video/vnd.fvt video/vnd.motorola.video video/vnd.motorola.videop video/vnd.mpegurl mxu video/vnd.mts video/vnd.nokia.interleaved-multimedia video/vnd.vivo video/x-flv flv video/x-la-asf lsf lsx video/x-mng mng video/x-ms-asf asf asx video/x-ms-wm wm video/x-ms-wmv wmv video/x-ms-wmx wmx video/x-ms-wvx wvx video/x-msvideo avi video/x-sgi-movie movie video/x-matroska mpv x-conference/x-cooltalk ice x-epoc/x-sisx-app sisx x-world/x-vrml vrm vrml wrl x-scrapy/test scrapytest Scrapy-0.14.4/scrapy/stats.py0000600000016101777760000000050611754531743016155 0ustar buildbotnogroupfrom scrapy.statscol import DummyStatsCollector from scrapy.conf import settings from scrapy.utils.misc import load_object # if stats are disabled use a DummyStatsCollector to improve performance if settings.getbool('STATS_ENABLED'): stats = load_object(settings['STATS_CLASS'])() else: stats = DummyStatsCollector() Scrapy-0.14.4/scrapy/contrib/0000700000016101777760000000000011754532077016103 5ustar buildbotnogroupScrapy-0.14.4/scrapy/contrib/downloadermiddleware/0000700000016101777760000000000011754532077022277 5ustar buildbotnogroupScrapy-0.14.4/scrapy/contrib/downloadermiddleware/redirect.py0000600000016101777760000000661511754531743024463 0ustar buildbotnogroupfrom urlparse import urljoin from scrapy import log from scrapy.http import HtmlResponse from scrapy.utils.response import get_meta_refresh from scrapy.exceptions import IgnoreRequest, NotConfigured from scrapy.conf import 
settings class RedirectMiddleware(object): """Handle redirection of requests based on response status and meta-refresh html tag""" def __init__(self): if not settings.getbool('REDIRECT_ENABLED'): raise NotConfigured self.max_metarefresh_delay = settings.getint('REDIRECT_MAX_METAREFRESH_DELAY') self.max_redirect_times = settings.getint('REDIRECT_MAX_TIMES') self.priority_adjust = settings.getint('REDIRECT_PRIORITY_ADJUST') def process_response(self, request, response, spider): if 'dont_redirect' in request.meta: return response if request.method.upper() == 'HEAD': if response.status in [301, 302, 303, 307] and 'Location' in response.headers: redirected_url = urljoin(request.url, response.headers['location']) redirected = request.replace(url=redirected_url) return self._redirect(redirected, request, spider, response.status) else: return response if response.status in [302, 303] and 'Location' in response.headers: redirected_url = urljoin(request.url, response.headers['location']) redirected = self._redirect_request_using_get(request, redirected_url) return self._redirect(redirected, request, spider, response.status) if response.status in [301, 307] and 'Location' in response.headers: redirected_url = urljoin(request.url, response.headers['location']) redirected = request.replace(url=redirected_url) return self._redirect(redirected, request, spider, response.status) if isinstance(response, HtmlResponse): interval, url = get_meta_refresh(response) if url and interval < self.max_metarefresh_delay: redirected = self._redirect_request_using_get(request, url) return self._redirect(redirected, request, spider, 'meta refresh') return response def _redirect(self, redirected, request, spider, reason): ttl = request.meta.setdefault('redirect_ttl', self.max_redirect_times) redirects = request.meta.get('redirect_times', 0) + 1 if ttl and redirects <= self.max_redirect_times: redirected.meta['redirect_times'] = redirects redirected.meta['redirect_ttl'] = ttl - 1 redirected.meta['redirect_urls'] = request.meta.get('redirect_urls', []) + \ [request.url] redirected.dont_filter = request.dont_filter redirected.priority = request.priority + self.priority_adjust log.msg("Redirecting (%s) to %s from %s" % (reason, redirected, request), spider=spider, level=log.DEBUG) return redirected else: log.msg("Discarding %s: max redirections reached" % request, spider=spider, level=log.DEBUG) raise IgnoreRequest def _redirect_request_using_get(self, request, redirect_url): redirected = request.replace(url=redirect_url, method='GET', body='') redirected.headers.pop('Content-Type', None) redirected.headers.pop('Content-Length', None) return redirected Scrapy-0.14.4/scrapy/contrib/downloadermiddleware/cookies.py0000600000016101777760000000474611754531743024321 0ustar buildbotnogroupimport os from collections import defaultdict from scrapy.xlib.pydispatch import dispatcher from scrapy import signals from scrapy.exceptions import NotConfigured from scrapy.http import Response from scrapy.http.cookies import CookieJar from scrapy.conf import settings from scrapy import log class CookiesMiddleware(object): """This middleware enables working with sites that need cookies""" debug = settings.getbool('COOKIES_DEBUG') def __init__(self): if not settings.getbool('COOKIES_ENABLED'): raise NotConfigured self.jars = defaultdict(CookieJar) dispatcher.connect(self.spider_closed, signals.spider_closed) def process_request(self, request, spider): if 'dont_merge_cookies' in request.meta: return jar = self.jars[spider] cookies = 
self._get_request_cookies(jar, request) for cookie in cookies: jar.set_cookie_if_ok(cookie, request) # set Cookie header request.headers.pop('Cookie', None) jar.add_cookie_header(request) self._debug_cookie(request, spider) def process_response(self, request, response, spider): if 'dont_merge_cookies' in request.meta: return response # extract cookies from Set-Cookie and drop invalid/expired cookies jar = self.jars[spider] jar.extract_cookies(response, request) self._debug_set_cookie(response, spider) return response def spider_closed(self, spider): self.jars.pop(spider, None) def _debug_cookie(self, request, spider): if self.debug: cl = request.headers.getlist('Cookie') if cl: msg = "Sending cookies to: %s" % request + os.linesep msg += os.linesep.join("Cookie: %s" % c for c in cl) log.msg(msg, spider=spider, level=log.DEBUG) def _debug_set_cookie(self, response, spider): if self.debug: cl = response.headers.getlist('Set-Cookie') if cl: msg = "Received cookies from: %s" % response + os.linesep msg += os.linesep.join("Set-Cookie: %s" % c for c in cl) log.msg(msg, spider=spider, level=log.DEBUG) def _get_request_cookies(self, jar, request): headers = {'Set-Cookie': ['%s=%s;' % (k, v) for k, v in request.cookies.iteritems()]} response = Response(request.url, headers=headers) cookies = jar.make_cookies(response, request) return cookies Scrapy-0.14.4/scrapy/contrib/downloadermiddleware/useragent.py0000600000016101777760000000115111754531743024645 0ustar buildbotnogroup"""Set User-Agent header per spider or use a default value from settings""" from scrapy.utils.python import WeakKeyCache class UserAgentMiddleware(object): """This middleware allows spiders to override the user_agent""" def __init__(self): self.cache = WeakKeyCache(self._user_agent) def _user_agent(self, spider): if hasattr(spider, 'user_agent'): return spider.user_agent return spider.settings['USER_AGENT'] def process_request(self, request, spider): ua = self.cache[spider] if ua: request.headers.setdefault('User-Agent', ua) Scrapy-0.14.4/scrapy/contrib/downloadermiddleware/defaultheaders.py0000600000016101777760000000107511754531743025635 0ustar buildbotnogroup""" DefaultHeaders downloader middleware See documentation in docs/topics/downloader-middleware.rst """ from scrapy import conf from scrapy.utils.python import WeakKeyCache class DefaultHeadersMiddleware(object): def __init__(self, settings=conf.settings): self._headers = WeakKeyCache(self._default_headers) def _default_headers(self, spider): return spider.settings.get('DEFAULT_REQUEST_HEADERS').items() def process_request(self, request, spider): for k, v in self._headers[spider]: request.headers.setdefault(k, v) Scrapy-0.14.4/scrapy/contrib/downloadermiddleware/stats.py0000600000016101777760000000253211754531743024012 0ustar buildbotnogroupfrom scrapy.exceptions import NotConfigured from scrapy.utils.request import request_httprepr from scrapy.utils.response import response_httprepr from scrapy.stats import stats from scrapy.conf import settings class DownloaderStats(object): def __init__(self): if not settings.getbool('DOWNLOADER_STATS'): raise NotConfigured def process_request(self, request, spider): stats.inc_value('downloader/request_count', spider=spider) stats.inc_value('downloader/request_method_count/%s' % request.method, spider=spider) reqlen = len(request_httprepr(request)) stats.inc_value('downloader/request_bytes', reqlen, spider=spider) def process_response(self, request, response, spider): stats.inc_value('downloader/response_count', spider=spider) 
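        # (descriptive note, not in the original source) the next two counters record
        # per-status and byte totals; e.g. a 200 response increments the key
        # 'downloader/response_status_count/200' -- the key name follows the format
        # string below, the specific status code is only illustrative.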
stats.inc_value('downloader/response_status_count/%s' % response.status, spider=spider) reslen = len(response_httprepr(response)) stats.inc_value('downloader/response_bytes', reslen, spider=spider) return response def process_exception(self, request, exception, spider): ex_class = "%s.%s" % (exception.__class__.__module__, exception.__class__.__name__) stats.inc_value('downloader/exception_count', spider=spider) stats.inc_value('downloader/exception_type_count/%s' % ex_class, spider=spider) Scrapy-0.14.4/scrapy/contrib/downloadermiddleware/httpauth.py0000600000016101777760000000145111754531743024514 0ustar buildbotnogroup""" HTTP basic auth downloader middleware See documentation in docs/topics/downloader-middleware.rst """ from w3lib.http import basic_auth_header from scrapy.utils.python import WeakKeyCache class HttpAuthMiddleware(object): """Set Basic HTTP Authorization header (http_user and http_pass spider class attributes)""" def __init__(self): self._cache = WeakKeyCache(self._authorization) def _authorization(self, spider): usr = getattr(spider, 'http_user', '') pwd = getattr(spider, 'http_pass', '') if usr or pwd: return basic_auth_header(usr, pwd) def process_request(self, request, spider): auth = self._cache[spider] if auth and 'Authorization' not in request.headers: request.headers['Authorization'] = auth Scrapy-0.14.4/scrapy/contrib/downloadermiddleware/httpproxy.py0000600000016101777760000000304011754531743024730 0ustar buildbotnogroupimport base64 from urllib import getproxies, unquote, proxy_bypass from urllib2 import _parse_proxy from urlparse import urlunparse from scrapy.utils.httpobj import urlparse_cached from scrapy.exceptions import NotConfigured class HttpProxyMiddleware(object): def __init__(self): self.proxies = {} for type, url in getproxies().items(): self.proxies[type] = self._get_proxy(url, type) if not self.proxies: raise NotConfigured def _get_proxy(self, url, orig_type): proxy_type, user, password, hostport = _parse_proxy(url) proxy_url = urlunparse((proxy_type or orig_type, hostport, '', '', '', '')) if user and password: user_pass = '%s:%s' % (unquote(user), unquote(password)) creds = base64.b64encode(user_pass).strip() else: creds = None return creds, proxy_url def process_request(self, request, spider): # ignore if proxy is already seted if 'proxy' in request.meta: return parsed = urlparse_cached(request) scheme = parsed.scheme # 'no_proxy' is only supported by http schemes if scheme in ('http', 'https') and proxy_bypass(parsed.hostname): return if scheme in self.proxies: self._set_proxy(request, scheme) def _set_proxy(self, request, scheme): creds, proxy = self.proxies[scheme] request.meta['proxy'] = proxy if creds: request.headers['Proxy-Authorization'] = 'Basic ' + creds Scrapy-0.14.4/scrapy/contrib/downloadermiddleware/httpcompression.py0000600000016101777760000000374511754531743026124 0ustar buildbotnogroupimport zlib from scrapy.utils.gz import gunzip from scrapy.http import Response, TextResponse from scrapy.responsetypes import responsetypes class HttpCompressionMiddleware(object): """This middleware allows compressed (gzip, deflate) traffic to be sent/received from web sites""" def process_request(self, request, spider): request.headers.setdefault('Accept-Encoding', 'x-gzip,gzip,deflate') def process_response(self, request, response, spider): if isinstance(response, Response): content_encoding = response.headers.getlist('Content-Encoding') if content_encoding: encoding = content_encoding.pop() decoded_body = self._decode(response.body, 
encoding.lower()) respcls = responsetypes.from_args(headers=response.headers, \ url=response.url) kwargs = dict(cls=respcls, body=decoded_body) if issubclass(respcls, TextResponse): # force recalculating the encoding until we make sure the # responsetypes guessing is reliable kwargs['encoding'] = None response = response.replace(**kwargs) if not content_encoding: del response.headers['Content-Encoding'] return response def _decode(self, body, encoding): if encoding == 'gzip' or encoding == 'x-gzip': body = gunzip(body) if encoding == 'deflate': try: body = zlib.decompress(body) except zlib.error: # ugly hack to work with raw deflate content that may # be sent by microsoft servers. For more information, see: # http://carsten.codimi.de/gzip.yaws/ # http://www.port80software.com/200ok/archive/2005/10/31/868.aspx # http://www.gzip.org/zlib/zlib_faq.html#faq38 body = zlib.decompress(body, -15) return body Scrapy-0.14.4/scrapy/contrib/downloadermiddleware/__init__.py0000600000016101777760000000000011754531743024377 0ustar buildbotnogroupScrapy-0.14.4/scrapy/contrib/downloadermiddleware/retry.py0000600000016101777760000000641011754531743024020 0ustar buildbotnogroup""" An extension to retry failed requests that are potentially caused by temporary problems such as a connection timeout or HTTP 500 error. You can change the behaviour of this middleware by modifing the scraping settings: RETRY_TIMES - how many times to retry a failed page RETRY_HTTP_CODES - which HTTP response codes to retry Failed pages are collected on the scraping process and rescheduled at the end, once the spider has finished crawling all regular (non failed) pages. Once there is no more failed pages to retry this middleware sends a signal (retry_complete), so other extensions could connect to that signal. About HTTP errors to consider: - You may want to remove 400 from RETRY_HTTP_CODES, if you stick to the HTTP protocol. 
It's included by default because it's a common code used to indicate server overload, which would be something we want to retry """ from twisted.internet.error import TimeoutError as ServerTimeoutError, DNSLookupError, \ ConnectionRefusedError, ConnectionDone, ConnectError, \ ConnectionLost, TCPTimedOutError from twisted.internet.defer import TimeoutError as UserTimeoutError from scrapy import log from scrapy.exceptions import NotConfigured from scrapy.utils.response import response_status_message from scrapy.conf import settings class RetryMiddleware(object): # IOError is raised by the HttpCompression middleware when trying to # decompress an empty response EXCEPTIONS_TO_RETRY = (ServerTimeoutError, UserTimeoutError, DNSLookupError, ConnectionRefusedError, ConnectionDone, ConnectError, ConnectionLost, TCPTimedOutError, IOError) def __init__(self): if not settings.getbool('RETRY_ENABLED'): raise NotConfigured self.max_retry_times = settings.getint('RETRY_TIMES') self.retry_http_codes = set(int(x) for x in settings.getlist('RETRY_HTTP_CODES')) self.priority_adjust = settings.getint('RETRY_PRIORITY_ADJUST') def process_response(self, request, response, spider): if 'dont_retry' in request.meta: return response if response.status in self.retry_http_codes: reason = response_status_message(response.status) return self._retry(request, reason, spider) or response return response def process_exception(self, request, exception, spider): if isinstance(exception, self.EXCEPTIONS_TO_RETRY) \ and 'dont_retry' not in request.meta: return self._retry(request, exception, spider) def _retry(self, request, reason, spider): retries = request.meta.get('retry_times', 0) + 1 if retries <= self.max_retry_times: log.msg("Retrying %s (failed %d times): %s" % (request, retries, reason), spider=spider, level=log.DEBUG) retryreq = request.copy() retryreq.meta['retry_times'] = retries retryreq.dont_filter = True retryreq.priority = request.priority + self.priority_adjust return retryreq else: log.msg("Gave up retrying %s (failed %d times): %s" % (request, retries, reason), spider=spider, level=log.DEBUG) Scrapy-0.14.4/scrapy/contrib/downloadermiddleware/httpcache.py0000600000016101777760000001165511754531743024625 0ustar buildbotnogroupfrom __future__ import with_statement import os from os.path import join, exists from time import time import cPickle as pickle from w3lib.http import headers_dict_to_raw, headers_raw_to_dict from scrapy.xlib.pydispatch import dispatcher from scrapy import signals from scrapy.http import Headers from scrapy.exceptions import NotConfigured, IgnoreRequest from scrapy.responsetypes import responsetypes from scrapy.utils.request import request_fingerprint from scrapy.utils.httpobj import urlparse_cached from scrapy.utils.misc import load_object from scrapy.utils.project import data_path from scrapy import conf class HttpCacheMiddleware(object): def __init__(self, settings=conf.settings): if not settings.getbool('HTTPCACHE_ENABLED'): raise NotConfigured self.storage = load_object(settings['HTTPCACHE_STORAGE'])(settings) self.ignore_missing = settings.getbool('HTTPCACHE_IGNORE_MISSING') self.ignore_schemes = settings.getlist('HTTPCACHE_IGNORE_SCHEMES') self.ignore_http_codes = map(int, settings.getlist('HTTPCACHE_IGNORE_HTTP_CODES')) dispatcher.connect(self.spider_opened, signal=signals.spider_opened) dispatcher.connect(self.spider_closed, signal=signals.spider_closed) def spider_opened(self, spider): self.storage.open_spider(spider) def spider_closed(self, spider): 
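        # (descriptive note, not in the original source) ask the storage backend to
        # release any per-spider resources; a no-op for the FilesystemCacheStorage
        # defined below.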
self.storage.close_spider(spider) def process_request(self, request, spider): if not self.is_cacheable(request): return response = self.storage.retrieve_response(spider, request) if response and self.is_cacheable_response(response): response.flags.append('cached') return response elif self.ignore_missing: raise IgnoreRequest("Ignored request not in cache: %s" % request) def process_response(self, request, response, spider): if self.is_cacheable(request) and self.is_cacheable_response(response): self.storage.store_response(spider, request, response) return response def is_cacheable_response(self, response): return response.status not in self.ignore_http_codes def is_cacheable(self, request): return urlparse_cached(request).scheme not in self.ignore_schemes class FilesystemCacheStorage(object): def __init__(self, settings=conf.settings): self.cachedir = data_path(settings['HTTPCACHE_DIR']) self.expiration_secs = settings.getint('HTTPCACHE_EXPIRATION_SECS') def open_spider(self, spider): pass def close_spider(self, spider): pass def retrieve_response(self, spider, request): """Return response if present in cache, or None otherwise.""" metadata = self._read_meta(spider, request) if metadata is None: return # not cached rpath = self._get_request_path(spider, request) with open(join(rpath, 'response_body'), 'rb') as f: body = f.read() with open(join(rpath, 'response_headers'), 'rb') as f: rawheaders = f.read() url = metadata.get('response_url') status = metadata['status'] headers = Headers(headers_raw_to_dict(rawheaders)) respcls = responsetypes.from_args(headers=headers, url=url) response = respcls(url=url, headers=headers, status=status, body=body) return response def store_response(self, spider, request, response): """Store the given response in the cache.""" rpath = self._get_request_path(spider, request) if not exists(rpath): os.makedirs(rpath) metadata = { 'url': request.url, 'method': request.method, 'status': response.status, 'response_url': response.url, 'timestamp': time(), } with open(join(rpath, 'meta'), 'wb') as f: f.write(repr(metadata)) with open(join(rpath, 'pickled_meta'), 'wb') as f: pickle.dump(metadata, f, protocol=2) with open(join(rpath, 'response_headers'), 'wb') as f: f.write(headers_dict_to_raw(response.headers)) with open(join(rpath, 'response_body'), 'wb') as f: f.write(response.body) with open(join(rpath, 'request_headers'), 'wb') as f: f.write(headers_dict_to_raw(request.headers)) with open(join(rpath, 'request_body'), 'wb') as f: f.write(request.body) def _get_request_path(self, spider, request): key = request_fingerprint(request) return join(self.cachedir, spider.name, key[0:2], key) def _read_meta(self, spider, request): rpath = self._get_request_path(spider, request) metapath = join(rpath, 'pickled_meta') if not exists(metapath): return # not found mtime = os.stat(rpath).st_mtime if 0 < self.expiration_secs < time() - mtime: return # expired with open(metapath, 'rb') as f: return pickle.load(f) Scrapy-0.14.4/scrapy/contrib/downloadermiddleware/downloadtimeout.py0000600000016101777760000000116711754531743026075 0ustar buildbotnogroup""" Download timeout middleware See documentation in docs/topics/downloader-middleware.rst """ from scrapy.utils.python import WeakKeyCache class DownloadTimeoutMiddleware(object): def __init__(self): self._cache = WeakKeyCache(self._download_timeout) def _download_timeout(self, spider): if hasattr(spider, 'download_timeout'): return spider.download_timeout return spider.settings.getint('DOWNLOAD_TIMEOUT') def process_request(self, 
request, spider): timeout = self._cache[spider] if timeout: request.meta.setdefault('download_timeout', timeout) Scrapy-0.14.4/scrapy/contrib/downloadermiddleware/chunked.py0000600000016101777760000000077111754531743024300 0ustar buildbotnogroupfrom scrapy.utils.http import decode_chunked_transfer class ChunkedTransferMiddleware(object): """This middleware adds support for chunked transfer encoding, as documented in: http://en.wikipedia.org/wiki/Chunked_transfer_encoding """ def process_response(self, request, response, spider): if response.headers.get('Transfer-Encoding') == 'chunked': body = decode_chunked_transfer(response.body) return response.replace(body=body) return response Scrapy-0.14.4/scrapy/contrib/downloadermiddleware/robotstxt.py0000600000016101777760000000443211754531743024725 0ustar buildbotnogroup""" This is a middleware to respect robots.txt policies. To activate it you must enable this middleware and enable the ROBOTSTXT_OBEY setting. """ import robotparser from scrapy.xlib.pydispatch import dispatcher from scrapy import signals, log from scrapy.exceptions import NotConfigured, IgnoreRequest from scrapy.http import Request from scrapy.utils.httpobj import urlparse_cached class RobotsTxtMiddleware(object): DOWNLOAD_PRIORITY = 1000 def __init__(self, crawler): if not crawler.settings.getbool('ROBOTSTXT_OBEY'): raise NotConfigured self.crawler = crawler self._parsers = {} self._spider_netlocs = {} self._useragents = {} dispatcher.connect(self.spider_opened, signals.spider_opened) dispatcher.connect(self.spider_closed, signals.spider_closed) @classmethod def from_crawler(cls, crawler): return cls(crawler) def process_request(self, request, spider): useragent = self._useragents[spider] rp = self.robot_parser(request, spider) if rp and not rp.can_fetch(useragent, request.url): log.msg("Forbidden by robots.txt: %s" % request, log.DEBUG) raise IgnoreRequest def robot_parser(self, request, spider): url = urlparse_cached(request) netloc = url.netloc if netloc not in self._parsers: self._parsers[netloc] = None robotsurl = "%s://%s/robots.txt" % (url.scheme, url.netloc) robotsreq = Request(robotsurl, priority=self.DOWNLOAD_PRIORITY) dfd = self.crawler.engine.download(robotsreq, spider) dfd.addCallback(self._parse_robots) self._spider_netlocs[spider].add(netloc) return self._parsers[netloc] def _parse_robots(self, response): rp = robotparser.RobotFileParser(response.url) rp.parse(response.body.splitlines()) self._parsers[urlparse_cached(response).netloc] = rp def spider_opened(self, spider): self._spider_netlocs[spider] = set() self._useragents[spider] = spider.settings['USER_AGENT'] def spider_closed(self, spider): for netloc in self._spider_netlocs[spider]: del self._parsers[netloc] del self._spider_netlocs[spider] del self._useragents[spider] Scrapy-0.14.4/scrapy/contrib/webservice/0000700000016101777760000000000011754532077020241 5ustar buildbotnogroupScrapy-0.14.4/scrapy/contrib/webservice/stats.py0000600000016101777760000000034411754531743021753 0ustar buildbotnogroupfrom scrapy.webservice import JsonRpcResource from scrapy.stats import stats class StatsResource(JsonRpcResource): ws_name = 'stats' def __init__(self, crawler): JsonRpcResource.__init__(self, crawler, stats) Scrapy-0.14.4/scrapy/contrib/webservice/enginestatus.py0000600000016101777760000000132511754531743023326 0ustar buildbotnogroupfrom scrapy.webservice import JsonResource from scrapy.utils.engine import get_engine_status class EngineStatusResource(JsonResource): ws_name = 'enginestatus' def __init__(self, 
crawler, spider_name=None): JsonResource.__init__(self, crawler) self._spider_name = spider_name self.isLeaf = spider_name is not None def render_GET(self, txrequest): status = get_engine_status(self.crawler.engine) if self._spider_name is None: return status for sp, st in status['spiders'].items(): if sp.name == self._spider_name: return st def getChild(self, name, txrequest): return EngineStatusResource(name, self.crawler) Scrapy-0.14.4/scrapy/contrib/webservice/crawler.py0000600000016101777760000000031311754531743022250 0ustar buildbotnogroupfrom scrapy.webservice import JsonRpcResource class CrawlerResource(JsonRpcResource): ws_name = 'crawler' def __init__(self, crawler): JsonRpcResource.__init__(self, crawler, crawler) Scrapy-0.14.4/scrapy/contrib/webservice/__init__.py0000600000016101777760000000000011754531743022341 0ustar buildbotnogroupScrapy-0.14.4/scrapy/contrib/pipeline/0000700000016101777760000000000011754532077017710 5ustar buildbotnogroupScrapy-0.14.4/scrapy/contrib/pipeline/media.py0000600000016101777760000001111311754531743021337 0ustar buildbotnogroupfrom collections import defaultdict from twisted.internet.defer import Deferred, DeferredList from twisted.python.failure import Failure from scrapy.utils.defer import mustbe_deferred, defer_result from scrapy import log from scrapy.utils.request import request_fingerprint from scrapy.utils.misc import arg_to_iter class MediaPipeline(object): LOG_FAILED_RESULTS = True class SpiderInfo(object): def __init__(self, spider): self.spider = spider self.downloading = set() self.downloaded = {} self.waiting = defaultdict(list) def __init__(self, download_func=None): self.spiderinfo = {} self.download_func = download_func @classmethod def from_crawler(cls, crawler): try: pipe = cls.from_settings(crawler.settings) except AttributeError: pipe = cls() pipe.crawler = crawler return pipe def open_spider(self, spider): self.spiderinfo[spider] = self.SpiderInfo(spider) def close_spider(self, spider): del self.spiderinfo[spider] def process_item(self, item, spider): info = self.spiderinfo[spider] requests = arg_to_iter(self.get_media_requests(item, info)) dlist = [self._process_request(r, info) for r in requests] dfd = DeferredList(dlist, consumeErrors=1) return dfd.addCallback(self.item_completed, item, info) def _process_request(self, request, info): fp = request_fingerprint(request) cb = request.callback or (lambda _: _) eb = request.errback request.callback = None request.errback = None # Return cached result if request was already seen if fp in info.downloaded: return defer_result(info.downloaded[fp]).addCallbacks(cb, eb) # Otherwise, wait for result wad = Deferred().addCallbacks(cb, eb) info.waiting[fp].append(wad) # Check if request is downloading right now to avoid doing it twice if fp in info.downloading: return wad # Download request checking media_to_download hook output first info.downloading.add(fp) dfd = mustbe_deferred(self.media_to_download, request, info) dfd.addCallback(self._check_media_to_download, request, info) dfd.addBoth(self._cache_result_and_execute_waiters, fp, info) dfd.addErrback(log.err, spider=info.spider) return dfd.addBoth(lambda _: wad) # it must return wad at last def _check_media_to_download(self, result, request, info): if result is not None: return result if self.download_func: # this ugly code was left only to support tests. 
TODO: remove dfd = mustbe_deferred(self.download_func, request, info.spider) dfd.addCallbacks( callback=self.media_downloaded, callbackArgs=(request, info), errback=self.media_failed, errbackArgs=(request, info)) else: request.meta['handle_httpstatus_all'] = True dfd = self.crawler.engine.download(request, info.spider) dfd.addCallbacks( callback=self.media_downloaded, callbackArgs=(request, info), errback=self.media_failed, errbackArgs=(request, info)) return dfd def _cache_result_and_execute_waiters(self, result, fp, info): if isinstance(result, Failure): # minimize cached information for failure result.cleanFailure() result.frames = [] result.stack = None info.downloading.remove(fp) info.downloaded[fp] = result # cache result for wad in info.waiting.pop(fp): defer_result(result).chainDeferred(wad) ### Overradiable Interface def media_to_download(self, request, info): """Check request before starting download""" pass def get_media_requests(self, item, info): """Returns the media requests to download""" pass def media_downloaded(self, response, request, info): """Handler for success downloads""" return response def media_failed(self, failure, request, info): """Handler for failed downloads""" return failure def item_completed(self, results, item, info): """Called per item when all media requests has been processed""" if self.LOG_FAILED_RESULTS: msg = '%s found errors proessing %s' % (self.__class__.__name__, item) for ok, value in results: if not ok: log.err(value, msg, spider=info.spider) return item Scrapy-0.14.4/scrapy/contrib/pipeline/images.py0000600000016101777760000002616311754531743021540 0ustar buildbotnogroup""" Images Pipeline See documentation in topics/images.rst """ from __future__ import with_statement import os import time import hashlib import urlparse import rfc822 from cStringIO import StringIO from collections import defaultdict from twisted.internet import defer, threads from PIL import Image from scrapy.xlib.pydispatch import dispatcher from scrapy import log from scrapy.stats import stats from scrapy.utils.misc import md5sum from scrapy.http import Request from scrapy import signals from scrapy.exceptions import DropItem, NotConfigured, IgnoreRequest from scrapy.contrib.pipeline.media import MediaPipeline class NoimagesDrop(DropItem): """Product with no images exception""" class ImageException(Exception): """General image error exception""" class FSImagesStore(object): def __init__(self, basedir): if '://' in basedir: basedir = basedir.split('://', 1)[1] self.basedir = basedir self._mkdir(self.basedir) self.created_directories = defaultdict(set) dispatcher.connect(self.spider_closed, signals.spider_closed) def spider_closed(self, spider): self.created_directories.pop(spider.name, None) def persist_image(self, key, image, buf, info): absolute_path = self._get_filesystem_path(key) self._mkdir(os.path.dirname(absolute_path), info) image.save(absolute_path) def stat_image(self, key, info): absolute_path = self._get_filesystem_path(key) try: last_modified = os.path.getmtime(absolute_path) except: # FIXME: catching everything! 
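            # (descriptive note, not in the original source) a missing cache file
            # (OSError from getmtime above) is the expected case; returning an empty
            # dict makes media_to_download() treat the image as not yet downloaded,
            # so it is fetched again.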
return {} with open(absolute_path, 'rb') as imagefile: checksum = md5sum(imagefile) return {'last_modified': last_modified, 'checksum': checksum} def _get_filesystem_path(self, key): path_comps = key.split('/') return os.path.join(self.basedir, *path_comps) def _mkdir(self, dirname, domain=None): seen = self.created_directories[domain] if domain else set() if dirname not in seen: if not os.path.exists(dirname): os.makedirs(dirname) seen.add(dirname) class S3ImagesStore(object): AWS_ACCESS_KEY_ID = None AWS_SECRET_ACCESS_KEY = None POLICY = 'public-read' HEADERS = { 'Cache-Control': 'max-age=172800', 'Content-Type': 'image/jpeg', } def __init__(self, uri): assert uri.startswith('s3://') self.bucket, self.prefix = uri[5:].split('/', 1) def stat_image(self, key, info): def _onsuccess(boto_key): checksum = boto_key.etag.strip('"') last_modified = boto_key.last_modified modified_tuple = rfc822.parsedate_tz(last_modified) modified_stamp = int(rfc822.mktime_tz(modified_tuple)) return {'checksum': checksum, 'last_modified': modified_stamp} return self._get_boto_key(key).addCallback(_onsuccess) def _get_boto_bucket(self): from boto.s3.connection import S3Connection # disable ssl (is_secure=False) because of this python bug: # http://bugs.python.org/issue5103 c = S3Connection(self.AWS_ACCESS_KEY_ID, self.AWS_SECRET_ACCESS_KEY, is_secure=False) return c.get_bucket(self.bucket, validate=False) def _get_boto_key(self, key): b = self._get_boto_bucket() key_name = '%s%s' % (self.prefix, key) return threads.deferToThread(b.get_key, key_name) def persist_image(self, key, image, buf, info): """Upload image to S3 storage""" width, height = image.size b = self._get_boto_bucket() key_name = '%s%s' % (self.prefix, key) k = b.new_key(key_name) k.set_metadata('width', str(width)) k.set_metadata('height', str(height)) buf.seek(0) return threads.deferToThread(k.set_contents_from_file, buf, \ headers=self.HEADERS, policy=self.POLICY) class ImagesPipeline(MediaPipeline): """Abstract pipeline that implement the image downloading and thumbnail generation logic This pipeline tries to minimize network transfers and image processing, doing stat of the images and determining if image is new, uptodate or expired. `new` images are those that pipeline never processed and needs to be downloaded from supplier site the first time. `uptodate` images are the ones that the pipeline processed and are still valid images. `expired` images are those that pipeline already processed but the last modification was made long time ago, so a reprocessing is recommended to refresh it in case of change. 
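    A hypothetical project configuration enabling this pipeline could look like the
    following in settings.py; the concrete values are illustrative, only the setting
    names come from this module:

        ITEM_PIPELINES = ['scrapy.contrib.pipeline.images.ImagesPipeline']
        IMAGES_STORE = '/path/to/images'    # or 's3://bucket/prefix/' for S3ImagesStore
        IMAGES_EXPIRES = 90                 # days before a stored image counts as expired
        IMAGES_THUMBS = {'small': (50, 50), 'big': (270, 270)}
        IMAGES_MIN_WIDTH = 110              # smaller images are rejected
        IMAGES_MIN_HEIGHT = 110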
""" MEDIA_NAME = 'image' MIN_WIDTH = 0 MIN_HEIGHT = 0 EXPIRES = 90 THUMBS = {} STORE_SCHEMES = { '': FSImagesStore, 'file': FSImagesStore, 's3': S3ImagesStore, } def __init__(self, store_uri, download_func=None): if not store_uri: raise NotConfigured self.store = self._get_store(store_uri) super(ImagesPipeline, self).__init__(download_func=download_func) @classmethod def from_settings(cls, settings): cls.MIN_WIDTH = settings.getint('IMAGES_MIN_WIDTH', 0) cls.MIN_HEIGHT = settings.getint('IMAGES_MIN_HEIGHT', 0) cls.EXPIRES = settings.getint('IMAGES_EXPIRES', 90) cls.THUMBS = settings.get('IMAGES_THUMBS', {}) s3store = cls.STORE_SCHEMES['s3'] s3store.AWS_ACCESS_KEY_ID = settings['AWS_ACCESS_KEY_ID'] s3store.AWS_SECRET_ACCESS_KEY = settings['AWS_SECRET_ACCESS_KEY'] store_uri = settings['IMAGES_STORE'] return cls(store_uri) def _get_store(self, uri): if os.path.isabs(uri): # to support win32 paths like: C:\\some\dir scheme = 'file' else: scheme = urlparse.urlparse(uri).scheme store_cls = self.STORE_SCHEMES[scheme] return store_cls(uri) def media_downloaded(self, response, request, info): referer = request.headers.get('Referer') if response.status != 200: log.msg('Image (code: %s): Error downloading image from %s referred in <%s>' \ % (response.status, request, referer), level=log.WARNING, spider=info.spider) raise ImageException if not response.body: log.msg('Image (empty-content): Empty image from %s referred in <%s>: no-content' \ % (request, referer), level=log.WARNING, spider=info.spider) raise ImageException status = 'cached' if 'cached' in response.flags else 'downloaded' msg = 'Image (%s): Downloaded image from %s referred in <%s>' % \ (status, request, referer) log.msg(msg, level=log.DEBUG, spider=info.spider) self.inc_stats(info.spider, status) try: key = self.image_key(request.url) checksum = self.image_downloaded(response, request, info) except ImageException, ex: log.msg(str(ex), level=log.WARNING, spider=info.spider) raise except Exception: log.err(spider=info.spider) raise ImageException return {'url': request.url, 'path': key, 'checksum': checksum} def media_failed(self, failure, request, info): if not isinstance(failure.value, IgnoreRequest): referer = request.headers.get('Referer') msg = 'Image (unknown-error): Error downloading %s from %s referred in <%s>: %s' \ % (self.MEDIA_NAME, request, referer, str(failure)) log.msg(msg, level=log.WARNING, spider=info.spider) raise ImageException def media_to_download(self, request, info): def _onsuccess(result): if not result: return # returning None force download last_modified = result.get('last_modified', None) if not last_modified: return # returning None force download age_seconds = time.time() - last_modified age_days = age_seconds / 60 / 60 / 24 if age_days > self.EXPIRES: return # returning None force download referer = request.headers.get('Referer') log.msg('Image (uptodate): Downloaded %s from <%s> referred in <%s>' % \ (self.MEDIA_NAME, request.url, referer), level=log.DEBUG, spider=info.spider) self.inc_stats(info.spider, 'uptodate') checksum = result.get('checksum', None) return {'url': request.url, 'path': key, 'checksum': checksum} key = self.image_key(request.url) dfd = defer.maybeDeferred(self.store.stat_image, key, info) dfd.addCallbacks(_onsuccess, lambda _:None) dfd.addErrback(log.err, self.__class__.__name__ + '.store.stat_image') return dfd def image_downloaded(self, response, request, info): checksum = None for key, image, buf in self.get_images(response, request, info): if checksum is None: buf.seek(0) checksum = 
md5sum(buf) self.store.persist_image(key, image, buf, info) return checksum def get_images(self, response, request, info): key = self.image_key(request.url) orig_image = Image.open(StringIO(response.body)) width, height = orig_image.size if width < self.MIN_WIDTH or height < self.MIN_HEIGHT: raise ImageException("Image too small (%dx%d < %dx%d): %s" % \ (width, height, self.MIN_WIDTH, self.MIN_HEIGHT, response.url)) image, buf = self.convert_image(orig_image) yield key, image, buf for thumb_id, size in self.THUMBS.iteritems(): thumb_key = self.thumb_key(request.url, thumb_id) thumb_image, thumb_buf = self.convert_image(image, size) yield thumb_key, thumb_image, thumb_buf def inc_stats(self, spider, status): stats.inc_value('image_count', spider=spider) stats.inc_value('image_status_count/%s' % status, spider=spider) def convert_image(self, image, size=None): if image.format == 'PNG' and image.mode == 'RGBA': background = Image.new('RGBA', image.size, (255, 255, 255)) background.paste(image, image) image = background.convert('RGB') elif image.mode != 'RGB': image = image.convert('RGB') if size: image = image.copy() image.thumbnail(size, Image.ANTIALIAS) buf = StringIO() try: image.save(buf, 'JPEG') except Exception, ex: raise ImageException("Cannot process image. Error: %s" % ex) return image, buf def image_key(self, url): image_guid = hashlib.sha1(url).hexdigest() return 'full/%s.jpg' % (image_guid) def thumb_key(self, url, thumb_id): image_guid = hashlib.sha1(url).hexdigest() return 'thumbs/%s/%s.jpg' % (thumb_id, image_guid) def get_media_requests(self, item, info): return [Request(x) for x in item.get('image_urls', [])] def item_completed(self, results, item, info): item['images'] = [x for ok, x in results if ok] return item Scrapy-0.14.4/scrapy/contrib/pipeline/__init__.py0000600000016101777760000000117511754531743022026 0ustar buildbotnogroup""" Item pipeline See documentation in docs/item-pipeline.rst """ from scrapy.middleware import MiddlewareManager class ItemPipelineManager(MiddlewareManager): component_name = 'item pipeline' @classmethod def _get_mwlist_from_settings(cls, settings): return settings.getlist('ITEM_PIPELINES') def _add_middleware(self, pipe): super(ItemPipelineManager, self)._add_middleware(pipe) if hasattr(pipe, 'process_item'): self.methods['process_item'].append(pipe.process_item) def process_item(self, item, spider): return self._process_chain('process_item', item, spider) Scrapy-0.14.4/scrapy/contrib/logstats.py0000600000016101777760000000402111754531743020313 0ustar buildbotnogroupfrom twisted.internet import task from scrapy.xlib.pydispatch import dispatcher from scrapy.exceptions import NotConfigured from scrapy.conf import settings from scrapy import log, signals class Slot(object): def __init__(self): self.items = 0 self.itemsprev = 0 self.pages = 0 self.pagesprev = 0 class LogStats(object): """Log basic scraping stats periodically""" def __init__(self): self.interval = settings.getfloat('LOGSTATS_INTERVAL') if not self.interval: raise NotConfigured self.slots = {} self.multiplier = 60.0 / self.interval dispatcher.connect(self.item_scraped, signal=signals.item_scraped) dispatcher.connect(self.response_received, signal=signals.response_received) dispatcher.connect(self.spider_opened, signal=signals.spider_opened) dispatcher.connect(self.spider_closed, signal=signals.spider_closed) dispatcher.connect(self.engine_started, signal=signals.engine_started) dispatcher.connect(self.engine_stopped, signal=signals.engine_stopped) def item_scraped(self, spider): 
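        # (descriptive note, not in the original source) called via the item_scraped
        # signal connected above; log() later turns the per-spider deltas into an
        # items/min rate using self.multiplier (60.0 / LOGSTATS_INTERVAL).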
self.slots[spider].items += 1 def response_received(self, spider): self.slots[spider].pages += 1 def spider_opened(self, spider): self.slots[spider] = Slot() def spider_closed(self, spider): del self.slots[spider] def engine_started(self): self.tsk = task.LoopingCall(self.log) self.tsk.start(self.interval) def log(self): for spider, slot in self.slots.items(): irate = (slot.items - slot.itemsprev) * self.multiplier prate = (slot.pages - slot.pagesprev) * self.multiplier slot.pagesprev, slot.itemsprev = slot.pages, slot.items msg = "Crawled %d pages (at %d pages/min), scraped %d items (at %d items/min)" \ % (slot.pages, prate, slot.items, irate) log.msg(msg, spider=spider) def engine_stopped(self): if self.tsk.running: self.tsk.stop() Scrapy-0.14.4/scrapy/contrib/memusage.py0000600000016101777760000001000611754531743020256 0ustar buildbotnogroup""" MemoryUsage extension See documentation in docs/topics/extensions.rst """ import socket from pprint import pformat from twisted.internet import task from scrapy.xlib.pydispatch import dispatcher from scrapy import signals from scrapy import log from scrapy.exceptions import NotConfigured from scrapy.mail import MailSender from scrapy.stats import stats from scrapy.utils.memory import get_vmvalue_from_procfs, procfs_supported from scrapy.utils.engine import get_engine_status class MemoryUsage(object): def __init__(self, crawler): if not crawler.settings.getbool('MEMUSAGE_ENABLED'): raise NotConfigured if not procfs_supported(): raise NotConfigured self.crawler = crawler self.warned = False self.notify_mails = crawler.settings.getlist('MEMUSAGE_NOTIFY_MAIL') self.limit = crawler.settings.getint('MEMUSAGE_LIMIT_MB')*1024*1024 self.warning = crawler.settings.getint('MEMUSAGE_WARNING_MB')*1024*1024 self.report = crawler.settings.getbool('MEMUSAGE_REPORT') self.mail = MailSender() dispatcher.connect(self.engine_started, signal=signals.engine_started) dispatcher.connect(self.engine_stopped, signal=signals.engine_stopped) @classmethod def from_crawler(cls, crawler): return cls(crawler) def get_virtual_size(self): return get_vmvalue_from_procfs('VmSize') def engine_started(self): stats.set_value('memusage/startup', self.get_virtual_size()) self.tasks = [] tsk = task.LoopingCall(self.update) self.tasks.append(tsk) tsk.start(60.0, now=True) if self.limit: tsk = task.LoopingCall(self._check_limit) self.tasks.append(tsk) tsk.start(60.0, now=True) if self.warning: tsk = task.LoopingCall(self._check_warning) self.tasks.append(tsk) tsk.start(60.0, now=True) def engine_stopped(self): for tsk in self.tasks: if tsk.running: tsk.stop() def update(self): stats.max_value('memusage/max', self.get_virtual_size()) def _check_limit(self): if self.get_virtual_size() > self.limit: stats.set_value('memusage/limit_reached', 1) mem = self.limit/1024/1024 log.msg("Memory usage exceeded %dM. Shutting down Scrapy..." 
% mem, level=log.ERROR) if self.notify_mails: subj = "%s terminated: memory usage exceeded %dM at %s" % \ (self.crawler.settings['BOT_NAME'], mem, socket.gethostname()) self._send_report(self.notify_mails, subj) stats.set_value('memusage/limit_notified', 1) self.crawler.stop() def _check_warning(self): if self.warned: # warn only once return if self.get_virtual_size() > self.warning: stats.set_value('memusage/warning_reached', 1) mem = self.warning/1024/1024 log.msg("Memory usage reached %dM" % mem, level=log.WARNING) if self.notify_mails: subj = "%s warning: memory usage reached %dM at %s" % \ (self.crawler.settings['BOT_NAME'], mem, socket.gethostname()) self._send_report(self.notify_mails, subj) stats.set_value('memusage/warning_notified', 1) self.warned = True def _send_report(self, rcpts, subject): """send notification mail with some additional useful info""" s = "Memory usage at engine startup : %dM\r\n" % (stats.get_value('memusage/startup')/1024/1024) s += "Maximum memory usage : %dM\r\n" % (stats.get_value('memusage/max')/1024/1024) s += "Current memory usage : %dM\r\n" % (self.get_virtual_size()/1024/1024) s += "ENGINE STATUS ------------------------------------------------------- \r\n" s += "\r\n" s += pformat(get_engine_status(self.crawler.engine)) s += "\r\n" self.mail.send(rcpts, subject, s) Scrapy-0.14.4/scrapy/contrib/debug.py0000600000016101777760000000321111754531743017541 0ustar buildbotnogroup""" Extensions for debugging Scrapy See documentation in docs/topics/extensions.rst """ import os import sys import signal import traceback import threading from pdb import Pdb from scrapy.utils.engine import format_engine_status from scrapy import log class StackTraceDump(object): def __init__(self, crawler=None): self.crawler = crawler try: signal.signal(signal.SIGUSR2, self.dump_stacktrace) signal.signal(signal.SIGQUIT, self.dump_stacktrace) except AttributeError: # win32 platforms don't support SIGUSR signals pass @classmethod def from_crawler(cls, crawler): return cls(crawler) def dump_stacktrace(self, signum, frame): stackdumps = self._thread_stacks() enginestatus = format_engine_status(self.crawler.engine) msg = "Dumping stack trace and engine status" \ "\n{0}\n{1}".format(enginestatus, stackdumps) log.msg(msg) def _thread_stacks(self): id2name = dict((th.ident, th.name) for th in threading.enumerate()) dumps = '' for id_, frame in sys._current_frames().items(): name = id2name.get(id_, '') dump = ''.join(traceback.format_stack(frame)) dumps += "# Thread: {0}({1})\n{2}\n".format(name, id_, dump) return dumps class Debugger(object): def __init__(self): try: signal.signal(signal.SIGUSR2, self._enter_debugger) except AttributeError: # win32 platforms don't support SIGUSR signals pass def _enter_debugger(self, signum, frame): Pdb().set_trace(frame.f_back) Scrapy-0.14.4/scrapy/contrib/exporter/0000700000016101777760000000000011754532077017753 5ustar buildbotnogroupScrapy-0.14.4/scrapy/contrib/exporter/jsonlines.py0000600000016101777760000000044011754531743022330 0ustar buildbotnogroupfrom scrapy.contrib.exporter import JsonLinesItemExporter from scrapy.exceptions import ScrapyDeprecationWarning import warnings warnings.warn("Module `scrapy.contrib.exporter.jsonlines` is deprecated - use `scrapy.contrib.exporter` instead", ScrapyDeprecationWarning, stacklevel=2) Scrapy-0.14.4/scrapy/contrib/exporter/__init__.py0000600000016101777760000001540711754531743022074 0ustar buildbotnogroup""" Item Exporters are used to export/serialize items into different formats. 
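A minimal usage sketch (illustrative, not part of the original docstring; the output
file name and the `items` iterable of scraped items are hypothetical):

    exporter = JsonLinesItemExporter(open('items.jl', 'wb'))
    exporter.start_exporting()
    for item in items:
        exporter.export_item(item)
    exporter.finish_exporting()

All exporters defined in this module follow the same
start_exporting/export_item/finish_exporting protocol.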
""" import csv import pprint import marshal import cPickle as pickle from xml.sax.saxutils import XMLGenerator from scrapy.utils.py26 import json __all__ = ['BaseItemExporter', 'PprintItemExporter', 'PickleItemExporter', \ 'CsvItemExporter', 'XmlItemExporter', 'JsonLinesItemExporter', \ 'JsonItemExporter', 'MarshalItemExporter'] class BaseItemExporter(object): def __init__(self, **kwargs): self._configure(kwargs) def _configure(self, options, dont_fail=False): """Configure the exporter by poping options from the ``options`` dict. If dont_fail is set, it won't raise an exception on unexpected options (useful for using with keyword arguments in subclasses constructors) """ self.fields_to_export = options.pop('fields_to_export', None) self.export_empty_fields = options.pop('export_empty_fields', False) self.encoding = options.pop('encoding', 'utf-8') if not dont_fail and options: raise TypeError("Unexpected options: %s" % ', '.join(options.keys())) def export_item(self, item): raise NotImplementedError def serialize_field(self, field, name, value): serializer = field.get('serializer', self._to_str_if_unicode) return serializer(value) def start_exporting(self): pass def finish_exporting(self): pass def _to_str_if_unicode(self, value): return value.encode(self.encoding) if isinstance(value, unicode) else value def _get_serialized_fields(self, item, default_value=None, include_empty=None): """Return the fields to export as an iterable of tuples (name, serialized_value) """ if include_empty is None: include_empty = self.export_empty_fields if self.fields_to_export is None: if include_empty: field_iter = item.fields.iterkeys() else: field_iter = item.iterkeys() else: if include_empty: field_iter = self.fields_to_export else: nonempty_fields = set(item.keys()) field_iter = (x for x in self.fields_to_export if x in \ nonempty_fields) for field_name in field_iter: if field_name in item: field = item.fields[field_name] value = self.serialize_field(field, field_name, item[field_name]) else: value = default_value yield field_name, value class JsonLinesItemExporter(BaseItemExporter): def __init__(self, file, **kwargs): self._configure(kwargs) self.file = file self.encoder = json.JSONEncoder(**kwargs) def export_item(self, item): itemdict = dict(self._get_serialized_fields(item)) self.file.write(self.encoder.encode(itemdict) + '\n') class JsonItemExporter(JsonLinesItemExporter): def __init__(self, file, **kwargs): self._configure(kwargs) self.file = file self.encoder = json.JSONEncoder(**kwargs) self.first_item = True def start_exporting(self): self.file.write("[") def finish_exporting(self): self.file.write("]") def export_item(self, item): if self.first_item: self.first_item = False else: self.file.write(',\n') itemdict = dict(self._get_serialized_fields(item)) self.file.write(self.encoder.encode(itemdict)) class XmlItemExporter(BaseItemExporter): def __init__(self, file, **kwargs): self.item_element = kwargs.pop('item_element', 'item') self.root_element = kwargs.pop('root_element', 'items') self._configure(kwargs) self.xg = XMLGenerator(file, encoding=self.encoding) def start_exporting(self): self.xg.startDocument() self.xg.startElement(self.root_element, {}) def export_item(self, item): self.xg.startElement(self.item_element, {}) for name, value in self._get_serialized_fields(item, default_value=''): self._export_xml_field(name, value) self.xg.endElement(self.item_element) def finish_exporting(self): self.xg.endElement(self.root_element) self.xg.endDocument() def _export_xml_field(self, name, 
serialized_value): self.xg.startElement(name, {}) if hasattr(serialized_value, '__iter__'): for value in serialized_value: self._export_xml_field('value', value) else: self.xg.characters(serialized_value) self.xg.endElement(name) class CsvItemExporter(BaseItemExporter): def __init__(self, file, include_headers_line=True, join_multivalued=',', **kwargs): self._configure(kwargs, dont_fail=True) self.include_headers_line = include_headers_line self.csv_writer = csv.writer(file, **kwargs) self._headers_not_written = True self._join_multivalued = join_multivalued def _to_str_if_unicode(self, value): if isinstance(value, (list, tuple)): try: value = self._join_multivalued.join(value) except TypeError: # list in value may not contain strings pass return super(CsvItemExporter, self)._to_str_if_unicode(value) def export_item(self, item): if self._headers_not_written: self._headers_not_written = False self._write_headers_and_set_fields_to_export(item) fields = self._get_serialized_fields(item, default_value='', \ include_empty=True) values = [x[1] for x in fields] self.csv_writer.writerow(values) def _write_headers_and_set_fields_to_export(self, item): if self.include_headers_line: if not self.fields_to_export: self.fields_to_export = item.fields.keys() self.csv_writer.writerow(self.fields_to_export) class PickleItemExporter(BaseItemExporter): def __init__(self, file, protocol=2, **kwargs): self._configure(kwargs) self.file =file self.protocol = protocol def export_item(self, item): d = dict(self._get_serialized_fields(item)) pickle.dump(d, self.file, self.protocol) class MarshalItemExporter(BaseItemExporter): def __init__(self, file, **kwargs): self._configure(kwargs) self.file = file def export_item(self, item): marshal.dump(dict(self._get_serialized_fields(item)), self.file) class PprintItemExporter(BaseItemExporter): def __init__(self, file, **kwargs): self._configure(kwargs) self.file = file def export_item(self, item): itemdict = dict(self._get_serialized_fields(item)) self.file.write(pprint.pformat(itemdict) + '\n') Scrapy-0.14.4/scrapy/contrib/feedexport.py0000600000016101777760000001525111754531743020627 0ustar buildbotnogroup""" Feed Exports extension See documentation in docs/topics/feed-exports.rst """ import sys, os, posixpath from tempfile import TemporaryFile from datetime import datetime from urlparse import urlparse from ftplib import FTP from zope.interface import Interface, implements from twisted.internet import defer, threads from w3lib.url import file_uri_to_path from scrapy import log, signals from scrapy.xlib.pydispatch import dispatcher from scrapy.utils.ftp import ftp_makedirs_cwd from scrapy.exceptions import NotConfigured from scrapy.utils.misc import load_object from scrapy.conf import settings class IFeedStorage(Interface): """Interface that all Feed Storages must implement""" def __init__(uri): """Initialize the storage with the parameters given in the URI""" def open(spider): """Open the storage for the given spider. 
It must return a file-like object that will be used for the exporters""" def store(file): """Store the given file stream""" class BlockingFeedStorage(object): implements(IFeedStorage) def open(self, spider): return TemporaryFile(prefix='feed-') def store(self, file): return threads.deferToThread(self._store_in_thread, file) def _store_in_thread(self, file): raise NotImplementedError class StdoutFeedStorage(object): implements(IFeedStorage) def __init__(self, uri, _stdout=sys.stdout): self._stdout = _stdout def open(self, spider): return self._stdout def store(self, file): pass class FileFeedStorage(object): implements(IFeedStorage) def __init__(self, uri): self.path = file_uri_to_path(uri) def open(self, spider): dirname = os.path.dirname(self.path) if dirname and not os.path.exists(dirname): os.makedirs(dirname) return open(self.path, 'ab') def store(self, file): file.close() class S3FeedStorage(BlockingFeedStorage): def __init__(self, uri): try: import boto except ImportError: raise NotConfigured self.connect_s3 = boto.connect_s3 u = urlparse(uri) self.bucketname = u.hostname self.access_key = u.username or settings['AWS_ACCESS_KEY_ID'] self.secret_key = u.password or settings['AWS_SECRET_ACCESS_KEY'] self.keyname = u.path def _store_in_thread(self, file): file.seek(0) conn = self.connect_s3(self.access_key, self.secret_key) bucket = conn.get_bucket(self.bucketname, validate=False) key = bucket.new_key(self.keyname) key.set_contents_from_file(file) key.close() class FTPFeedStorage(BlockingFeedStorage): def __init__(self, uri): u = urlparse(uri) self.host = u.hostname self.port = int(u.port or '21') self.username = u.username self.password = u.password self.path = u.path def _store_in_thread(self, file): file.seek(0) ftp = FTP() ftp.connect(self.host, self.port) ftp.login(self.username, self.password) dirname, filename = posixpath.split(self.path) ftp_makedirs_cwd(ftp, dirname) ftp.storbinary('STOR %s' % filename, file) ftp.quit() class SpiderSlot(object): def __init__(self, file, exporter, storage, uri): self.file = file self.exporter = exporter self.storage = storage self.uri = uri self.itemcount = 0 class FeedExporter(object): def __init__(self): self.urifmt = settings['FEED_URI'] if not self.urifmt: raise NotConfigured self.format = settings['FEED_FORMAT'].lower() self.storages = self._load_components('FEED_STORAGES') self.exporters = self._load_components('FEED_EXPORTERS') if not self._storage_supported(self.urifmt): raise NotConfigured if not self._exporter_supported(self.format): raise NotConfigured self.store_empty = settings.getbool('FEED_STORE_EMPTY') uripar = settings['FEED_URI_PARAMS'] self._uripar = load_object(uripar) if uripar else lambda x, y: None self.slots = {} dispatcher.connect(self.open_spider, signals.spider_opened) dispatcher.connect(self.close_spider, signals.spider_closed) dispatcher.connect(self.item_scraped, signals.item_scraped) def open_spider(self, spider): uri = self.urifmt % self._get_uri_params(spider) storage = self._get_storage(uri) file = storage.open(spider) exporter = self._get_exporter(file) exporter.start_exporting() self.slots[spider] = SpiderSlot(file, exporter, storage, uri) def close_spider(self, spider): slot = self.slots.pop(spider) if not slot.itemcount and not self.store_empty: return slot.exporter.finish_exporting() logfmt = "%%s %s feed (%d items) in: %s" % (self.format, \ slot.itemcount, slot.uri) d = defer.maybeDeferred(slot.storage.store, slot.file) d.addCallback(lambda _: log.msg(logfmt % "Stored", spider=spider)) 
d.addErrback(log.err, logfmt % "Error storing", spider=spider) return d def item_scraped(self, item, spider): slot = self.slots[spider] slot.exporter.export_item(item) slot.itemcount += 1 return item def _load_components(self, setting_prefix): conf = dict(settings['%s_BASE' % setting_prefix]) conf.update(settings[setting_prefix]) d = {} for k, v in conf.items(): try: d[k] = load_object(v) except NotConfigured: pass return d def _exporter_supported(self, format): if format in self.exporters: return True log.msg("Unknown feed format: %s" % format, log.ERROR) def _storage_supported(self, uri): scheme = urlparse(uri).scheme if scheme in self.storages: try: self._get_storage(uri) return True except NotConfigured: log.msg("Disabled feed storage scheme: %s" % scheme, log.ERROR) else: log.msg("Unknown feed storage scheme: %s" % scheme, log.ERROR) def _get_exporter(self, *a, **kw): return self.exporters[self.format](*a, **kw) def _get_storage(self, uri): return self.storages[urlparse(uri).scheme](uri) def _get_uri_params(self, spider): params = {} for k in dir(spider): params[k] = getattr(spider, k) ts = datetime.utcnow().replace(microsecond=0).isoformat().replace(':', '-') params['time'] = ts self._uripar(params, spider) return params Scrapy-0.14.4/scrapy/contrib/linkextractors/0000700000016101777760000000000011754532077021157 5ustar buildbotnogroupScrapy-0.14.4/scrapy/contrib/linkextractors/image.py0000600000016101777760000000637311754531743022625 0ustar buildbotnogroup""" This module implements the HtmlImageLinkExtractor for extracting image links only. """ from urlparse import urljoin from scrapy.link import Link from scrapy.utils.url import canonicalize_url from scrapy.utils.python import unicode_to_str, flatten from scrapy.selector.libxml2sel import XPathSelectorList, HtmlXPathSelector class HTMLImageLinkExtractor(object): '''HTMLImageLinkExtractor objects are intended to extract image links from HTML pages given certain xpath locations. These locations can be passed in a list/tuple either when instanciating the LinkExtractor, or whenever you call extract_links. If no locations are specified in any of these places, a default pattern '//img' will be used. 
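For example (a minimal sketch; the XPath location is illustrative only)::

    lx = HTMLImageLinkExtractor(locations=['//div[@id="gallery"]'])
    image_links = lx.extract_links(response)  # list of Link objects pointing to images
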
If locations are specified when instanciating the LinkExtractor, and also when calling extract_links, both locations will be used for that call of extract_links''' def __init__(self, locations=None, unique=True, canonicalize=True): self.locations = flatten([locations]) self.unique = unique self.canonicalize = canonicalize def extract_from_selector(self, selector, encoding, parent=None): ret = [] def _add_link(url_sel, alt_sel=None): url = flatten([url_sel.extract()]) alt = flatten([alt_sel.extract()]) if alt_sel else (u'', ) if url: ret.append(Link(unicode_to_str(url[0], encoding), alt[0])) if selector.xmlNode.type == 'element': if selector.xmlNode.name == 'img': _add_link(selector.select('@src'), selector.select('@alt') or \ selector.select('@title')) else: children = selector.select('child::*') if len(children): for child in children: ret.extend(self.extract_from_selector(child, encoding, parent=selector)) elif selector.xmlNode.name == 'a' and not parent: _add_link(selector.select('@href'), selector.select('@title')) else: _add_link(selector) return ret def extract_links(self, response): xs = HtmlXPathSelector(response) base_url = xs.select('//base/@href').extract() base_url = urljoin(response.url, base_url[0].encode(response.encoding)) if base_url else response.url links = [] for location in self.locations: if isinstance(location, basestring): selectors = xs.select(location) elif isinstance(location, (XPathSelectorList, HtmlXPathSelector)): selectors = [location] if isinstance(location, HtmlXPathSelector) else location else: continue for selector in selectors: links.extend(self.extract_from_selector(selector, response.encoding)) seen, ret = set(), [] for link in links: link.url = urljoin(base_url, link.url) if self.unique: if link.url in seen: continue else: seen.add(link.url) if self.canonicalize: link.url = canonicalize_url(link.url) ret.append(link) return ret def matches(self, url): return False Scrapy-0.14.4/scrapy/contrib/linkextractors/lxmlhtml.py0000600000016101777760000000221111754531743023367 0ustar buildbotnogroup""" Link extractor based on lxml.html """ import lxml.html from scrapy.link import Link from scrapy.utils.python import unique as unique_list class LxmlParserLinkExtractor(object): def __init__(self, tag="a", attr="href", process=None, unique=False): self.scan_tag = tag if callable(tag) else lambda t: t == tag self.scan_attr = attr if callable(attr) else lambda a: a == attr self.process_attr = process if callable(process) else lambda v: v self.unique = unique self.links = [] def _extract_links(self, response_text, response_url): html = lxml.html.fromstring(response_text) html.make_links_absolute(response_url) for e, a, l, p in html.iterlinks(): if self.scan_tag(e.tag): if self.scan_attr(a): link = Link(self.process_attr(l), text=e.text) self.links.append(link) links = unique_list(self.links, key=lambda link: link.url) \ if self.unique else self.links return links def extract_links(self, response): return self._extract_links(response.body, response.url) Scrapy-0.14.4/scrapy/contrib/linkextractors/sgml.py0000600000016101777760000001441511754531743022501 0ustar buildbotnogroup""" SGMLParser-based Link extractors """ import re from urlparse import urlparse, urljoin from w3lib.url import safe_url_string from scrapy.selector import HtmlXPathSelector from scrapy.link import Link from scrapy.linkextractor import IGNORED_EXTENSIONS from scrapy.utils.misc import arg_to_iter from scrapy.utils.python import FixedSGMLParser, unique as unique_list, str_to_unicode from 
scrapy.utils.url import canonicalize_url, url_is_from_any_domain, url_has_any_extension from scrapy.utils.response import get_base_url class BaseSgmlLinkExtractor(FixedSGMLParser): def __init__(self, tag="a", attr="href", unique=False, process_value=None): FixedSGMLParser.__init__(self) self.scan_tag = tag if callable(tag) else lambda t: t == tag self.scan_attr = attr if callable(attr) else lambda a: a == attr self.process_value = (lambda v: v) if process_value is None else process_value self.current_link = None self.unique = unique def _extract_links(self, response_text, response_url, response_encoding, base_url=None): """ Do the real extraction work """ self.reset() self.feed(response_text) self.close() ret = [] if base_url is None: base_url = urljoin(response_url, self.base_url) if self.base_url else response_url for link in self.links: if isinstance(link.url, unicode): link.url = link.url.encode(response_encoding) link.url = urljoin(base_url, link.url) link.url = safe_url_string(link.url, response_encoding) link.text = str_to_unicode(link.text, response_encoding, errors='replace') ret.append(link) return ret def _process_links(self, links): """ Normalize and filter extracted links The subclass should override it if neccessary """ links = unique_list(links, key=lambda link: link.url) if self.unique else links return links def extract_links(self, response): # wrapper needed to allow to work directly with text links = self._extract_links(response.body, response.url, response.encoding) links = self._process_links(links) return links def reset(self): FixedSGMLParser.reset(self) self.links = [] self.base_url = None def unknown_starttag(self, tag, attrs): if tag == 'base': self.base_url = dict(attrs).get('href') if self.scan_tag(tag): for attr, value in attrs: if self.scan_attr(attr): url = self.process_value(value) if url is not None: link = Link(url=url) self.links.append(link) self.current_link = link def unknown_endtag(self, tag): self.current_link = None def handle_data(self, data): if self.current_link: self.current_link.text = self.current_link.text + data.strip() def matches(self, url): """This extractor matches with any url, since it doesn't contain any patterns""" return True _re_type = type(re.compile("", 0)) _matches = lambda url, regexs: any((r.search(url) for r in regexs)) _is_valid_url = lambda url: url.split('://', 1)[0] in set(['http', 'https', 'file']) class SgmlLinkExtractor(BaseSgmlLinkExtractor): def __init__(self, allow=(), deny=(), allow_domains=(), deny_domains=(), restrict_xpaths=(), tags=('a', 'area'), attrs=('href'), canonicalize=True, unique=True, process_value=None, deny_extensions=None): self.allow_res = [x if isinstance(x, _re_type) else re.compile(x) for x in arg_to_iter(allow)] self.deny_res = [x if isinstance(x, _re_type) else re.compile(x) for x in arg_to_iter(deny)] self.allow_domains = set(arg_to_iter(allow_domains)) self.deny_domains = set(arg_to_iter(deny_domains)) self.restrict_xpaths = tuple(arg_to_iter(restrict_xpaths)) self.canonicalize = canonicalize if deny_extensions is None: deny_extensions = IGNORED_EXTENSIONS self.deny_extensions = set(['.' 
+ e for e in deny_extensions]) tag_func = lambda x: x in tags attr_func = lambda x: x in attrs BaseSgmlLinkExtractor.__init__(self, tag=tag_func, attr=attr_func, unique=unique, process_value=process_value) def extract_links(self, response): base_url = None if self.restrict_xpaths: hxs = HtmlXPathSelector(response) html = ''.join(''.join(html_fragm for html_fragm in hxs.select(xpath_expr).extract()) \ for xpath_expr in self.restrict_xpaths) base_url = get_base_url(response) else: html = response.body links = self._extract_links(html, response.url, response.encoding, base_url) links = self._process_links(links) return links def _process_links(self, links): links = [x for x in links if self._link_allowed(x)] links = BaseSgmlLinkExtractor._process_links(self, links) return links def _link_allowed(self, link): parsed_url = urlparse(link.url) allowed = _is_valid_url(link.url) if self.allow_res: allowed &= _matches(link.url, self.allow_res) if self.deny_res: allowed &= not _matches(link.url, self.deny_res) if self.allow_domains: allowed &= url_is_from_any_domain(parsed_url, self.allow_domains) if self.deny_domains: allowed &= not url_is_from_any_domain(parsed_url, self.deny_domains) if self.deny_extensions: allowed &= not url_has_any_extension(parsed_url, self.deny_extensions) if allowed and self.canonicalize: link.url = canonicalize_url(parsed_url) return allowed def matches(self, url): if self.allow_domains and not url_is_from_any_domain(url, self.allow_domains): return False if self.deny_domains and url_is_from_any_domain(url, self.deny_domains): return False allowed = [regex.search(url) for regex in self.allow_res] if self.allow_res else [True] denied = [regex.search(url) for regex in self.deny_res] if self.deny_res else [] return any(allowed) and not any(denied) Scrapy-0.14.4/scrapy/contrib/linkextractors/__init__.py0000600000016101777760000000022211754531743023265 0ustar buildbotnogroup""" scrapy.contrib.linkextractors This package contains a collection of Link Extractors. 
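For example, SgmlLinkExtractor (defined in scrapy.contrib.linkextractors.sgml)
can be used from a spider callback to collect the links of a response. A
minimal sketch, with an illustrative ``allow`` pattern::

    from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor

    lx = SgmlLinkExtractor(allow=(r'/category/',))
    for link in lx.extract_links(response):
        print link.url, link.text
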
For more info see docs/topics/link-extractors.rst """ Scrapy-0.14.4/scrapy/contrib/linkextractors/htmlparser.py0000600000016101777760000000461711754531743023723 0ustar buildbotnogroup""" HTMLParser-based link extractor """ from HTMLParser import HTMLParser from urlparse import urljoin from w3lib.url import safe_url_string from scrapy.link import Link from scrapy.utils.python import unique as unique_list class HtmlParserLinkExtractor(HTMLParser): def __init__(self, tag="a", attr="href", process=None, unique=False): HTMLParser.__init__(self) self.scan_tag = tag if callable(tag) else lambda t: t == tag self.scan_attr = attr if callable(attr) else lambda a: a == attr self.process_attr = process if callable(process) else lambda v: v self.unique = unique def _extract_links(self, response_text, response_url, response_encoding): self.reset() self.feed(response_text) self.close() links = unique_list(self.links, key=lambda link: link.url) if self.unique else self.links ret = [] base_url = urljoin(response_url, self.base_url) if self.base_url else response_url for link in links: if isinstance(link.url, unicode): link.url = link.url.encode(response_encoding) link.url = urljoin(base_url, link.url) link.url = safe_url_string(link.url, response_encoding) link.text = link.text.decode(response_encoding) ret.append(link) return ret def extract_links(self, response): # wrapper needed to allow to work directly with text return self._extract_links(response.body, response.url, response.encoding) def reset(self): HTMLParser.reset(self) self.base_url = None self.current_link = None self.links = [] def handle_starttag(self, tag, attrs): if tag == 'base': self.base_url = dict(attrs).get('href') if self.scan_tag(tag): for attr, value in attrs: if self.scan_attr(attr): url = self.process_attr(value) link = Link(url=url) self.links.append(link) self.current_link = link def handle_endtag(self, tag): self.current_link = None def handle_data(self, data): if self.current_link and not self.current_link.text: self.current_link.text = data.strip() def matches(self, url): """This extractor matches with any url, since it doesn't contain any patterns""" return True Scrapy-0.14.4/scrapy/contrib/linkextractors/regex.py0000600000016101777760000000222011754531743022640 0ustar buildbotnogroupimport re from urlparse import urljoin from w3lib.html import remove_tags, remove_entities, replace_escape_chars from scrapy.link import Link from .sgml import SgmlLinkExtractor linkre = re.compile( "|\s.*?>)(.*?)<[/ ]?a>", re.DOTALL | re.IGNORECASE) def clean_link(link_text): """Remove leading and trailing whitespace and punctuation""" return link_text.strip("\t\r\n '\"") class RegexLinkExtractor(SgmlLinkExtractor): """High performant link extractor""" def _extract_links(self, response_text, response_url, response_encoding, base_url=None): if base_url is None: base_url = urljoin(response_url, self.base_url) if self.base_url else response_url clean_url = lambda u: urljoin(base_url, remove_entities(clean_link(u.decode(response_encoding)))) clean_text = lambda t: replace_escape_chars(remove_tags(t.decode(response_encoding))).strip() links_text = linkre.findall(response_text) urlstext = set([(clean_url(url), clean_text(text)) for url, _, text in links_text]) return [Link(url, text) for url, text in urlstext] Scrapy-0.14.4/scrapy/contrib/loader/0000700000016101777760000000000011754532077017351 5ustar buildbotnogroupScrapy-0.14.4/scrapy/contrib/loader/__init__.py0000600000016101777760000001201511754531743021462 0ustar buildbotnogroup""" Item Loader 
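Item Loaders provide a convenient mechanism for populating scraped items. A
minimal sketch of typical usage from a spider callback (``ProductItem`` and
the XPath expression are illustrative only)::

    from scrapy.contrib.loader import XPathItemLoader

    l = XPathItemLoader(item=ProductItem(), response=response)
    l.add_xpath('name', '//h1/text()')
    l.add_value('url', response.url)
    item = l.load_item()
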
See documentation in docs/topics/loaders.rst """ from collections import defaultdict import re from scrapy.item import Item from scrapy.selector import HtmlXPathSelector from scrapy.utils.misc import arg_to_iter, extract_regex from scrapy.utils.python import flatten from .common import wrap_loader_context from .processor import Identity class ItemLoader(object): default_item_class = Item default_input_processor = Identity() default_output_processor = Identity() def __init__(self, item=None, **context): if item is None: item = self.default_item_class() self.item = context['item'] = item self.context = context self._values = defaultdict(list) def add_value(self, field_name, value, *processors, **kw): value = self.get_value(value, *processors, **kw) if value is None: return if not field_name: for k,v in value.iteritems(): self._add_value(k, v) else: self._add_value(field_name, value) def replace_value(self, field_name, value, *processors, **kw): value = self.get_value(value, *processors, **kw) if value is None: return if not field_name: for k,v in value.iteritems(): self._replace_value(k, v) else: self._replace_value(field_name, value) def _add_value(self, field_name, value): value = arg_to_iter(value) processed_value = self._process_input_value(field_name, value) if processed_value: self._values[field_name] += arg_to_iter(processed_value) def _replace_value(self, field_name, value): self._values.pop(field_name, None) self._add_value(field_name, value) def get_value(self, value, *processors, **kw): regex = kw.get('re', None) if regex: value = arg_to_iter(value) value = flatten([extract_regex(regex, x) for x in value]) for proc in processors: if value is None: break proc = wrap_loader_context(proc, self.context) value = proc(value) return value def load_item(self): item = self.item for field_name in self._values: item[field_name] = self.get_output_value(field_name) return item def get_output_value(self, field_name): proc = self.get_output_processor(field_name) proc = wrap_loader_context(proc, self.context) try: return proc(self._values[field_name]) except Exception, e: raise ValueError("Error with output processor: field=%r value=%r error='%s: %s'" % \ (field_name, self._values[field_name], type(e).__name__, str(e))) def get_collected_values(self, field_name): return self._values[field_name] def get_input_processor(self, field_name): proc = getattr(self, '%s_in' % field_name, None) if not proc: proc = self._get_item_field_attr(field_name, 'input_processor', \ self.default_input_processor) return proc def get_output_processor(self, field_name): proc = getattr(self, '%s_out' % field_name, None) if not proc: proc = self._get_item_field_attr(field_name, 'output_processor', \ self.default_output_processor) return proc def _process_input_value(self, field_name, value): proc = self.get_input_processor(field_name) proc = wrap_loader_context(proc, self.context) return proc(value) def _get_item_field_attr(self, field_name, key, default=None): if isinstance(self.item, Item): value = self.item.fields[field_name].get(key, default) else: value = default return value class XPathItemLoader(ItemLoader): default_selector_class = HtmlXPathSelector def __init__(self, item=None, selector=None, response=None, **context): if selector is None and response is None: raise RuntimeError("%s must be instantiated with a selector " \ "or response" % self.__class__.__name__) if selector is None: selector = self.default_selector_class(response) self.selector = selector context.update(selector=selector, response=response) 
super(XPathItemLoader, self).__init__(item, **context) def add_xpath(self, field_name, xpath, *processors, **kw): values = self._get_values(xpath, **kw) self.add_value(field_name, values, *processors, **kw) def replace_xpath(self, field_name, xpath, *processors, **kw): values = self._get_values(xpath, **kw) self.replace_value(field_name, values, *processors, **kw) def get_xpath(self, xpath, *processors, **kw): values = self._get_values(xpath, **kw) return self.get_value(values, *processors, **kw) def _get_values(self, xpaths, **kw): xpaths = arg_to_iter(xpaths) return flatten([self.selector.select(xpath).extract() for xpath in xpaths]) Scrapy-0.14.4/scrapy/contrib/loader/processor.py0000600000016101777760000000407511754531743021751 0ustar buildbotnogroup""" This module provides some commonly used processors for Item Loaders. See documentation in docs/topics/loaders.rst """ from scrapy.utils.misc import arg_to_iter from scrapy.utils.datatypes import MergeDict from .common import wrap_loader_context class MapCompose(object): def __init__(self, *functions, **default_loader_context): self.functions = functions self.default_loader_context = default_loader_context def __call__(self, value, loader_context=None): values = arg_to_iter(value) if loader_context: context = MergeDict(loader_context, self.default_loader_context) else: context = self.default_loader_context wrapped_funcs = [wrap_loader_context(f, context) for f in self.functions] for func in wrapped_funcs: next_values = [] for v in values: next_values += arg_to_iter(func(v)) values = next_values return values class Compose(object): def __init__(self, *functions, **default_loader_context): self.functions = functions self.stop_on_none = default_loader_context.get('stop_on_none', True) self.default_loader_context = default_loader_context def __call__(self, value, loader_context=None): if loader_context: context = MergeDict(loader_context, self.default_loader_context) else: context = self.default_loader_context wrapped_funcs = [wrap_loader_context(f, context) for f in self.functions] for func in wrapped_funcs: if value is None and self.stop_on_none: break value = func(value) return value class TakeFirst(object): def __call__(self, values): for value in values: if value: return value class Identity(object): def __call__(self, values): return values class Join(object): def __init__(self, separator=u' '): self.separator = separator def __call__(self, values): return self.separator.join(values) Scrapy-0.14.4/scrapy/contrib/loader/common.py0000600000016101777760000000072311754531743021216 0ustar buildbotnogroup"""Common functions used in Item Loaders code""" from functools import partial from scrapy.utils.python import get_func_args def wrap_loader_context(function, context): """Wrap functions that receive loader_context to contain the context "pre-loaded" and expose a interface that receives only one argument """ if 'loader_context' in get_func_args(function): return partial(function, loader_context=context) else: return function Scrapy-0.14.4/scrapy/contrib/corestats.py0000600000016101777760000000235311754531743020470 0ustar buildbotnogroup""" Extension for collecting core stats like items scraped and start/finish times """ import datetime from scrapy.xlib.pydispatch import dispatcher from scrapy import signals from scrapy.stats import stats class CoreStats(object): def __init__(self): dispatcher.connect(self.stats_spider_opened, signal=signals.stats_spider_opened) dispatcher.connect(self.stats_spider_closing, 
signal=signals.stats_spider_closing) dispatcher.connect(self.item_scraped, signal=signals.item_scraped) dispatcher.connect(self.item_dropped, signal=signals.item_dropped) def stats_spider_opened(self, spider): stats.set_value('start_time', datetime.datetime.utcnow(), spider=spider) def stats_spider_closing(self, spider, reason): stats.set_value('finish_time', datetime.datetime.utcnow(), spider=spider) stats.set_value('finish_reason', reason, spider=spider) def item_scraped(self, item, spider): stats.inc_value('item_scraped_count', spider=spider) def item_dropped(self, item, spider, exception): reason = exception.__class__.__name__ stats.inc_value('item_dropped_count', spider=spider) stats.inc_value('item_dropped_reasons_count/%s' % reason, spider=spider) Scrapy-0.14.4/scrapy/contrib/spiders/0000700000016101777760000000000011754532077017554 5ustar buildbotnogroupScrapy-0.14.4/scrapy/contrib/spiders/init.py0000600000016101777760000000331411754531743021073 0ustar buildbotnogroupfrom scrapy.spider import BaseSpider class InitSpider(BaseSpider): """Base Spider with initialization facilities""" def __init__(self, *a, **kw): super(InitSpider, self).__init__(*a, **kw) self._postinit_reqs = [] self._init_complete = False self._init_started = False def make_requests_from_url(self, url): req = super(InitSpider, self).make_requests_from_url(url) if self._init_complete: return req self._postinit_reqs.append(req) if not self._init_started: self._init_started = True return self.init_request() def initialized(self, response=None): """This method must be set as the callback of your last initialization request. See self.init_request() docstring for more info. """ self._init_complete = True reqs = self._postinit_reqs[:] del self._postinit_reqs return reqs def init_request(self): """This function should return one initialization request, with the self.initialized method as callback. When the self.initialized method is called this spider is considered initialized. If you need to perform several requests for initializing your spider, you can do so by using different callbacks. The only requirement is that the final callback (of the last initialization request) must be self.initialized. The default implementation calls self.initialized immediately, and means that no initialization is needed. This method should be overridden only when you need to perform requests to initialize your spider """ return self.initialized() Scrapy-0.14.4/scrapy/contrib/spiders/feed.py0000600000016101777760000001302311754531743021031 0ustar buildbotnogroup""" This module implements the XMLFeedSpider which is the recommended spider to use for scraping from an XML feed. See documentation in docs/topics/spiders.rst """ from scrapy.spider import BaseSpider from scrapy.item import BaseItem from scrapy.http import Request from scrapy.utils.iterators import xmliter, csviter from scrapy.selector import XmlXPathSelector, HtmlXPathSelector from scrapy.exceptions import NotConfigured, NotSupported class XMLFeedSpider(BaseSpider): """ This class intends to be the base class for spiders that scrape from XML feeds. You can choose whether to parse the file using the 'iternodes' iterator, an 'xml' selector, or an 'html' selector. In most cases, it's convenient to use iternodes, since it's a faster and cleaner. 
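A minimal sketch of a spider based on this class (the names, URL and field
are illustrative only)::

    from scrapy.contrib.spiders import XMLFeedSpider

    class MyXMLSpider(XMLFeedSpider):
        name = 'example.com'
        start_urls = ['http://www.example.com/feed.xml']
        itertag = 'item'  # iterate over <item> nodes (the default)

        def parse_node(self, response, node):
            item = MyItem()  # hypothetical item class
            item['title'] = node.select('title/text()').extract()
            return item
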
""" iterator = 'iternodes' itertag = 'item' namespaces = () def process_results(self, response, results): """This overridable method is called for each result (item or request) returned by the spider, and it's intended to perform any last time processing required before returning the results to the framework core, for example setting the item GUIDs. It receives a list of results and the response which originated that results. It must return a list of results (Items or Requests). """ return results def adapt_response(self, response): """You can override this function in order to make any changes you want to into the feed before parsing it. This function must return a response. """ return response def parse_node(self, response, selector): """This method must be overriden with your custom spider functionality""" if hasattr(self, 'parse_item'): # backward compatibility return self.parse_item(response, selector) raise NotImplementedError def parse_nodes(self, response, nodes): """This method is called for the nodes matching the provided tag name (itertag). Receives the response and an XPathSelector for each node. Overriding this method is mandatory. Otherwise, you spider won't work. This method must return either a BaseItem, a Request, or a list containing any of them. """ for selector in nodes: ret = self.parse_node(response, selector) if isinstance(ret, (BaseItem, Request)): ret = [ret] if not isinstance(ret, (list, tuple)): raise TypeError('You cannot return an "%s" object from a spider' % type(ret).__name__) for result_item in self.process_results(response, ret): yield result_item def parse(self, response): if not hasattr(self, 'parse_node'): raise NotConfigured('You must define parse_node method in order to scrape this XML feed') response = self.adapt_response(response) if self.iterator == 'iternodes': nodes = xmliter(response, self.itertag) elif self.iterator == 'xml': selector = XmlXPathSelector(response) self._register_namespaces(selector) nodes = selector.select('//%s' % self.itertag) elif self.iterator == 'html': selector = HtmlXPathSelector(response) self._register_namespaces(selector) nodes = selector.select('//%s' % self.itertag) else: raise NotSupported('Unsupported node iterator') return self.parse_nodes(response, nodes) def _register_namespaces(self, selector): for (prefix, uri) in self.namespaces: selector.register_namespace(prefix, uri) class CSVFeedSpider(BaseSpider): """Spider for parsing CSV feeds. It receives a CSV file in a response; iterates through each of its rows, and calls parse_row with a dict containing each field's data. You can set some options regarding the CSV file, such as the delimiter and the file's headers. """ delimiter = None # When this is None, python's csv module's default delimiter is used headers = None def process_results(self, response, results): """This method has the same purpose as the one in XMLFeedSpider""" return results def adapt_response(self, response): """This method has the same purpose as the one in XMLFeedSpider""" return response def parse_row(self, response, row): """This method must be overriden with your custom spider functionality""" raise NotImplementedError def parse_rows(self, response): """Receives a response and a dict (representing each row) with a key for each provided (or detected) header of the CSV file. This spider also gives the opportunity to override adapt_response and process_results methods for pre and post-processing purposes. 
""" for row in csviter(response, self.delimiter, self.headers): ret = self.parse_row(response, row) if isinstance(ret, (BaseItem, Request)): ret = [ret] if not isinstance(ret, (list, tuple)): raise TypeError('You cannot return an "%s" object from a spider' % type(ret).__name__) for result_item in self.process_results(response, ret): yield result_item def parse(self, response): if not hasattr(self, 'parse_row'): raise NotConfigured('You must define parse_row method in order to scrape this CSV feed') response = self.adapt_response(response) return self.parse_rows(response) Scrapy-0.14.4/scrapy/contrib/spiders/sitemap.py0000600000016101777760000000407711754531743021601 0ustar buildbotnogroupimport re from scrapy.spider import BaseSpider from scrapy.http import Request, XmlResponse from scrapy.utils.sitemap import Sitemap, sitemap_urls_from_robots from scrapy.utils.gz import gunzip from scrapy import log class SitemapSpider(BaseSpider): sitemap_urls = () sitemap_rules = [('', 'parse')] sitemap_follow = [''] def __init__(self, *a, **kw): super(SitemapSpider, self).__init__(*a, **kw) self._cbs = [] for r, c in self.sitemap_rules: if isinstance(c, basestring): c = getattr(self, c) self._cbs.append((regex(r), c)) self._follow = [regex(x) for x in self.sitemap_follow] def start_requests(self): return [Request(x, callback=self._parse_sitemap) for x in self.sitemap_urls] def _parse_sitemap(self, response): if response.url.endswith('/robots.txt'): for url in sitemap_urls_from_robots(response.body): yield Request(url, callback=self._parse_sitemap) else: if isinstance(response, XmlResponse): body = response.body elif is_gzipped(response): body = gunzip(response.body) else: log.msg("Ignoring non-XML sitemap: %s" % response, log.WARNING) return s = Sitemap(body) if s.type == 'sitemapindex': for loc in iterloc(s): if any(x.search(loc) for x in self._follow): yield Request(loc, callback=self._parse_sitemap) elif s.type == 'urlset': for loc in iterloc(s): for r, c in self._cbs: if r.search(loc): yield Request(loc, callback=c) break def is_gzipped(response): ctype = response.headers.get('Content-Type', '') return ctype in ('application/x-gzip', 'application/gzip') def regex(x): if isinstance(x, basestring): return re.compile(x) return x def iterloc(it): for d in it: yield d['loc'] Scrapy-0.14.4/scrapy/contrib/spiders/crawl.py0000600000016101777760000000600511754531743021240 0ustar buildbotnogroup""" This modules implements the CrawlSpider which is the recommended spider to use for scraping typical web sites that requires crawling pages. 
See documentation in docs/topics/spiders.rst """ import copy from scrapy.http import Request from scrapy.utils.spider import iterate_spider_output from scrapy.spider import BaseSpider from scrapy.conf import settings def identity(x): return x class Rule(object): def __init__(self, link_extractor, callback=None, cb_kwargs=None, follow=None, process_links=None, process_request=identity): self.link_extractor = link_extractor self.callback = callback self.cb_kwargs = cb_kwargs or {} self.process_links = process_links self.process_request = process_request if follow is None: self.follow = False if callback else True else: self.follow = follow class CrawlSpider(BaseSpider): rules = () def __init__(self, *a, **kw): super(CrawlSpider, self).__init__(*a, **kw) self._compile_rules() def parse(self, response): return self._parse_response(response, self.parse_start_url, cb_kwargs={}, follow=True) def parse_start_url(self, response): return [] def process_results(self, response, results): return results def _requests_to_follow(self, response): seen = set() for n, rule in enumerate(self._rules): links = [l for l in rule.link_extractor.extract_links(response) if l not in seen] if links and rule.process_links: links = rule.process_links(links) seen = seen.union(links) for link in links: r = Request(url=link.url, callback=self._response_downloaded) r.meta.update(rule=n, link_text=link.text) yield rule.process_request(r) def _response_downloaded(self, response): rule = self._rules[response.meta['rule']] return self._parse_response(response, rule.callback, rule.cb_kwargs, rule.follow) def _parse_response(self, response, callback, cb_kwargs, follow=True): if callback: cb_res = callback(response, **cb_kwargs) or () cb_res = self.process_results(response, cb_res) for requests_or_item in iterate_spider_output(cb_res): yield requests_or_item if follow and settings.getbool('CRAWLSPIDER_FOLLOW_LINKS', True): for request_or_item in self._requests_to_follow(response): yield request_or_item def _compile_rules(self): def get_method(method): if callable(method): return method elif isinstance(method, basestring): return getattr(self, method, None) self._rules = [copy.copy(r) for r in self.rules] for rule in self._rules: rule.callback = get_method(rule.callback) rule.process_links = get_method(rule.process_links) rule.process_request = get_method(rule.process_request) Scrapy-0.14.4/scrapy/contrib/spiders/__init__.py0000600000016101777760000000027111754531743021666 0ustar buildbotnogroupfrom scrapy.contrib.spiders.crawl import CrawlSpider, Rule from scrapy.contrib.spiders.feed import XMLFeedSpider, CSVFeedSpider from scrapy.contrib.spiders.sitemap import SitemapSpider Scrapy-0.14.4/scrapy/contrib/statsmailer.py0000600000016101777760000000203411754531743021005 0ustar buildbotnogroup""" StatsMailer extension sends an email when a spider finishes scraping. 
Use STATSMAILER_RCPTS setting to enable and give the recipient mail address """ from scrapy.xlib.pydispatch import dispatcher from scrapy.stats import stats from scrapy import signals from scrapy.mail import MailSender from scrapy.conf import settings from scrapy.exceptions import NotConfigured class StatsMailer(object): def __init__(self): self.recipients = settings.getlist("STATSMAILER_RCPTS") if not self.recipients: raise NotConfigured dispatcher.connect(self.stats_spider_closed, signal=signals.stats_spider_closed) def stats_spider_closed(self, spider, spider_stats): mail = MailSender() body = "Global stats\n\n" body += "\n".join("%-50s : %s" % i for i in stats.get_stats().items()) body += "\n\n%s stats\n\n" % spider.name body += "\n".join("%-50s : %s" % i for i in spider_stats.items()) mail.send(self.recipients, "Scrapy stats for: %s" % spider.name, body) Scrapy-0.14.4/scrapy/contrib/spidermiddleware/0000700000016101777760000000000011754532077021427 5ustar buildbotnogroupScrapy-0.14.4/scrapy/contrib/spidermiddleware/httperror.py0000600000016101777760000000207511754531743024037 0ustar buildbotnogroup""" HttpError Spider Middleware See documentation in docs/topics/spider-middleware.rst """ from scrapy.exceptions import IgnoreRequest class HttpError(IgnoreRequest): """A non-200 response was filtered""" def __init__(self, response, *args, **kwargs): self.response = response super(HttpError, self).__init__(*args, **kwargs) class HttpErrorMiddleware(object): def process_spider_input(self, response, spider): if 200 <= response.status < 300: # common case return meta = response.request.meta if 'handle_httpstatus_all' in meta: return if 'handle_httpstatus_list' in meta: allowed_statuses = meta['handle_httpstatus_list'] else: allowed_statuses = getattr(spider, 'handle_httpstatus_list', ()) if response.status in allowed_statuses: return raise HttpError(response, 'Ignoring non-200 response') def process_spider_exception(self, response, exception, spider): if isinstance(exception, HttpError): return [] Scrapy-0.14.4/scrapy/contrib/spidermiddleware/referer.py0000600000016101777760000000067011754531743023437 0ustar buildbotnogroup""" RefererMiddleware: populates Request referer field, based on the Response which originated it. 
""" from scrapy.http import Request class RefererMiddleware(object): def process_spider_output(self, response, result, spider): def _set_referer(r): if isinstance(r, Request): r.headers.setdefault('Referer', response.url) return r return (_set_referer(r) for r in result or ()) Scrapy-0.14.4/scrapy/contrib/spidermiddleware/offsite.py0000600000016101777760000000410211754531743023436 0ustar buildbotnogroup""" Offsite Spider Middleware See documentation in docs/topics/spider-middleware.rst """ import re from scrapy.xlib.pydispatch import dispatcher from scrapy import signals from scrapy.http import Request from scrapy.utils.httpobj import urlparse_cached from scrapy import log class OffsiteMiddleware(object): def __init__(self): self.host_regexes = {} self.domains_seen = {} dispatcher.connect(self.spider_opened, signal=signals.spider_opened) dispatcher.connect(self.spider_closed, signal=signals.spider_closed) def process_spider_output(self, response, result, spider): for x in result: if isinstance(x, Request): if x.dont_filter or self.should_follow(x, spider): yield x else: domain = urlparse_cached(x).hostname if domain and domain not in self.domains_seen[spider]: log.msg("Filtered offsite request to %r: %s" % (domain, x), level=log.DEBUG, spider=spider) self.domains_seen[spider].add(domain) else: yield x def should_follow(self, request, spider): regex = self.host_regexes[spider] # hostanme can be None for wrong urls (like javascript links) host = urlparse_cached(request).hostname or '' return bool(regex.search(host)) def get_host_regex(self, spider): """Override this method to implement a different offsite policy""" allowed_domains = getattr(spider, 'allowed_domains', None) if not allowed_domains: return re.compile('') # allow all by default domains = [d.replace('.', r'\.') for d in allowed_domains] regex = r'^(.*\.)?(%s)$' % '|'.join(domains) return re.compile(regex) def spider_opened(self, spider): self.host_regexes[spider] = self.get_host_regex(spider) self.domains_seen[spider] = set() def spider_closed(self, spider): del self.host_regexes[spider] del self.domains_seen[spider] Scrapy-0.14.4/scrapy/contrib/spidermiddleware/urllength.py0000600000016101777760000000167511754531743024017 0ustar buildbotnogroup""" Url Length Spider Middleware See documentation in docs/topics/spider-middleware.rst """ from scrapy import log from scrapy.http import Request from scrapy.exceptions import NotConfigured class UrlLengthMiddleware(object): def __init__(self, maxlength): self.maxlength = maxlength @classmethod def from_settings(cls, settings): maxlength = settings.getint('URLLENGTH_LIMIT') if not maxlength: raise NotConfigured return cls(maxlength) def process_spider_output(self, response, result, spider): def _filter(request): if isinstance(request, Request) and len(request.url) > self.maxlength: log.msg("Ignoring link (url length > %d): %s " % (self.maxlength, request.url), \ level=log.DEBUG, spider=spider) return False else: return True return (r for r in result or () if _filter(r)) Scrapy-0.14.4/scrapy/contrib/spidermiddleware/depth.py0000600000016101777760000000376411754531743023120 0ustar buildbotnogroup""" Depth Spider Middleware See documentation in docs/topics/spider-middleware.rst """ import warnings from scrapy import log from scrapy.http import Request from scrapy.exceptions import ScrapyDeprecationWarning class DepthMiddleware(object): def __init__(self, maxdepth, stats=None, verbose_stats=False, prio=1): self.maxdepth = maxdepth self.stats = stats self.verbose_stats = verbose_stats 
self.prio = prio @classmethod def from_settings(cls, settings): maxdepth = settings.getint('DEPTH_LIMIT') usestats = settings.getbool('DEPTH_STATS') verbose = settings.getbool('DEPTH_STATS_VERBOSE') prio = settings.getint('DEPTH_PRIORITY') if usestats: from scrapy.stats import stats else: stats = None return cls(maxdepth, stats, verbose, prio) def process_spider_output(self, response, result, spider): def _filter(request): if isinstance(request, Request): depth = response.request.meta['depth'] + 1 request.meta['depth'] = depth if self.prio: request.priority -= depth * self.prio if self.maxdepth and depth > self.maxdepth: log.msg("Ignoring link (depth > %d): %s " % (self.maxdepth, request.url), \ level=log.DEBUG, spider=spider) return False elif self.stats: if self.verbose_stats: self.stats.inc_value('request_depth_count/%s' % depth, spider=spider) self.stats.max_value('request_depth_max', depth, spider=spider) return True # base case (depth=0) if self.stats and 'depth' not in response.request.meta: response.request.meta['depth'] = 0 if self.verbose_stats: self.stats.inc_value('request_depth_count/0', spider=spider) return (r for r in result or () if _filter(r)) Scrapy-0.14.4/scrapy/contrib/spidermiddleware/__init__.py0000600000016101777760000000000011754531743023527 0ustar buildbotnogroupScrapy-0.14.4/scrapy/contrib/throttle.py0000600000016101777760000001276511754531743020336 0ustar buildbotnogroupfrom scrapy.xlib.pydispatch import dispatcher from scrapy.exceptions import NotConfigured from scrapy import signals from scrapy.utils.httpobj import urlparse_cached from scrapy.resolver import dnscache class AutoThrottle(object): """ ============ AutoThrottle ============ This is an extension for automatically throttling crawling speed based on load. Design goals ============ 1. be nicer to sites instead of using default download delay of zero 2. automatically adjust scrapy to the optimum crawling speed, so the user doesn't have to tune the download delays and concurrent requests to find the optimum one. the user only needs to specify the maximum concurrent requests it allows, and the extension does the rest. Download latencies ================== In Scrapy, the download latency is the (real) time elapsed between establishing the TCP connection and receiving the HTTP headers. Note that these latencies are very hard to measure accurately in a cooperative multitasking environment because Scrapy may be busy processing a spider callback, for example, and unable to attend downloads. However, the latencies should give a reasonable estimate of how busy Scrapy (and ultimately, the server) is. This extension builds on that premise. Throttling rules ================ This adjusts download delays and concurrency based on the following rules: 1. spiders always start with one concurrent request and a download delay of START_DELAY 2. when a response is received, the download delay is adjusted to the average of previous download delay and the latency of the response. 3. after CONCURRENCY_CHECK_PERIOD responses have passed, the average latency of this period is checked against the previous one and: 3.1. if the latency remained constant (within standard deviation limits) and the concurrency is lower than MAX_CONCURRENCY, the concurrency is increased 3.2. 
if the latency has increased (beyond standard deviation limits) and the concurrency is higher than 1, the concurrency is decreased """ def __init__(self, crawler): settings = crawler.settings if not settings.getbool('AUTOTHROTTLE_ENABLED'): raise NotConfigured self.crawler = crawler dispatcher.connect(self.spider_opened, signal=signals.spider_opened) dispatcher.connect(self.response_received, signal=signals.response_received) self.START_DELAY = settings.getfloat("AUTOTHROTTLE_START_DELAY", 5.0) self.CONCURRENCY_CHECK_PERIOD = settings.getint("AUTOTHROTTLE_CONCURRENCY_CHECK_PERIOD", 10) self.MAX_CONCURRENCY = settings.getint("AUTOTHROTTLE_MAX_CONCURRENCY", 8) self.DEBUG = settings.getint("AUTOTHROTTLE_DEBUG", False) @classmethod def from_crawler(cls, crawler): return cls(crawler) def spider_opened(self, spider): spider.download_delay = self.START_DELAY if hasattr(spider, "max_concurrent_requests"): self.MAX_CONCURRENCY = spider.max_concurrent_requests # override in order to avoid to initialize slot with concurrency > 1 spider.max_concurrent_requests = 1 self.last_latencies = [self.START_DELAY] self.last_lat = self.START_DELAY, 0.0 def response_received(self, response, spider): slot = self._get_slot(response.request) latency = response.meta.get('download_latency') if not latency or not slot: return self._adjust_delay(slot, latency, response) self._check_concurrency(slot, latency) if self.DEBUG: spider.log("conc:%2d | delay:%5d ms | latency:%5d ms | size:%6d bytes" % \ (slot.concurrency, slot.delay*1000, \ latency*1000, len(response.body))) def _get_slot(self, request): downloader = self.crawler.engine.downloader key = urlparse_cached(request).hostname or '' if downloader.ip_concurrency: key = dnscache.get(key, key) return downloader.slots.get(key) def _check_concurrency(self, slot, latency): latencies = self.last_latencies latencies.append(latency) if len(latencies) == self.CONCURRENCY_CHECK_PERIOD: curavg, curdev = avg_stdev(latencies) preavg, predev = self.last_lat self.last_lat = curavg, curdev del latencies[:] if curavg > preavg + predev: if slot.concurrency > 1: slot.concurrency -= 1 elif slot.concurrency < self.MAX_CONCURRENCY: slot.concurrency += 1 def _adjust_delay(self, slot, latency, response): """Define delay adjustment policy""" # if latency is bigger than old delay, then use latency instead of mean. Works better with problematic sites new_delay = (slot.delay + latency) / 2.0 if latency < slot.delay else latency # dont adjust delay if response status != 200 and new delay is smaller than old one, # as error pages (and redirections) are usually small and so tend to reduce latency, thus provoking a positive feedback # by reducing delay instead of increase. if response.status == 200 or new_delay > slot.delay: slot.delay = new_delay def avg_stdev(lst): """Return average and standard deviation of the given list""" avg = sum(lst)/len(lst) sdsq = sum((x-avg) ** 2 for x in lst) stdev = (sdsq / (len(lst) -1)) ** 0.5 return avg, stdev Scrapy-0.14.4/scrapy/contrib/closespider.py0000600000016101777760000000577111754531743021004 0ustar buildbotnogroup"""CloseSpider is an extension that forces spiders to be closed after certain conditions are met. 
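The conditions are configured through the CLOSESPIDER_TIMEOUT,
CLOSESPIDER_ITEMCOUNT, CLOSESPIDER_PAGECOUNT and CLOSESPIDER_ERRORCOUNT
settings. For example (a sketch; the values are illustrative only), the
following project settings close each spider after one hour of crawling or
after 1000 scraped items, whichever happens first::

    CLOSESPIDER_TIMEOUT = 3600    # seconds
    CLOSESPIDER_ITEMCOUNT = 1000  # scraped items
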
See documentation in docs/topics/extensions.rst """ import warnings from collections import defaultdict from twisted.internet import reactor from twisted.python import log as txlog from scrapy.xlib.pydispatch import dispatcher from scrapy import signals, log from scrapy.exceptions import ScrapyDeprecationWarning from scrapy.conf import settings class CloseSpider(object): def __init__(self, crawler): self.crawler = crawler self.timeout = settings.getint('CLOSESPIDER_TIMEOUT') self.itemcount = settings.getint('CLOSESPIDER_ITEMCOUNT') # XXX: legacy support - remove for future releases if settings.getint('CLOSESPIDER_ITEMPASSED'): warnings.warn("CLOSESPIDER_ITEMPASSED setting is deprecated, use CLOSESPIDER_ITEMCOUNT instead", ScrapyDeprecationWarning) self.pagecount = settings.getint('CLOSESPIDER_ITEMPASSED') self.pagecount = settings.getint('CLOSESPIDER_PAGECOUNT') self.errorcount = settings.getint('CLOSESPIDER_ERRORCOUNT') self.errorcounts = defaultdict(int) self.pagecounts = defaultdict(int) self.counts = defaultdict(int) self.tasks = {} if self.errorcount: txlog.addObserver(self.catch_log) if self.pagecount: dispatcher.connect(self.page_count, signal=signals.response_received) if self.timeout: dispatcher.connect(self.spider_opened, signal=signals.spider_opened) if self.itemcount: dispatcher.connect(self.item_scraped, signal=signals.item_scraped) dispatcher.connect(self.spider_closed, signal=signals.spider_closed) @classmethod def from_crawler(cls, crawler): return cls(crawler) def catch_log(self, event): if event.get('logLevel') == log.ERROR: spider = event.get('spider') if spider: self.errorcounts[spider] += 1 if self.errorcounts[spider] == self.errorcount: self.crawler.engine.close_spider(spider, 'closespider_errorcount') def page_count(self, response, request, spider): self.pagecounts[spider] += 1 if self.pagecounts[spider] == self.pagecount: self.crawler.engine.close_spider(spider, 'closespider_pagecount') def spider_opened(self, spider): self.tasks[spider] = reactor.callLater(self.timeout, \ self.crawler.engine.close_spider, spider=spider, \ reason='closespider_timeout') def item_scraped(self, item, spider): self.counts[spider] += 1 if self.counts[spider] == self.itemcount: self.crawler.engine.close_spider(spider, 'closespider_itemcount') def spider_closed(self, spider): self.counts.pop(spider, None) self.pagecounts.pop(spider, None) self.errorcounts.pop(spider, None) tsk = self.tasks.pop(spider, None) if tsk and tsk.active(): tsk.cancel() Scrapy-0.14.4/scrapy/contrib/__init__.py0000600000016101777760000000000011754531743020203 0ustar buildbotnogroupScrapy-0.14.4/scrapy/contrib/spiderstate.py0000600000016101777760000000212111754531743021001 0ustar buildbotnogroupfrom __future__ import with_statement import os, cPickle as pickle from scrapy import signals from scrapy.exceptions import NotConfigured from scrapy.xlib.pydispatch import dispatcher class SpiderState(object): """Store and load spider state during a scraping job""" def __init__(self, jobdir=None): self.jobdir = jobdir @classmethod def from_crawler(cls, crawler): obj = cls(crawler.settings.get('JOBDIR')) dispatcher.connect(obj.spider_closed, signal=signals.spider_closed) dispatcher.connect(obj.spider_opened, signal=signals.spider_opened) return obj def spider_closed(self, spider): if self.jobdir: with open(self.statefn, 'wb') as f: pickle.dump(spider.state, f, protocol=2) def spider_opened(self, spider): if self.jobdir and os.path.exists(self.statefn): with open(self.statefn) as f: spider.state = pickle.load(f) else: 
spider.state = {} @property def statefn(self): return os.path.join(self.jobdir, 'spider.state') Scrapy-0.14.4/scrapy/contrib/memdebug.py0000600000016101777760000000251211754531743020243 0ustar buildbotnogroup""" MemoryDebugger extension See documentation in docs/topics/extensions.rst """ import gc from scrapy.xlib.pydispatch import dispatcher from scrapy import signals from scrapy.exceptions import NotConfigured from scrapy.conf import settings from scrapy.stats import stats from scrapy.utils.trackref import live_refs class MemoryDebugger(object): def __init__(self): try: import libxml2 self.libxml2 = libxml2 except ImportError: self.libxml2 = None if not settings.getbool('MEMDEBUG_ENABLED'): raise NotConfigured dispatcher.connect(self.engine_started, signals.engine_started) dispatcher.connect(self.engine_stopped, signals.engine_stopped) def engine_started(self): if self.libxml2: self.libxml2.debugMemory(1) def engine_stopped(self): if self.libxml2: self.libxml2.cleanupParser() stats.set_value('memdebug/libxml2_leaked_bytes', self.libxml2.debugMemory(1)) gc.collect() stats.set_value('memdebug/gc_garbage_count', len(gc.garbage)) if settings.getbool('TRACK_REFS'): for cls, wdict in live_refs.iteritems(): if not wdict: continue stats.set_value('memdebug/live_refs/%s' % cls.__name__, len(wdict)) Scrapy-0.14.4/scrapy/contrib/httpcache.py0000600000016101777760000000415411754531743020425 0ustar buildbotnogroupfrom __future__ import with_statement import os from time import time import cPickle as pickle from scrapy.http import Headers from scrapy.responsetypes import responsetypes from scrapy.utils.request import request_fingerprint from scrapy.utils.project import data_path from scrapy import conf class DbmCacheStorage(object): def __init__(self, settings=conf.settings): self.cachedir = data_path(settings['HTTPCACHE_DIR']) self.expiration_secs = settings.getint('HTTPCACHE_EXPIRATION_SECS') self.dbmodule = __import__(settings['HTTPCACHE_DBM_MODULE']) self.dbs = {} def open_spider(self, spider): dbpath = os.path.join(self.cachedir, '%s.db' % spider.name) self.dbs[spider] = self.dbmodule.open(dbpath, 'c') def close_spider(self, spider): self.dbs[spider].close() def retrieve_response(self, spider, request): data = self._read_data(spider, request) if data is None: return # not cached url = data['url'] status = data['status'] headers = Headers(data['headers']) body = data['body'] respcls = responsetypes.from_args(headers=headers, url=url) response = respcls(url=url, headers=headers, status=status, body=body) return response def store_response(self, spider, request, response): key = self._request_key(request) data = { 'status': response.status, 'url': response.url, 'headers': dict(response.headers), 'body': response.body, } self.dbs[spider]['%s_data' % key] = pickle.dumps(data, protocol=2) self.dbs[spider]['%s_time' % key] = str(time()) def _read_data(self, spider, request): key = self._request_key(request) db = self.dbs[spider] tkey = '%s_time' % key if not db.has_key(tkey): return # not found ts = db[tkey] if 0 < self.expiration_secs < time() - float(ts): return # expired return pickle.loads(db['%s_data' % key]) def _request_key(self, request): return request_fingerprint(request) Scrapy-0.14.4/scrapy/statscol.py0000600000016101777760000000573411754531743016663 0ustar buildbotnogroup""" Scrapy extension for collecting scraping stats """ import pprint from scrapy.xlib.pydispatch import dispatcher from scrapy.signals import stats_spider_opened, stats_spider_closing, \ stats_spider_closed from 
scrapy.utils.signal import send_catch_log from scrapy import signals from scrapy import log from scrapy.conf import settings class StatsCollector(object): def __init__(self): self._dump = settings.getbool('STATS_DUMP') self._stats = {None: {}} # None is for global stats def get_value(self, key, default=None, spider=None): return self._stats[spider].get(key, default) def get_stats(self, spider=None): return self._stats[spider] def set_value(self, key, value, spider=None): self._stats[spider][key] = value def set_stats(self, stats, spider=None): self._stats[spider] = stats def inc_value(self, key, count=1, start=0, spider=None): d = self._stats[spider] d[key] = d.setdefault(key, start) + count def max_value(self, key, value, spider=None): d = self._stats[spider] d[key] = max(d.setdefault(key, value), value) def min_value(self, key, value, spider=None): d = self._stats[spider] d[key] = min(d.setdefault(key, value), value) def clear_stats(self, spider=None): self._stats[spider].clear() def iter_spider_stats(self): return [x for x in self._stats.iteritems() if x[0]] def open_spider(self, spider): self._stats[spider] = {} send_catch_log(stats_spider_opened, spider=spider) def close_spider(self, spider, reason): send_catch_log(stats_spider_closing, spider=spider, reason=reason) stats = self._stats.pop(spider) send_catch_log(stats_spider_closed, spider=spider, reason=reason, \ spider_stats=stats) if self._dump: log.msg("Dumping spider stats:\n" + pprint.pformat(stats), \ spider=spider) self._persist_stats(stats, spider) def engine_stopped(self): stats = self.get_stats() if self._dump: log.msg("Dumping global stats:\n" + pprint.pformat(stats)) self._persist_stats(stats, spider=None) def _persist_stats(self, stats, spider=None): pass class MemoryStatsCollector(StatsCollector): def __init__(self): super(MemoryStatsCollector, self).__init__() self.spider_stats = {} def _persist_stats(self, stats, spider=None): if spider is not None: self.spider_stats[spider.name] = stats class DummyStatsCollector(StatsCollector): def get_value(self, key, default=None, spider=None): return default def set_value(self, key, value, spider=None): pass def set_stats(self, stats, spider=None): pass def inc_value(self, key, count=1, start=0, spider=None): pass def max_value(self, key, value, spider=None): pass def min_value(self, key, value, spider=None): pass Scrapy-0.14.4/scrapy/core/0000700000016101777760000000000011754532077015373 5ustar buildbotnogroupScrapy-0.14.4/scrapy/core/scheduler.py0000600000016101777760000000702511754531743017730 0ustar buildbotnogroupfrom __future__ import with_statement import os from os.path import join, exists from scrapy.utils.pqueue import PriorityQueue from scrapy.utils.reqser import request_to_dict, request_from_dict from scrapy.utils.misc import load_object from scrapy.utils.job import job_dir from scrapy.utils.py26 import json from scrapy.stats import stats from scrapy import log class Scheduler(object): def __init__(self, dupefilter, jobdir=None, dqclass=None, mqclass=None, logunser=False): self.df = dupefilter self.dqdir = self._dqdir(jobdir) self.dqclass = dqclass self.mqclass = mqclass self.logunser = logunser @classmethod def from_settings(cls, settings): dupefilter_cls = load_object(settings['DUPEFILTER_CLASS']) dupefilter = dupefilter_cls.from_settings(settings) dqclass = load_object(settings['SCHEDULER_DISK_QUEUE']) mqclass = load_object(settings['SCHEDULER_MEMORY_QUEUE']) logunser = settings.getbool('LOG_UNSERIALIZABLE_REQUESTS') return cls(dupefilter, job_dir(settings), 
dqclass, mqclass, logunser) def has_pending_requests(self): return len(self) > 0 def open(self, spider): self.spider = spider self.mqs = PriorityQueue(self._newmq) self.dqs = self._dq() if self.dqdir else None return self.df.open() def close(self, reason): if self.dqs: prios = self.dqs.close() with open(join(self.dqdir, 'active.json'), 'w') as f: json.dump(prios, f) return self.df.close(reason) def enqueue_request(self, request): if not request.dont_filter and self.df.request_seen(request): return if not self._dqpush(request): self._mqpush(request) def next_request(self): return self.mqs.pop() or self._dqpop() def __len__(self): return len(self.dqs) + len(self.mqs) if self.dqs else len(self.mqs) def _dqpush(self, request): if self.dqs is None: return try: reqd = request_to_dict(request, self.spider) self.dqs.push(reqd, -request.priority) except ValueError, e: # non serializable request if self.logunser: log.msg("Unable to serialize request: %s - reason: %s" % \ (request, str(e)), level=log.ERROR, spider=self.spider) return else: stats.inc_value('scheduler/disk_enqueued', spider=self.spider) return True def _mqpush(self, request): stats.inc_value('scheduler/memory_enqueued', spider=self.spider) self.mqs.push(request, -request.priority) def _dqpop(self): if self.dqs: d = self.dqs.pop() if d: return request_from_dict(d, self.spider) def _newmq(self, priority): return self.mqclass() def _newdq(self, priority): return self.dqclass(join(self.dqdir, 'p%s' % priority)) def _dq(self): activef = join(self.dqdir, 'active.json') if exists(activef): with open(activef) as f: prios = json.load(f) else: prios = () q = PriorityQueue(self._newdq, startprios=prios) if q: log.msg("Resuming crawl (%d requests scheduled)" % len(q), \ spider=self.spider) return q def _dqdir(self, jobdir): if jobdir: dqdir = join(jobdir, 'requests.queue') if not exists(dqdir): os.makedirs(dqdir) return dqdir Scrapy-0.14.4/scrapy/core/engine.py0000600000016101777760000002445011754531743017220 0ustar buildbotnogroup""" This is the Scrapy engine which controls the Scheduler, Downloader and Spiders. 
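The engine keeps one Slot per open spider (see the Slot class below), which tracks
that spider's in-progress requests, its scheduler and its pending start_requests.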
For more information see docs/topics/architecture.rst """ import warnings from time import time from twisted.internet import defer from twisted.python.failure import Failure from scrapy import log, signals from scrapy.stats import stats from scrapy.core.downloader import Downloader from scrapy.core.scraper import Scraper from scrapy.exceptions import DontCloseSpider, ScrapyDeprecationWarning from scrapy.http import Response, Request from scrapy.utils.misc import load_object from scrapy.utils.signal import send_catch_log, send_catch_log_deferred from scrapy.utils.reactor import CallLaterOnce class Slot(object): def __init__(self, start_requests, close_if_idle, nextcall, scheduler): self.closing = False self.inprogress = set() # requests in progress self.start_requests = iter(start_requests) self.close_if_idle = close_if_idle self.nextcall = nextcall self.scheduler = scheduler def add_request(self, request): self.inprogress.add(request) def remove_request(self, request): self.inprogress.remove(request) self._maybe_fire_closing() def close(self): self.closing = defer.Deferred() self._maybe_fire_closing() return self.closing def _maybe_fire_closing(self): if self.closing and not self.inprogress: if self.nextcall: self.nextcall.cancel() self.closing.callback(None) class ExecutionEngine(object): def __init__(self, crawler, spider_closed_callback): self.settings = crawler.settings self.slots = {} self.running = False self.paused = False self.scheduler_cls = load_object(self.settings['SCHEDULER']) self.downloader = Downloader(crawler) self.scraper = Scraper(crawler) self._concurrent_spiders = self.settings.getint('CONCURRENT_SPIDERS', 1) if self._concurrent_spiders != 1: warnings.warn("CONCURRENT_SPIDERS settings is deprecated, use " \ "Scrapyd max_proc config instead", ScrapyDeprecationWarning) self._spider_closed_callback = spider_closed_callback @defer.inlineCallbacks def start(self): """Start the execution engine""" assert not self.running, "Engine already running" self.start_time = time() yield send_catch_log_deferred(signal=signals.engine_started) self.running = True def stop(self): """Stop the execution engine gracefully""" assert self.running, "Engine not running" self.running = False dfd = self._close_all_spiders() return dfd.addBoth(lambda _: self._finish_stopping_engine()) def pause(self): """Pause the execution engine""" self.paused = True def unpause(self): """Resume the execution engine""" self.paused = False def _next_request(self, spider): try: slot = self.slots[spider] except KeyError: return if self.paused: slot.nextcall.schedule(5) return while not self._needs_backout(spider): if not self._next_request_from_scheduler(spider): break if slot.start_requests and not self._needs_backout(spider): try: request = slot.start_requests.next() except StopIteration: slot.start_requests = None except Exception, exc: log.err(None, 'Obtaining request from start requests', \ spider=spider) else: self.crawl(request, spider) if self.spider_is_idle(spider) and slot.close_if_idle: self._spider_idle(spider) def _needs_backout(self, spider): slot = self.slots[spider] return not self.running \ or slot.closing \ or self.downloader.needs_backout() \ or self.scraper.slots[spider].needs_backout() def _next_request_from_scheduler(self, spider): slot = self.slots[spider] request = slot.scheduler.next_request() if not request: return d = self._download(request, spider) d.addBoth(self._handle_downloader_output, request, spider) d.addErrback(log.msg, spider=spider) d.addBoth(lambda _: 
slot.remove_request(request)) d.addErrback(log.msg, spider=spider) d.addBoth(lambda _: slot.nextcall.schedule()) d.addErrback(log.msg, spider=spider) return d def _handle_downloader_output(self, response, request, spider): assert isinstance(response, (Request, Response, Failure)), response # downloader middleware can return requests (for example, redirects) if isinstance(response, Request): self.crawl(response, spider) return # response is a Response or Failure d = self.scraper.enqueue_scrape(response, request, spider) d.addErrback(log.err, spider=spider) return d def spider_is_idle(self, spider): scraper_idle = spider in self.scraper.slots \ and self.scraper.slots[spider].is_idle() pending = self.slots[spider].scheduler.has_pending_requests() downloading = bool(self.downloader.slots) idle = scraper_idle and not (pending or downloading) return idle @property def open_spiders(self): return self.slots.keys() def has_capacity(self): """Does the engine have capacity to handle more spiders""" return len(self.slots) < self._concurrent_spiders def crawl(self, request, spider): assert spider in self.open_spiders, \ "Spider %r not opened when crawling: %s" % (spider.name, request) self.schedule(request, spider) self.slots[spider].nextcall.schedule() def schedule(self, request, spider): return self.slots[spider].scheduler.enqueue_request(request) def download(self, request, spider): slot = self.slots[spider] slot.add_request(request) d = self._download(request, spider) d.addBoth(self._downloaded, slot, request, spider) return d def _downloaded(self, response, slot, request, spider): slot.remove_request(request) return self.download(response, spider) \ if isinstance(response, Request) else response def _download(self, request, spider): slot = self.slots[spider] slot.add_request(request) def _on_success(response): assert isinstance(response, (Response, Request)) if isinstance(response, Response): response.request = request # tie request to response received log.msg(log.formatter.crawled(request, response, spider), \ level=log.DEBUG, spider=spider) send_catch_log(signal=signals.response_received, \ response=response, request=request, spider=spider) return response def _on_error(failure): failure.request = request return failure def _on_complete(_): slot.nextcall.schedule() return _ dwld = self.downloader.fetch(request, spider) dwld.addCallbacks(_on_success, _on_error) dwld.addBoth(_on_complete) return dwld @defer.inlineCallbacks def open_spider(self, spider, start_requests=None, close_if_idle=True): assert self.has_capacity(), "No free spider slots when opening %r" % \ spider.name log.msg("Spider opened", spider=spider) nextcall = CallLaterOnce(self._next_request, spider) scheduler = self.scheduler_cls.from_settings(self.settings) slot = Slot(start_requests or (), close_if_idle, nextcall, scheduler) self.slots[spider] = slot yield scheduler.open(spider) yield self.scraper.open_spider(spider) stats.open_spider(spider) yield send_catch_log_deferred(signals.spider_opened, spider=spider) slot.nextcall.schedule() def _spider_idle(self, spider): """Called when a spider gets idle. This function is called when there are no remaining pages to download or schedule. It can be called multiple times. If some extension raises a DontCloseSpider exception (in the spider_idle signal handler) the spider is not closed until the next loop and this function is guaranteed to be called (at least) once again for this spider. 
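        A rough sketch of such a signal handler in an extension (the
        has_more_requests helper is hypothetical)::

            from scrapy.exceptions import DontCloseSpider

            def spider_idle(self, spider):
                if self.has_more_requests(spider):  # hypothetical check for pending work
                    raise DontCloseSpider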
""" res = send_catch_log(signal=signals.spider_idle, \ spider=spider, dont_log=DontCloseSpider) if any(isinstance(x, Failure) and isinstance(x.value, DontCloseSpider) \ for _, x in res): self.slots[spider].nextcall.schedule(5) return if self.spider_is_idle(spider): self.close_spider(spider, reason='finished') def close_spider(self, spider, reason='cancelled'): """Close (cancel) spider and clear all its outstanding requests""" slot = self.slots[spider] if slot.closing: return slot.closing log.msg("Closing spider (%s)" % reason, spider=spider) dfd = slot.close() dfd.addBoth(lambda _: self.scraper.close_spider(spider)) dfd.addErrback(log.err, spider=spider) dfd.addBoth(lambda _: slot.scheduler.close(reason)) dfd.addErrback(log.err, spider=spider) dfd.addBoth(lambda _: send_catch_log_deferred(signal=signals.spider_closed, \ spider=spider, reason=reason)) dfd.addErrback(log.err, spider=spider) dfd.addBoth(lambda _: stats.close_spider(spider, reason=reason)) dfd.addErrback(log.err, spider=spider) dfd.addBoth(lambda _: log.msg("Spider closed (%s)" % reason, spider=spider)) dfd.addBoth(lambda _: self.slots.pop(spider)) dfd.addErrback(log.err, spider=spider) dfd.addBoth(lambda _: self._spider_closed_callback(spider)) return dfd def _close_all_spiders(self): dfds = [self.close_spider(s, reason='shutdown') for s in self.open_spiders] dlist = defer.DeferredList(dfds) return dlist @defer.inlineCallbacks def _finish_stopping_engine(self): yield send_catch_log_deferred(signal=signals.engine_stopped) yield stats.engine_stopped() Scrapy-0.14.4/scrapy/core/spidermw.py0000600000016101777760000000570711754531743017611 0ustar buildbotnogroup""" Spider Middleware manager See documentation in docs/topics/spider-middleware.rst """ from twisted.python.failure import Failure from scrapy.middleware import MiddlewareManager from scrapy.utils.defer import mustbe_deferred from scrapy.utils.conf import build_component_list def _isiterable(possible_iterator): return hasattr(possible_iterator, '__iter__') class SpiderMiddlewareManager(MiddlewareManager): component_name = 'spider middleware' @classmethod def _get_mwlist_from_settings(cls, settings): return build_component_list(settings['SPIDER_MIDDLEWARES_BASE'], \ settings['SPIDER_MIDDLEWARES']) def _add_middleware(self, mw): super(SpiderMiddlewareManager, self)._add_middleware(mw) if hasattr(mw, 'process_spider_input'): self.methods['process_spider_input'].append(mw.process_spider_input) if hasattr(mw, 'process_spider_output'): self.methods['process_spider_output'].insert(0, mw.process_spider_output) if hasattr(mw, 'process_spider_exception'): self.methods['process_spider_exception'].insert(0, mw.process_spider_exception) def scrape_response(self, scrape_func, response, request, spider): fname = lambda f:'%s.%s' % (f.im_self.__class__.__name__, f.im_func.__name__) def process_spider_input(response): for method in self.methods['process_spider_input']: try: result = method(response=response, spider=spider) assert result is None, \ 'Middleware %s must returns None or ' \ 'raise an exception, got %s ' \ % (fname(method), type(result)) except: return scrape_func(Failure(), request, spider) return scrape_func(response, request, spider) def process_spider_exception(_failure): exception = _failure.value for method in self.methods['process_spider_exception']: result = method(response=response, exception=exception, spider=spider) assert result is None or _isiterable(result), \ 'Middleware %s must returns None, or an iterable object, got %s ' % \ (fname(method), type(result)) if 
result is not None: return result return _failure def process_spider_output(result): for method in self.methods['process_spider_output']: result = method(response=response, result=result, spider=spider) assert _isiterable(result), \ 'Middleware %s must returns an iterable object, got %s ' % \ (fname(method), type(result)) return result dfd = mustbe_deferred(process_spider_input, response) dfd.addErrback(process_spider_exception) dfd.addCallback(process_spider_output) return dfd Scrapy-0.14.4/scrapy/core/scraper.py0000600000016101777760000002075611754531743017417 0ustar buildbotnogroup"""This module implements the Scraper component which parses responses and extracts information from them""" from collections import deque from twisted.python.failure import Failure from twisted.internet import defer from scrapy.utils.defer import defer_result, defer_succeed, parallel, iter_errback from scrapy.utils.spider import iterate_spider_output from scrapy.utils.misc import load_object from scrapy.utils.signal import send_catch_log, send_catch_log_deferred from scrapy.exceptions import CloseSpider, DropItem from scrapy import signals from scrapy.http import Request, Response from scrapy.item import BaseItem from scrapy.core.spidermw import SpiderMiddlewareManager from scrapy import log from scrapy.stats import stats class Slot(object): """Scraper slot (one per running spider)""" MIN_RESPONSE_SIZE = 1024 def __init__(self, max_active_size=5000000): self.max_active_size = max_active_size self.queue = deque() self.active = set() self.active_size = 0 self.itemproc_size = 0 self.closing = None def add_response_request(self, response, request): deferred = defer.Deferred() self.queue.append((response, request, deferred)) if isinstance(response, Response): self.active_size += max(len(response.body), self.MIN_RESPONSE_SIZE) else: self.active_size += self.MIN_RESPONSE_SIZE return deferred def next_response_request_deferred(self): response, request, deferred = self.queue.popleft() self.active.add(request) return response, request, deferred def finish_response(self, response, request): self.active.remove(request) if isinstance(response, Response): self.active_size -= max(len(response.body), self.MIN_RESPONSE_SIZE) else: self.active_size -= self.MIN_RESPONSE_SIZE def is_idle(self): return not (self.queue or self.active) def needs_backout(self): return self.active_size > self.max_active_size class Scraper(object): def __init__(self, crawler): self.slots = {} self.spidermw = SpiderMiddlewareManager.from_crawler(crawler) itemproc_cls = load_object(crawler.settings['ITEM_PROCESSOR']) self.itemproc = itemproc_cls.from_crawler(crawler) self.concurrent_items = crawler.settings.getint('CONCURRENT_ITEMS') self.crawler = crawler @defer.inlineCallbacks def open_spider(self, spider): """Open the given spider for scraping and allocate resources for it""" assert spider not in self.slots, "Spider already opened: %s" % spider self.slots[spider] = Slot() yield self.itemproc.open_spider(spider) def close_spider(self, spider): """Close a spider being scraped and release its resources""" assert spider in self.slots, "Spider not opened: %s" % spider slot = self.slots[spider] slot.closing = defer.Deferred() slot.closing.addCallback(self.itemproc.close_spider) self._check_if_closing(spider, slot) return slot.closing def is_idle(self): """Return True if there isn't any more spiders to process""" return not self.slots def _check_if_closing(self, spider, slot): if slot.closing and slot.is_idle(): del self.slots[spider] 
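            # slot is idle and has been removed: fire the closing deferred so the
            # itemproc.close_spider callback chained in close_spider() runs and the
            # deferred returned by close_spider() resolves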
slot.closing.callback(spider) def enqueue_scrape(self, response, request, spider): slot = self.slots[spider] dfd = slot.add_response_request(response, request) def finish_scraping(_): slot.finish_response(response, request) self._check_if_closing(spider, slot) self._scrape_next(spider, slot) return _ dfd.addBoth(finish_scraping) dfd.addErrback(log.err, 'Scraper bug processing %s' % request, \ spider=spider) self._scrape_next(spider, slot) return dfd def _scrape_next(self, spider, slot): while slot.queue: response, request, deferred = slot.next_response_request_deferred() self._scrape(response, request, spider).chainDeferred(deferred) def _scrape(self, response, request, spider): """Handle the downloaded response or failure trough the spider callback/errback""" assert isinstance(response, (Response, Failure)) dfd = self._scrape2(response, request, spider) # returns spiders processed output dfd.addErrback(self.handle_spider_error, request, response, spider) dfd.addCallback(self.handle_spider_output, request, response, spider) return dfd def _scrape2(self, request_result, request, spider): """Handle the diferent cases of request's result been a Response or a Failure""" if not isinstance(request_result, Failure): return self.spidermw.scrape_response(self.call_spider, \ request_result, request, spider) else: # FIXME: don't ignore errors in spider middleware dfd = self.call_spider(request_result, request, spider) return dfd.addErrback(self._log_download_errors, \ request_result, request, spider) def call_spider(self, result, request, spider): dfd = defer_result(result) dfd.addCallbacks(request.callback or spider.parse, request.errback) return dfd.addCallback(iterate_spider_output) def handle_spider_error(self, _failure, request, response, spider): exc = _failure.value if isinstance(exc, CloseSpider): self.crawler.engine.close_spider(spider, exc.reason or 'cancelled') return log.err(_failure, "Spider error processing %s" % request, spider=spider) send_catch_log(signal=signals.spider_error, failure=_failure, response=response, \ spider=spider) stats.inc_value("spider_exceptions/%s" % _failure.value.__class__.__name__, \ spider=spider) def handle_spider_output(self, result, request, response, spider): if not result: return defer_succeed(None) it = iter_errback(result, self.handle_spider_error, request, response, spider) dfd = parallel(it, self.concurrent_items, self._process_spidermw_output, request, response, spider) return dfd def _process_spidermw_output(self, output, request, response, spider): """Process each Request/Item (given in the output parameter) returned from the given spider """ if isinstance(output, Request): send_catch_log(signal=signals.request_received, request=output, \ spider=spider) self.crawler.engine.crawl(request=output, spider=spider) elif isinstance(output, BaseItem): self.slots[spider].itemproc_size += 1 dfd = self.itemproc.process_item(output, spider) dfd.addBoth(self._itemproc_finished, output, response, spider) return dfd elif output is None: pass else: log.msg("Spider must return Request, BaseItem or None, got %r in %s" % \ (type(output).__name__, request), log.ERROR, spider=spider) def _log_download_errors(self, spider_failure, download_failure, request, spider): """Log and silence errors that come from the engine (typically download errors that got propagated thru here) """ if spider_failure is download_failure: log.msg("Error downloading %s: %s" % \ (request, spider_failure.getErrorMessage()), log.ERROR, spider=spider) return return spider_failure def 
_itemproc_finished(self, output, item, response, spider): """ItemProcessor finished for the given ``item`` and returned ``output`` """ self.slots[spider].itemproc_size -= 1 if isinstance(output, Failure): ex = output.value if isinstance(ex, DropItem): log.msg(log.formatter.dropped(item, ex, response, spider), \ level=log.WARNING, spider=spider) return send_catch_log_deferred(signal=signals.item_dropped, \ item=item, spider=spider, exception=output.value) else: log.err(output, 'Error processing %s' % item, spider=spider) else: log.msg(log.formatter.scraped(output, response, spider), \ log.DEBUG, spider=spider) return send_catch_log_deferred(signal=signals.item_scraped, \ item=output, response=response, spider=spider) Scrapy-0.14.4/scrapy/core/__init__.py0000600000016101777760000000006311754531743017504 0ustar buildbotnogroup""" Scrapy core library classes and functions. """ Scrapy-0.14.4/scrapy/core/downloader/0000700000016101777760000000000011754532077017531 5ustar buildbotnogroupScrapy-0.14.4/scrapy/core/downloader/middleware.py0000600000016101777760000000577411754531743022236 0ustar buildbotnogroup""" Downloader Middleware manager See documentation in docs/topics/downloader-middleware.rst """ from scrapy.http import Request, Response from scrapy.middleware import MiddlewareManager from scrapy.utils.defer import mustbe_deferred from scrapy.utils.conf import build_component_list class DownloaderMiddlewareManager(MiddlewareManager): component_name = 'downloader middleware' @classmethod def _get_mwlist_from_settings(cls, settings): return build_component_list(settings['DOWNLOADER_MIDDLEWARES_BASE'], \ settings['DOWNLOADER_MIDDLEWARES']) def _add_middleware(self, mw): if hasattr(mw, 'process_request'): self.methods['process_request'].append(mw.process_request) if hasattr(mw, 'process_response'): self.methods['process_response'].insert(0, mw.process_response) if hasattr(mw, 'process_exception'): self.methods['process_exception'].insert(0, mw.process_exception) def download(self, download_func, request, spider): def process_request(request): for method in self.methods['process_request']: response = method(request=request, spider=spider) assert response is None or isinstance(response, (Response, Request)), \ 'Middleware %s.process_request must return None, Response or Request, got %s' % \ (method.im_self.__class__.__name__, response.__class__.__name__) if response: return response return download_func(request=request, spider=spider) def process_response(response): assert response is not None, 'Received None in process_response' if isinstance(response, Request): return response for method in self.methods['process_response']: response = method(request=request, response=response, spider=spider) assert isinstance(response, (Response, Request)), \ 'Middleware %s.process_response must return Response or Request, got %s' % \ (method.im_self.__class__.__name__, type(response)) if isinstance(response, Request): return response return response def process_exception(_failure): exception = _failure.value for method in self.methods['process_exception']: response = method(request=request, exception=exception, spider=spider) assert response is None or isinstance(response, (Response, Request)), \ 'Middleware %s.process_exception must return None, Response or Request, got %s' % \ (method.im_self.__class__.__name__, type(response)) if response: return response return _failure deferred = mustbe_deferred(process_request, request) deferred.addErrback(process_exception) deferred.addCallback(process_response) return 
deferred Scrapy-0.14.4/scrapy/core/downloader/handlers/0000700000016101777760000000000011754532077021331 5ustar buildbotnogroupScrapy-0.14.4/scrapy/core/downloader/handlers/http.py0000600000016101777760000000231011754531743022657 0ustar buildbotnogroup"""Download handlers for http and https schemes""" from twisted.internet import reactor from scrapy.exceptions import NotSupported from scrapy.utils.misc import load_object from scrapy.conf import settings from scrapy import optional_features ssl_supported = 'ssl' in optional_features if ssl_supported: from twisted.internet.ssl import ClientContextFactory HTTPClientFactory = load_object(settings['DOWNLOADER_HTTPCLIENTFACTORY']) class HttpDownloadHandler(object): def __init__(self, httpclientfactory=HTTPClientFactory): self.httpclientfactory = httpclientfactory def download_request(self, request, spider): """Return a deferred for the HTTP download""" factory = self.httpclientfactory(request) self._connect(factory) return factory.deferred def _connect(self, factory): host, port = factory.host, factory.port if factory.scheme == 'https': if ssl_supported: return reactor.connectSSL(host, port, factory, \ ClientContextFactory()) raise NotSupported("HTTPS not supported: install pyopenssl library") else: return reactor.connectTCP(host, port, factory) Scrapy-0.14.4/scrapy/core/downloader/handlers/file.py0000600000016101777760000000067111754531743022627 0ustar buildbotnogroupfrom w3lib.url import file_uri_to_path from scrapy.responsetypes import responsetypes from scrapy.utils.decorator import defers class FileDownloadHandler(object): @defers def download_request(self, request, spider): filepath = file_uri_to_path(request.url) body = open(filepath, 'rb').read() respcls = responsetypes.from_args(filename=filepath, body=body) return respcls(url=request.url, body=body) Scrapy-0.14.4/scrapy/core/downloader/handlers/__init__.py0000600000016101777760000000225111754531743023443 0ustar buildbotnogroup"""Download handlers for different schemes""" from scrapy.exceptions import NotSupported, NotConfigured from scrapy.utils.httpobj import urlparse_cached from scrapy.conf import settings from scrapy.utils.misc import load_object class DownloadHandlers(object): def __init__(self): self._handlers = {} self._notconfigured = {} handlers = settings.get('DOWNLOAD_HANDLERS_BASE') handlers.update(settings.get('DOWNLOAD_HANDLERS', {})) for scheme, clspath in handlers.iteritems(): cls = load_object(clspath) try: dh = cls() except NotConfigured, ex: self._notconfigured[scheme] = str(ex) else: self._handlers[scheme] = dh.download_request def download_request(self, request, spider): scheme = urlparse_cached(request).scheme try: handler = self._handlers[scheme] except KeyError: msg = self._notconfigured.get(scheme, \ 'no handler available for that scheme') raise NotSupported("Unsupported URL scheme '%s': %s" % (scheme, msg)) return handler(request, spider) Scrapy-0.14.4/scrapy/core/downloader/handlers/s3.py0000600000016101777760000000435311754531743022236 0ustar buildbotnogroupfrom scrapy import optional_features from scrapy.exceptions import NotConfigured from scrapy.utils.httpobj import urlparse_cached from scrapy.conf import settings from .http import HttpDownloadHandler try: from boto.s3.connection import S3Connection except ImportError: S3Connection = object class _v19_S3Connection(S3Connection): """A dummy S3Connection wrapper that doesn't do any syncronous download""" def _mexe(self, method, bucket, key, headers, *args, **kwargs): return headers class 
_v20_S3Connection(S3Connection): """A dummy S3Connection wrapper that doesn't do any syncronous download""" def _mexe(self, http_request, *args): http_request.authorize(connection=self) return http_request.headers try: import boto.auth except ImportError: _S3Connection = _v19_S3Connection else: _S3Connection = _v20_S3Connection class S3DownloadHandler(object): def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, \ httpdownloadhandler=HttpDownloadHandler): if 'boto' not in optional_features: raise NotConfigured("missing boto library") if not aws_access_key_id: aws_access_key_id = settings['AWS_ACCESS_KEY_ID'] if not aws_secret_access_key: aws_secret_access_key = settings['AWS_SECRET_ACCESS_KEY'] try: self.conn = _S3Connection(aws_access_key_id, aws_secret_access_key) except Exception, ex: raise NotConfigured(str(ex)) self._download_http = httpdownloadhandler().download_request def download_request(self, request, spider): p = urlparse_cached(request) scheme = 'https' if request.meta.get('is_secure') else 'http' bucket = p.hostname path = p.path + '?' + p.query if p.query else p.path url = '%s://%s.s3.amazonaws.com%s' % (scheme, bucket, path) signed_headers = self.conn.make_request( method=request.method, bucket=bucket, key=p.path, query_args=p.query, headers=request.headers, data=request.body) httpreq = request.replace(url=url, headers=signed_headers) return self._download_http(httpreq, spider) Scrapy-0.14.4/scrapy/core/downloader/webclient.py0000600000016101777760000001121011754531743022053 0ustar buildbotnogroupfrom time import time from urlparse import urlparse, urlunparse, urldefrag from twisted.python import failure from twisted.web.client import HTTPClientFactory from twisted.web.http import HTTPClient from twisted.internet import defer from scrapy.http import Headers from scrapy.utils.httpobj import urlparse_cached from scrapy.responsetypes import responsetypes def _parsed_url_args(parsed): path = urlunparse(('', '', parsed.path or '/', parsed.params, parsed.query, '')) host = parsed.hostname port = parsed.port scheme = parsed.scheme netloc = parsed.netloc if port is None: port = 443 if scheme == 'https' else 80 return scheme, netloc, host, port, path def _parse(url): url = url.strip() parsed = urlparse(url) return _parsed_url_args(parsed) class ScrapyHTTPPageGetter(HTTPClient): delimiter = '\n' def connectionMade(self): self.headers = Headers() # bucket for response headers # Method command self.sendCommand(self.factory.method, self.factory.path) # Headers for key, values in self.factory.headers.items(): for value in values: self.sendHeader(key, value) self.endHeaders() # Body if self.factory.body is not None: self.transport.write(self.factory.body) def lineReceived(self, line): return HTTPClient.lineReceived(self, line.rstrip()) def handleHeader(self, key, value): self.headers.appendlist(key, value) def handleStatus(self, version, status, message): self.factory.gotStatus(version, status, message) def handleEndHeaders(self): self.factory.gotHeaders(self.headers) def connectionLost(self, reason): HTTPClient.connectionLost(self, reason) self.factory.noPage(reason) def handleResponse(self, response): if self.factory.method.upper() == 'HEAD': self.factory.page('') else: self.factory.page(response) self.transport.loseConnection() def timeout(self): self.transport.loseConnection() self.factory.noPage(\ defer.TimeoutError("Getting %s took longer than %s seconds." 
% \ (self.factory.url, self.factory.timeout))) class ScrapyHTTPClientFactory(HTTPClientFactory): """Scrapy implementation of the HTTPClientFactory overwriting the serUrl method to make use of our Url object that cache the parse result. """ protocol = ScrapyHTTPPageGetter waiting = 1 noisy = False followRedirect = False afterFoundGet = False def __init__(self, request, timeout=180): self.url = urldefrag(request.url)[0] self.method = request.method self.body = request.body or None self.headers = Headers(request.headers) self.response_headers = None self.timeout = request.meta.get('download_timeout') or timeout self.start_time = time() self.deferred = defer.Deferred().addCallback(self._build_response, request) # Fixes Twisted 11.1.0+ support as HTTPClientFactory is expected # to have _disconnectedDeferred. See Twisted r32329. # As Scrapy implements it's own logic to handle redirects is not # needed to add the callback _waitForDisconnect. # Specifically this avoids the AttributeError exception when # clientConnectionFailed method is called. self._disconnectedDeferred = defer.Deferred() self._set_connection_attributes(request) # set Host header based on url self.headers.setdefault('Host', self.netloc) # set Content-Length based len of body if self.body is not None: self.headers['Content-Length'] = len(self.body) # just in case a broken http/1.1 decides to keep connection alive self.headers.setdefault("Connection", "close") def _build_response(self, body, request): request.meta['download_latency'] = self.headers_time-self.start_time status = int(self.status) headers = Headers(self.response_headers) respcls = responsetypes.from_args(headers=headers, url=self.url) return respcls(url=self.url, status=status, headers=headers, body=body) def _set_connection_attributes(self, request): parsed = urlparse_cached(request) self.scheme, self.netloc, self.host, self.port, self.path = _parsed_url_args(parsed) proxy = request.meta.get('proxy') if proxy: self.scheme, _, self.host, self.port, _ = _parse(proxy) self.path = self.url def gotHeaders(self, headers): self.headers_time = time() self.response_headers = headers Scrapy-0.14.4/scrapy/core/downloader/__init__.py0000600000016101777760000001376511754531743021657 0ustar buildbotnogroupimport random import warnings from time import time from collections import deque from functools import partial from twisted.internet import reactor, defer from twisted.python.failure import Failure from scrapy.utils.defer import mustbe_deferred from scrapy.utils.signal import send_catch_log from scrapy.utils.httpobj import urlparse_cached from scrapy.resolver import dnscache from scrapy.exceptions import ScrapyDeprecationWarning from scrapy import signals from scrapy import log from .middleware import DownloaderMiddlewareManager from .handlers import DownloadHandlers class Slot(object): """Downloader slot""" def __init__(self, concurrency, delay, settings): self.concurrency = concurrency self.delay = delay self.randomize_delay = settings.getbool('RANDOMIZE_DOWNLOAD_DELAY') self.active = set() self.queue = deque() self.transferring = set() self.lastseen = 0 def free_transfer_slots(self): return self.concurrency - len(self.transferring) def download_delay(self): if self.randomize_delay: return random.uniform(0.5*self.delay, 1.5*self.delay) return self.delay def _get_concurrency_delay(concurrency, spider, settings): delay = settings.getfloat('DOWNLOAD_DELAY') if hasattr(spider, 'DOWNLOAD_DELAY'): warnings.warn("%s.DOWNLOAD_DELAY attribute is deprecated, use %s.download_delay 
instead" % (type(spider).__name__, type(spider).__name__)) delay = spider.DOWNLOAD_DELAY if hasattr(spider, 'download_delay'): delay = spider.download_delay # TODO: remove for Scrapy 0.15 c = settings.getint('CONCURRENT_REQUESTS_PER_SPIDER') if c: warnings.warn("CONCURRENT_REQUESTS_PER_SPIDER setting is deprecated, " \ "use CONCURRENT_REQUESTS_PER_DOMAIN instead", ScrapyDeprecationWarning) concurrency = c # ---------------------------- if hasattr(spider, 'max_concurrent_requests'): concurrency = spider.max_concurrent_requests if delay > 0: concurrency = 1 # force concurrency=1 if download delay required return concurrency, delay class Downloader(object): def __init__(self, crawler): self.settings = crawler.settings self.slots = {} self.active = set() self.handlers = DownloadHandlers() self.total_concurrency = self.settings.getint('CONCURRENT_REQUESTS') self.domain_concurrency = self.settings.getint('CONCURRENT_REQUESTS_PER_DOMAIN') self.ip_concurrency = self.settings.getint('CONCURRENT_REQUESTS_PER_IP') self.middleware = DownloaderMiddlewareManager.from_crawler(crawler) self.inactive_slots = {} def fetch(self, request, spider): key, slot = self._get_slot(request, spider) self.active.add(request) slot.active.add(request) def _deactivate(response): self.active.remove(request) slot.active.remove(request) if not slot.active: # remove empty slots self.inactive_slots[key] = self.slots.pop(key) return response dlfunc = partial(self._enqueue_request, slot=slot) dfd = self.middleware.download(dlfunc, request, spider) return dfd.addBoth(_deactivate) def needs_backout(self): return len(self.active) >= self.total_concurrency def _get_slot(self, request, spider): key = urlparse_cached(request).hostname or '' if self.ip_concurrency: key = dnscache.get(key, key) if key not in self.slots: if key in self.inactive_slots: self.slots[key] = self.inactive_slots.pop(key) else: if self.ip_concurrency: concurrency = self.ip_concurrency else: concurrency = self.domain_concurrency concurrency, delay = _get_concurrency_delay(concurrency, spider, self.settings) self.slots[key] = Slot(concurrency, delay, self.settings) return key, self.slots[key] def _enqueue_request(self, request, spider, slot): def _downloaded(response): send_catch_log(signal=signals.response_downloaded, \ response=response, request=request, spider=spider) return response deferred = defer.Deferred().addCallback(_downloaded) slot.queue.append((request, deferred)) self._process_queue(spider, slot) return deferred def _process_queue(self, spider, slot): # Delay queue processing if a download_delay is configured now = time() delay = slot.download_delay() if delay: penalty = delay - now + slot.lastseen if penalty > 0 and slot.free_transfer_slots(): d = defer.Deferred() d.addCallback(self._process_queue, slot) reactor.callLater(penalty, d.callback, spider) return slot.lastseen = now # Process enqueued requests if there are free slots to transfer for this slot while slot.queue and slot.free_transfer_slots() > 0: request, deferred = slot.queue.popleft() dfd = self._download(slot, request, spider) dfd.chainDeferred(deferred) def _download(self, slot, request, spider): # The order is very important for the following deferreds. Do not change! # 1. Create the download deferred dfd = mustbe_deferred(self.handlers.download_request, request, spider) # 2. 
After response arrives, remove the request from transferring # state to free up the transferring slot so it can be used by the # following requests (perhaps those which came from the downloader # middleware itself) slot.transferring.add(request) def finish_transferring(_): slot.transferring.remove(request) self._process_queue(spider, slot) return _ return dfd.addBoth(finish_transferring) def is_idle(self): return not self.slots Scrapy-0.14.4/scrapy/settings/0000700000016101777760000000000011754532077016303 5ustar buildbotnogroupScrapy-0.14.4/scrapy/settings/default_settings.py0000600000016101777760000001746411754531743022236 0ustar buildbotnogroup""" This module contains the default values for all settings used by Scrapy. For more information about these settings you can read the settings documentation in docs/topics/settings.rst Scrapy developers, if you add a setting here remember to: * add it in alphabetical order * group similar settings without leaving blank lines * add its documentation to the available settings documentation (docs/topics/settings.rst) """ import sys, os from os.path import join, abspath, dirname BOT_NAME = 'scrapybot' BOT_VERSION = '1.0' CLOSESPIDER_TIMEOUT = 0 CLOSESPIDER_PAGECOUNT = 0 CLOSESPIDER_ITEMCOUNT = 0 COMMANDS_MODULE = '' CONCURRENT_ITEMS = 100 CONCURRENT_REQUESTS = 16 CONCURRENT_REQUESTS_PER_DOMAIN = 8 CONCURRENT_REQUESTS_PER_IP = 0 COOKIES_ENABLED = True COOKIES_DEBUG = False DEFAULT_ITEM_CLASS = 'scrapy.item.Item' DEFAULT_REQUEST_HEADERS = { 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', 'Accept-Language': 'en', } DEFAULT_RESPONSE_ENCODING = 'ascii' DEPTH_LIMIT = 0 DEPTH_STATS = True DEPTH_PRIORITY = 0 DNSCACHE_ENABLED = True DOWNLOAD_DELAY = 0 DOWNLOAD_HANDLERS = {} DOWNLOAD_HANDLERS_BASE = { 'file': 'scrapy.core.downloader.handlers.file.FileDownloadHandler', 'http': 'scrapy.core.downloader.handlers.http.HttpDownloadHandler', 'https': 'scrapy.core.downloader.handlers.http.HttpDownloadHandler', 's3': 'scrapy.core.downloader.handlers.s3.S3DownloadHandler', } DOWNLOAD_TIMEOUT = 180 # 3mins DOWNLOADER_DEBUG = False DOWNLOADER_HTTPCLIENTFACTORY = 'scrapy.core.downloader.webclient.ScrapyHTTPClientFactory' DOWNLOADER_MIDDLEWARES = {} DOWNLOADER_MIDDLEWARES_BASE = { # Engine side 'scrapy.contrib.downloadermiddleware.robotstxt.RobotsTxtMiddleware': 100, 'scrapy.contrib.downloadermiddleware.httpauth.HttpAuthMiddleware': 300, 'scrapy.contrib.downloadermiddleware.downloadtimeout.DownloadTimeoutMiddleware': 350, 'scrapy.contrib.downloadermiddleware.useragent.UserAgentMiddleware': 400, 'scrapy.contrib.downloadermiddleware.retry.RetryMiddleware': 500, 'scrapy.contrib.downloadermiddleware.defaultheaders.DefaultHeadersMiddleware': 550, 'scrapy.contrib.downloadermiddleware.redirect.RedirectMiddleware': 600, 'scrapy.contrib.downloadermiddleware.cookies.CookiesMiddleware': 700, 'scrapy.contrib.downloadermiddleware.httpproxy.HttpProxyMiddleware': 750, 'scrapy.contrib.downloadermiddleware.httpcompression.HttpCompressionMiddleware': 800, 'scrapy.contrib.downloadermiddleware.chunked.ChunkedTransferMiddleware': 830, 'scrapy.contrib.downloadermiddleware.stats.DownloaderStats': 850, 'scrapy.contrib.downloadermiddleware.httpcache.HttpCacheMiddleware': 900, # Downloader side } DOWNLOADER_STATS = True DUPEFILTER_CLASS = 'scrapy.dupefilter.RFPDupeFilter' try: EDITOR = os.environ['EDITOR'] except KeyError: if sys.platform == 'win32': EDITOR = '%s -m idlelib.idle' else: EDITOR = 'vi' ENCODING_ALIASES = {} ENCODING_ALIASES_BASE = { # gb2312 is superseded 
by gb18030 'gb2312': 'gb18030', 'chinese': 'gb18030', 'csiso58gb231280': 'gb18030', 'euc- cn': 'gb18030', 'euccn': 'gb18030', 'eucgb2312-cn': 'gb18030', 'gb2312-1980': 'gb18030', 'gb2312-80': 'gb18030', 'iso- ir-58': 'gb18030', # gbk is superseded by gb18030 'gbk': 'gb18030', '936': 'gb18030', 'cp936': 'gb18030', 'ms936': 'gb18030', # latin_1 is a subset of cp1252 'latin_1': 'cp1252', 'iso-8859-1': 'cp1252', 'iso8859-1': 'cp1252', '8859': 'cp1252', 'cp819': 'cp1252', 'latin': 'cp1252', 'latin1': 'cp1252', 'l1': 'cp1252', # others 'zh-cn': 'gb18030', 'win-1251': 'cp1251', 'macintosh' : 'mac_roman', 'x-sjis': 'shift_jis', } EXTENSIONS = {} EXTENSIONS_BASE = { 'scrapy.contrib.corestats.CoreStats': 0, 'scrapy.webservice.WebService': 0, 'scrapy.telnet.TelnetConsole': 0, 'scrapy.contrib.memusage.MemoryUsage': 0, 'scrapy.contrib.memdebug.MemoryDebugger': 0, 'scrapy.contrib.closespider.CloseSpider': 0, 'scrapy.contrib.feedexport.FeedExporter': 0, 'scrapy.contrib.logstats.LogStats': 0, 'scrapy.contrib.spiderstate.SpiderState': 0, } FEED_URI = None FEED_URI_PARAMS = None # a function to extend uri arguments FEED_FORMAT = 'jsonlines' FEED_STORE_EMPTY = False FEED_STORAGES = {} FEED_STORAGES_BASE = { '': 'scrapy.contrib.feedexport.FileFeedStorage', 'file': 'scrapy.contrib.feedexport.FileFeedStorage', 'stdout': 'scrapy.contrib.feedexport.StdoutFeedStorage', 's3': 'scrapy.contrib.feedexport.S3FeedStorage', 'ftp': 'scrapy.contrib.feedexport.FTPFeedStorage', } FEED_EXPORTERS = {} FEED_EXPORTERS_BASE = { 'json': 'scrapy.contrib.exporter.JsonItemExporter', 'jsonlines': 'scrapy.contrib.exporter.JsonLinesItemExporter', 'csv': 'scrapy.contrib.exporter.CsvItemExporter', 'xml': 'scrapy.contrib.exporter.XmlItemExporter', 'marshal': 'scrapy.contrib.exporter.MarshalItemExporter', 'pickle': 'scrapy.contrib.exporter.PickleItemExporter', } HTTPCACHE_ENABLED = False HTTPCACHE_DIR = 'httpcache' HTTPCACHE_IGNORE_MISSING = False HTTPCACHE_STORAGE = 'scrapy.contrib.downloadermiddleware.httpcache.FilesystemCacheStorage' HTTPCACHE_EXPIRATION_SECS = 0 HTTPCACHE_IGNORE_HTTP_CODES = [] HTTPCACHE_IGNORE_SCHEMES = ['file'] HTTPCACHE_DBM_MODULE = 'anydbm' ITEM_PROCESSOR = 'scrapy.contrib.pipeline.ItemPipelineManager' # Item pipelines are typically set in specific commands settings ITEM_PIPELINES = [] LOG_ENABLED = True LOG_ENCODING = 'utf-8' LOG_FORMATTER = 'scrapy.logformatter.LogFormatter' LOG_STDOUT = False LOG_LEVEL = 'DEBUG' LOG_FILE = None LOG_UNSERIALIZABLE_REQUESTS = False LOGSTATS_INTERVAL = 60.0 MAIL_DEBUG = False MAIL_HOST = 'localhost' MAIL_PORT = 25 MAIL_FROM = 'scrapy@localhost' MAIL_PASS = None MAIL_USER = None MEMDEBUG_ENABLED = False # enable memory debugging MEMDEBUG_NOTIFY = [] # send memory debugging report by mail at engine shutdown MEMUSAGE_ENABLED = 1 MEMUSAGE_LIMIT_MB = 0 MEMUSAGE_NOTIFY_MAIL = [] MEMUSAGE_REPORT = False MEMUSAGE_WARNING_MB = 0 NEWSPIDER_MODULE = '' RANDOMIZE_DOWNLOAD_DELAY = True REDIRECT_ENABLED = True REDIRECT_MAX_METAREFRESH_DELAY = 100 REDIRECT_MAX_TIMES = 20 # uses Firefox default setting REDIRECT_PRIORITY_ADJUST = +2 RETRY_ENABLED = True RETRY_TIMES = 2 # initial response + 2 retries = 3 requests RETRY_HTTP_CODES = [500, 503, 504, 400, 408] RETRY_PRIORITY_ADJUST = -1 ROBOTSTXT_OBEY = False SCHEDULER = 'scrapy.core.scheduler.Scheduler' SCHEDULER_DISK_QUEUE = 'scrapy.squeue.PickleLifoDiskQueue' SCHEDULER_MEMORY_QUEUE = 'scrapy.squeue.LifoMemoryQueue' SELECTORS_BACKEND = None # possible values: libxml2, lxml SPIDER_MANAGER_CLASS = 'scrapy.spidermanager.SpiderManager' SPIDER_MIDDLEWARES 
= {} SPIDER_MIDDLEWARES_BASE = { # Engine side 'scrapy.contrib.spidermiddleware.httperror.HttpErrorMiddleware': 50, 'scrapy.contrib.spidermiddleware.offsite.OffsiteMiddleware': 500, 'scrapy.contrib.spidermiddleware.referer.RefererMiddleware': 700, 'scrapy.contrib.spidermiddleware.urllength.UrlLengthMiddleware': 800, 'scrapy.contrib.spidermiddleware.depth.DepthMiddleware': 900, # Spider side } SPIDER_MODULES = [] STATS_CLASS = 'scrapy.statscol.MemoryStatsCollector' STATS_ENABLED = True STATS_DUMP = True STATSMAILER_RCPTS = [] TEMPLATES_DIR = abspath(join(dirname(__file__), '..', 'templates')) URLLENGTH_LIMIT = 2083 USER_AGENT = '%s/%s' % (BOT_NAME, BOT_VERSION) TELNETCONSOLE_ENABLED = 1 TELNETCONSOLE_PORT = [6023, 6073] TELNETCONSOLE_HOST = '0.0.0.0' WEBSERVICE_ENABLED = True WEBSERVICE_LOGFILE = None WEBSERVICE_PORT = [6080, 7030] WEBSERVICE_HOST = '0.0.0.0' WEBSERVICE_RESOURCES = {} WEBSERVICE_RESOURCES_BASE = { 'scrapy.contrib.webservice.crawler.CrawlerResource': 1, 'scrapy.contrib.webservice.enginestatus.EngineStatusResource': 1, 'scrapy.contrib.webservice.stats.StatsResource': 1, } Scrapy-0.14.4/scrapy/settings/__init__.py0000600000016101777760000000514211754531743020417 0ustar buildbotnogroupfrom . import default_settings class Settings(object): def __init__(self, values=None): self.values = values.copy() if values else {} self.global_defaults = default_settings def __getitem__(self, opt_name): if opt_name in self.values: return self.values[opt_name] return getattr(self.global_defaults, opt_name, None) def get(self, name, default=None): return self[name] if self[name] is not None else default def getbool(self, name, default=False): """ True is: 1, '1', True False is: 0, '0', False, None """ return bool(int(self.get(name, default))) def getint(self, name, default=0): return int(self.get(name, default)) def getfloat(self, name, default=0.0): return float(self.get(name, default)) def getlist(self, name, default=None): value = self.get(name) if value is None: return default or [] elif hasattr(value, '__iter__'): return value else: return str(value).split(',') class CrawlerSettings(Settings): def __init__(self, settings_module=None, **kw): super(CrawlerSettings, self).__init__(**kw) self.settings_module = settings_module self.overrides = {} self.defaults = {} def __getitem__(self, opt_name): if opt_name in self.overrides: return self.overrides[opt_name] if self.settings_module and hasattr(self.settings_module, opt_name): return getattr(self.settings_module, opt_name) if opt_name in self.defaults: return self.defaults[opt_name] return super(CrawlerSettings, self).__getitem__(opt_name) def __str__(self): return "" % self.settings_module class SpiderSettings(Settings): def __init__(self, spider, crawler_settings, **kw): super(SpiderSettings, self).__init__(**kw) self.spider = spider self.cset = crawler_settings def __getitem__(self, opt_name): if opt_name in self.cset.overrides: return self.cset.overrides[opt_name] if hasattr(self.spider, opt_name): return getattr(self.spider, opt_name) if self.cset.settings_module and hasattr(self.cset.settings_module, opt_name): return getattr(self.cset.settings_module, opt_name) if opt_name in self.cset.defaults: return self.cset.defaults[opt_name] return super(SpiderSettings, self).__getitem__(opt_name) def __str__(self): return "" % self.spider.name Scrapy-0.14.4/scrapy/xlib/0000700000016101777760000000000011754532100015364 5ustar buildbotnogroupScrapy-0.14.4/scrapy/xlib/pydispatch/0000700000016101777760000000000011754532100017534 5ustar 
buildbotnogroupScrapy-0.14.4/scrapy/xlib/pydispatch/robustapply.py0000600000016101777760000000327111754531743022513 0ustar buildbotnogroup"""Robust apply mechanism Provides a function "call", which can sort out what arguments a given callable object can take, and subset the given arguments to match only those which are acceptable. """ def function( receiver ): """Get function-like callable object for given receiver returns (function_or_method, codeObject, fromMethod) If fromMethod is true, then the callable already has its first argument bound """ if hasattr(receiver, '__call__'): # receiver is a class instance; assume it is callable. # Reassign receiver to the actual method that will be called. if hasattr( receiver.__call__, 'im_func') or hasattr( receiver.__call__, 'im_code'): receiver = receiver.__call__ if hasattr( receiver, 'im_func' ): # an instance-method... return receiver, receiver.im_func.func_code, 1 elif not hasattr( receiver, 'func_code'): raise ValueError('unknown reciever type %s %s'%(receiver, type(receiver))) return receiver, receiver.func_code, 0 def robustApply(receiver, *arguments, **named): """Call receiver with arguments and an appropriate subset of named """ receiver, codeObject, startIndex = function( receiver ) acceptable = codeObject.co_varnames[startIndex+len(arguments):codeObject.co_argcount] for name in codeObject.co_varnames[startIndex:startIndex+len(arguments)]: if named.has_key( name ): raise TypeError( """Argument %r specified both positionally and as a keyword for calling %r"""% ( name, receiver, ) ) if not (codeObject.co_flags & 8): # fc does not have a **kwds type parameter, therefore # remove unacceptable arguments. for arg in named.keys(): if arg not in acceptable: del named[arg] return receiver(*arguments, **named) Scrapy-0.14.4/scrapy/xlib/pydispatch/license.txt0000600000016101777760000000303411754531743021735 0ustar buildbotnogroupPyDispatcher License Copyright (c) 2001-2006, Patrick K. O'Brien and Contributors All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. The name of Patrick K. O'Brien, or the name of any Contributor, may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS AND CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
Scrapy-0.14.4/scrapy/xlib/pydispatch/errors.py0000600000016101777760000000055311754531743021443 0ustar buildbotnogroup"""Error types for dispatcher mechanism """ class DispatcherError(Exception): """Base class for all Dispatcher errors""" class DispatcherKeyError(KeyError, DispatcherError): """Error raised when unknown (sender,signal) set specified""" class DispatcherTypeError(TypeError, DispatcherError): """Error raised when inappropriate signal-type specified (None)""" Scrapy-0.14.4/scrapy/xlib/pydispatch/__init__.py0000600000016101777760000000025311754531743021663 0ustar buildbotnogroup"""Multi-consumer multi-producer dispatching mechanism """ __version__ = "2.0.0" __author__ = "Patrick K. O'Brien" __license__ = "BSD-style, see license.txt for details" Scrapy-0.14.4/scrapy/xlib/pydispatch/robust.py0000600000016101777760000000350111754531743021441 0ustar buildbotnogroup"""Module implementing error-catching version of send (sendRobust)""" from scrapy.xlib.pydispatch.dispatcher import Any, Anonymous, liveReceivers, getAllReceivers from scrapy.xlib.pydispatch.robustapply import robustApply def sendRobust( signal=Any, sender=Anonymous, *arguments, **named ): """Send signal from sender to all connected receivers catching errors signal -- (hashable) signal value, see connect for details sender -- the sender of the signal if Any, only receivers registered for Any will receive the message. if Anonymous, only receivers registered to receive messages from Anonymous or Any will receive the message Otherwise can be any python object (normally one registered with a connect if you actually want something to occur). arguments -- positional arguments which will be passed to *all* receivers. Note that this may raise TypeErrors if the receivers do not allow the particular arguments. Note also that arguments are applied before named arguments, so they should be used with care. named -- named arguments which will be filtered according to the parameters of the receivers to only provide those acceptable to the receiver. Return a list of tuple pairs [(receiver, response), ... ] if any receiver raises an error (specifically any subclass of Exception), the error instance is returned as the result for that receiver. """ # Call each receiver with whatever arguments it can accept. # Return a list of tuple pairs [(receiver, response), ... ]. responses = [] for receiver in liveReceivers(getAllReceivers(sender, signal)): try: response = robustApply( receiver, signal=signal, sender=sender, *arguments, **named ) except Exception, err: responses.append((receiver, err)) else: responses.append((receiver, response)) return responses Scrapy-0.14.4/scrapy/xlib/pydispatch/dispatcher.py0000600000016101777760000003447411754531743022266 0ustar buildbotnogroup"""Multiple-producer-multiple-consumer signal-dispatching dispatcher is the core of the PyDispatcher system, providing the primary API and the core logic for the system. Module attributes of note: Any -- Singleton used to signal either "Any Sender" or "Any Signal". See documentation of the _Any class. Anonymous -- Singleton used to signal "Anonymous Sender" See documentation of the _Anonymous class. 
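    A minimal usage sketch of the Any singleton described above (the receiver
    and signal names are illustrative additions, not part of the original
    documentation):

        from scrapy.xlib.pydispatch import dispatcher

        def log_any(signal, sender):
            print 'saw %r from %r' % (signal, sender)

        # Registered with sender=Any (the default), so log_any is reached no
        # matter which sender emits the signal.
        dispatcher.connect(log_any, signal='spider_opened')
        dispatcher.send(signal='spider_opened', sender='some-spider')
        dispatcher.disconnect(log_any, signal='spider_opened')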
Internal attributes: WEAKREF_TYPES -- tuple of types/classes which represent weak references to receivers, and thus must be de- referenced on retrieval to retrieve the callable object connections -- { senderkey (id) : { signal : [receivers...]}} senders -- { senderkey (id) : weakref(sender) } used for cleaning up sender references on sender deletion sendersBack -- { receiverkey (id) : [senderkey (id)...] } used for cleaning up receiver references on receiver deletion, (considerably speeds up the cleanup process vs. the original code.) """ from __future__ import generators import types, weakref from scrapy.xlib.pydispatch import saferef, robustapply, errors __author__ = "Patrick K. O'Brien " __cvsid__ = "$Id: dispatcher.py,v 1.1.1.1 2006/07/07 15:59:38 mcfletch Exp $" __version__ = "$Revision: 1.1.1.1 $"[11:-2] try: True except NameError: True = 1==1 False = 1==0 class _Parameter: """Used to represent default parameter values.""" def __repr__(self): return self.__class__.__name__ class _Any(_Parameter): """Singleton used to signal either "Any Sender" or "Any Signal" The Any object can be used with connect, disconnect, send, or sendExact to signal that the parameter given Any should react to all senders/signals, not just a particular sender/signal. """ Any = _Any() class _Anonymous(_Parameter): """Singleton used to signal "Anonymous Sender" The Anonymous object is used to signal that the sender of a message is not specified (as distinct from being "any sender"). Registering callbacks for Anonymous will only receive messages sent without senders. Sending with anonymous will only send messages to those receivers registered for Any or Anonymous. Note: The default sender for connect is Any, while the default sender for send is Anonymous. This has the effect that if you do not specify any senders in either function then all messages are routed as though there was a single sender (Anonymous) being used everywhere. """ Anonymous = _Anonymous() WEAKREF_TYPES = (weakref.ReferenceType, saferef.BoundMethodWeakref) connections = {} senders = {} sendersBack = {} def connect(receiver, signal=Any, sender=Any, weak=True): """Connect receiver to sender for signal receiver -- a callable Python object which is to receive messages/signals/events. Receivers must be hashable objects. if weak is True, then receiver must be weak-referencable (more precisely saferef.safeRef() must be able to create a reference to the receiver). Receivers are fairly flexible in their specification, as the machinery in the robustApply module takes care of most of the details regarding figuring out appropriate subsets of the sent arguments to apply to a given receiver. Note: if receiver is itself a weak reference (a callable), it will be de-referenced by the system's machinery, so *generally* weak references are not suitable as receivers, though some use might be found for the facility whereby a higher-level library passes in pre-weakrefed receiver references. signal -- the signal to which the receiver should respond if Any, receiver will receive any signal from the indicated sender (which might also be Any, but is not necessarily Any). Otherwise must be a hashable Python object other than None (DispatcherError raised on None). sender -- the sender to which the receiver should respond if Any, receiver will receive the indicated signals from any sender. if Anonymous, receiver will only receive indicated signals from send/sendExact which do not specify a sender, or specify Anonymous explicitly as the sender. 
Otherwise can be any python object. weak -- whether to use weak references to the receiver By default, the module will attempt to use weak references to the receiver objects. If this parameter is false, then strong references will be used. returns None, may raise DispatcherTypeError """ if signal is None: raise errors.DispatcherTypeError( 'Signal cannot be None (receiver=%r sender=%r)'%( receiver,sender) ) if weak: receiver = saferef.safeRef(receiver, onDelete=_removeReceiver) senderkey = id(sender) if connections.has_key(senderkey): signals = connections[senderkey] else: connections[senderkey] = signals = {} # Keep track of senders for cleanup. # Is Anonymous something we want to clean up? if sender not in (None, Anonymous, Any): def remove(object, senderkey=senderkey): _removeSender(senderkey=senderkey) # Skip objects that can not be weakly referenced, which means # they won't be automatically cleaned up, but that's too bad. try: weakSender = weakref.ref(sender, remove) senders[senderkey] = weakSender except: pass receiverID = id(receiver) # get current set, remove any current references to # this receiver in the set, including back-references if signals.has_key(signal): receivers = signals[signal] _removeOldBackRefs(senderkey, signal, receiver, receivers) else: receivers = signals[signal] = [] try: current = sendersBack.get( receiverID ) if current is None: sendersBack[ receiverID ] = current = [] if senderkey not in current: current.append(senderkey) except: pass receivers.append(receiver) def disconnect(receiver, signal=Any, sender=Any, weak=True): """Disconnect receiver from sender for signal receiver -- the registered receiver to disconnect signal -- the registered signal to disconnect sender -- the registered sender to disconnect weak -- the weakref state to disconnect disconnect reverses the process of connect, the semantics for the individual elements are logically equivalent to a tuple of (receiver, signal, sender, weak) used as a key to be deleted from the internal routing tables. (The actual process is slightly more complex but the semantics are basically the same). Note: Using disconnect is not required to cleanup routing when an object is deleted, the framework will remove routes for deleted objects automatically. It's only necessary to disconnect if you want to stop routing to a live object. returns None, may raise DispatcherTypeError or DispatcherKeyError """ if signal is None: raise errors.DispatcherTypeError( 'Signal cannot be None (receiver=%r sender=%r)'%( receiver,sender) ) if weak: receiver = saferef.safeRef(receiver) senderkey = id(sender) try: signals = connections[senderkey] receivers = signals[signal] except KeyError: raise errors.DispatcherKeyError( """No receivers found for signal %r from sender %r""" %( signal, sender ) ) try: # also removes from receivers _removeOldBackRefs(senderkey, signal, receiver, receivers) except ValueError: raise errors.DispatcherKeyError( """No connection to receiver %s for signal %s from sender %s""" %( receiver, signal, sender ) ) _cleanupConnections(senderkey, signal) def getReceivers( sender = Any, signal = Any ): """Get list of receivers from global tables This utility function allows you to retrieve the raw list of receivers from the connections table for the given sender and signal pair. Note: there is no guarantee that this is the actual list stored in the connections table, so the value should be treated as a simple iterable/truth value rather than, for instance a list to which you might append new records. 
Normally you would use liveReceivers( getReceivers( ...)) to retrieve the actual receiver objects as an iterable object. """ try: return connections[id(sender)][signal] except KeyError: return [] def liveReceivers(receivers): """Filter sequence of receivers to get resolved, live receivers This is a generator which will iterate over the passed sequence, checking for weak references and resolving them, then returning all live receivers. """ for receiver in receivers: if isinstance( receiver, WEAKREF_TYPES): # Dereference the weak reference. receiver = receiver() if receiver is not None: yield receiver else: yield receiver def getAllReceivers( sender = Any, signal = Any ): """Get list of all receivers from global tables This gets all receivers which should receive the given signal from sender, each receiver should be produced only once by the resulting generator """ receivers = {} for set in ( # Get receivers that receive *this* signal from *this* sender. getReceivers( sender, signal ), # Add receivers that receive *any* signal from *this* sender. getReceivers( sender, Any ), # Add receivers that receive *this* signal from *any* sender. getReceivers( Any, signal ), # Add receivers that receive *any* signal from *any* sender. getReceivers( Any, Any ), ): for receiver in set: if receiver: # filter out dead instance-method weakrefs try: if not receivers.has_key( receiver ): receivers[receiver] = 1 yield receiver except TypeError: # dead weakrefs raise TypeError on hash... pass def send(signal=Any, sender=Anonymous, *arguments, **named): """Send signal from sender to all connected receivers. signal -- (hashable) signal value, see connect for details sender -- the sender of the signal if Any, only receivers registered for Any will receive the message. if Anonymous, only receivers registered to receive messages from Anonymous or Any will receive the message Otherwise can be any python object (normally one registered with a connect if you actually want something to occur). arguments -- positional arguments which will be passed to *all* receivers. Note that this may raise TypeErrors if the receivers do not allow the particular arguments. Note also that arguments are applied before named arguments, so they should be used with care. named -- named arguments which will be filtered according to the parameters of the receivers to only provide those acceptable to the receiver. Return a list of tuple pairs [(receiver, response), ... ] if any receiver raises an error, the error propagates back through send, terminating the dispatch loop, so it is quite possible to not have all receivers called if a raises an error. """ # Call each receiver with whatever arguments it can accept. # Return a list of tuple pairs [(receiver, response), ... ]. responses = [] for receiver in liveReceivers(getAllReceivers(sender, signal)): response = robustapply.robustApply( receiver, signal=signal, sender=sender, *arguments, **named ) responses.append((receiver, response)) return responses def sendExact( signal=Any, sender=Anonymous, *arguments, **named ): """Send signal only to those receivers registered for exact message sendExact allows for avoiding Any/Anonymous registered handlers, sending only to those receivers explicitly registered for a particular signal on a particular sender. 
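    A minimal usage sketch (the handler, signal, and sender names below are
    illustrative additions, not part of the original documentation):

        from scrapy.xlib.pydispatch import dispatcher

        class Engine(object):
            pass

        engine = Engine()

        def handler(signal, sender):
            print 'got %s from %r' % (signal, sender)

        dispatcher.connect(handler, signal='item_scraped', sender=engine)

        # Delivered: handler is registered for exactly this (signal, sender) pair.
        dispatcher.sendExact(signal='item_scraped', sender=engine)

        # A receiver connected with sender=dispatcher.Any would be reached by
        # send() but is deliberately skipped by sendExact().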
""" responses = [] for receiver in liveReceivers(getReceivers(sender, signal)): response = robustapply.robustApply( receiver, signal=signal, sender=sender, *arguments, **named ) responses.append((receiver, response)) return responses def _removeReceiver(receiver): """Remove receiver from connections.""" if not sendersBack: # During module cleanup the mapping will be replaced with None return False backKey = id(receiver) try: backSet = sendersBack.pop(backKey) except KeyError, err: return False else: for senderkey in backSet: try: signals = connections[senderkey].keys() except KeyError,err: pass else: for signal in signals: try: receivers = connections[senderkey][signal] except KeyError: pass else: try: receivers.remove( receiver ) except Exception, err: pass _cleanupConnections(senderkey, signal) def _cleanupConnections(senderkey, signal): """Delete any empty signals for senderkey. Delete senderkey if empty.""" try: receivers = connections[senderkey][signal] except: pass else: if not receivers: # No more connected receivers. Therefore, remove the signal. try: signals = connections[senderkey] except KeyError: pass else: del signals[signal] if not signals: # No more signal connections. Therefore, remove the sender. _removeSender(senderkey) def _removeSender(senderkey): """Remove senderkey from connections.""" _removeBackrefs(senderkey) try: del connections[senderkey] except KeyError: pass # Senderkey will only be in senders dictionary if sender # could be weakly referenced. try: del senders[senderkey] except: pass def _removeBackrefs( senderkey): """Remove all back-references to this senderkey""" try: signals = connections[senderkey] except KeyError: signals = None else: items = signals.items() def allReceivers( ): for signal,set in items: for item in set: yield item for receiver in allReceivers(): _killBackref( receiver, senderkey ) def _removeOldBackRefs(senderkey, signal, receiver, receivers): """Kill old sendersBack references from receiver This guards against multiple registration of the same receiver for a given signal and sender leaking memory as old back reference records build up. Also removes old receiver instance from receivers """ try: index = receivers.index(receiver) # need to scan back references here and remove senderkey except ValueError: return False else: oldReceiver = receivers[index] del receivers[index] found = 0 signals = connections.get(signal) if signals is not None: for sig,recs in connections.get(signal,{}).iteritems(): if sig != signal: for rec in recs: if rec is oldReceiver: found = 1 break if not found: _killBackref( oldReceiver, senderkey ) return True return False def _killBackref( receiver, senderkey ): """Do the actual removal of back reference from receiver to senderkey""" receiverkey = id(receiver) set = sendersBack.get( receiverkey, () ) while senderkey in set: try: set.remove( senderkey ) except: break if not set: try: del sendersBack[ receiverkey ] except KeyError: pass return True Scrapy-0.14.4/scrapy/xlib/pydispatch/saferef.py0000600000016101777760000001362411754531743021545 0ustar buildbotnogroup"""Refactored "safe reference" from dispatcher.py""" import weakref, traceback def safeRef(target, onDelete = None): """Return a *safe* weak reference to a callable target target -- the object to be weakly referenced, if it's a bound method reference, will create a BoundMethodWeakref, otherwise creates a simple weakref. 
onDelete -- if provided, will have a hard reference stored to the callable to be called after the safe reference goes out of scope with the reference object, (either a weakref or a BoundMethodWeakref) as argument. """ if hasattr(target, 'im_self'): if target.im_self is not None: # Turn a bound method into a BoundMethodWeakref instance. # Keep track of these instances for lookup by disconnect(). assert hasattr(target, 'im_func'), """safeRef target %r has im_self, but no im_func, don't know how to create reference"""%( target,) reference = BoundMethodWeakref( target=target, onDelete=onDelete ) return reference if onDelete is not None: return weakref.ref(target, onDelete) else: return weakref.ref( target ) class BoundMethodWeakref(object): """'Safe' and reusable weak references to instance methods BoundMethodWeakref objects provide a mechanism for referencing a bound method without requiring that the method object itself (which is normally a transient object) is kept alive. Instead, the BoundMethodWeakref object keeps weak references to both the object and the function which together define the instance method. Attributes: key -- the identity key for the reference, calculated by the class's calculateKey method applied to the target instance method deletionMethods -- sequence of callable objects taking single argument, a reference to this object which will be called when *either* the target object or target function is garbage collected (i.e. when this object becomes invalid). These are specified as the onDelete parameters of safeRef calls. weakSelf -- weak reference to the target object weakFunc -- weak reference to the target function Class Attributes: _allInstances -- class attribute pointing to all live BoundMethodWeakref objects indexed by the class's calculateKey(target) method applied to the target objects. This weak value dictionary is used to short-circuit creation so that multiple references to the same (object, function) pair produce the same BoundMethodWeakref instance. """ _allInstances = weakref.WeakValueDictionary() def __new__( cls, target, onDelete=None, *arguments,**named ): """Create new instance or return current instance Basically this method of construction allows us to short-circuit creation of references to already- referenced instance methods. The key corresponding to the target is calculated, and if there is already an existing reference, that is returned, with its deletionMethods attribute updated. Otherwise the new instance is created and registered in the table of already-referenced methods. """ key = cls.calculateKey(target) current =cls._allInstances.get(key) if current is not None: current.deletionMethods.append( onDelete) return current else: base = super( BoundMethodWeakref, cls).__new__( cls ) cls._allInstances[key] = base base.__init__( target, onDelete, *arguments,**named) return base def __init__(self, target, onDelete=None): """Return a weak-reference-like instance for a bound method target -- the instance-method target for the weak reference, must have im_self and im_func attributes and be reconstructable via: target.im_func.__get__( target.im_self ) which is true of built-in instance methods. onDelete -- optional callback which will be called when this weak reference ceases to be valid (i.e. either the object or the function is garbage collected). Should take a single argument, which will be passed a pointer to this object. 
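        A minimal sketch of the behaviour described above (the class and
        method names are illustrative additions; immediate collection after
        'del' assumes CPython's reference counting):

            from scrapy.xlib.pydispatch.saferef import safeRef

            class Handler(object):
                def on_event(self):
                    return 'handled'

            h = Handler()
            ref = safeRef(h.on_event)    # a BoundMethodWeakref, not a plain weakref
            assert ref()() == 'handled'  # calling the ref yields the live bound method
            del h                        # drop the only strong reference to the instance
            assert ref() is None         # the reference now resolves to None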
""" def remove(weak, self=self): """Set self.isDead to true when method or instance is destroyed""" methods = self.deletionMethods[:] del self.deletionMethods[:] try: del self.__class__._allInstances[ self.key ] except KeyError: pass for function in methods: try: if callable( function ): function( self ) except Exception, e: try: traceback.print_exc() except AttributeError, err: print '''Exception during saferef %s cleanup function %s: %s'''%( self, function, e ) self.deletionMethods = [onDelete] self.key = self.calculateKey( target ) self.weakSelf = weakref.ref(target.im_self, remove) self.weakFunc = weakref.ref(target.im_func, remove) self.selfName = target.im_self.__class__.__name__ self.funcName = str(target.im_func.__name__) def calculateKey( cls, target ): """Calculate the reference key for this reference Currently this is a two-tuple of the id()'s of the target object and the target function respectively. """ return (id(target.im_self),id(target.im_func)) calculateKey = classmethod( calculateKey ) def __str__(self): """Give a friendly representation of the object""" return """%s( %s.%s )"""%( self.__class__.__name__, self.selfName, self.funcName, ) __repr__ = __str__ def __nonzero__( self ): """Whether we are still a valid reference""" return self() is not None def __cmp__( self, other ): """Compare with another reference""" if not isinstance (other,self.__class__): return cmp( self.__class__, type(other) ) return cmp( self.key, other.key) def __call__(self): """Return a strong reference to the bound method If the target cannot be retrieved, then will return None, otherwise returns a bound instance method for our object and function. Note: You may call this method any number of times, as it does not invalidate the reference. """ target = self.weakSelf() if target is not None: function = self.weakFunc() if function is not None: return function.__get__(target) return None Scrapy-0.14.4/scrapy/xlib/BeautifulSoup.py0000600000016101777760000023324511754531743020554 0ustar buildbotnogroup""" FIXME: this module is only needed beacuse scrapy.xlib.ClientForm uses it. We should remove it after we remove the scrapy.xlib.ClientForm module. Beautiful Soup Elixir and Tonic "The Screen-Scraper's Friend" http://www.crummy.com/software/BeautifulSoup/ Beautiful Soup parses a (possibly invalid) XML or HTML document into a tree representation. It provides methods and Pythonic idioms that make it easy to navigate, search, and modify the tree. A well-formed XML/HTML document yields a well-formed data structure. An ill-formed XML/HTML document yields a correspondingly ill-formed data structure. If your document is only locally well-formed, you can use this library to find and process the well-formed part of it. Beautiful Soup works with Python 2.2 and up. It has no external dependencies, but you'll have more success at converting data to UTF-8 if you also install these three packages: * chardet, for auto-detecting character encodings http://chardet.feedparser.org/ * cjkcodecs and iconv_codec, which add more encodings to the ones supported by stock Python. http://cjkpython.i18n.org/ Beautiful Soup defines classes for two main parsing strategies: * BeautifulStoneSoup, for parsing XML, SGML, or your domain-specific language that kind of looks like XML. * BeautifulSoup, for parsing run-of-the-mill HTML code, be it valid or invalid. This class has web browser-like heuristics for obtaining a sensible parse tree in the face of common HTML errors. 
Beautiful Soup also defines a class (UnicodeDammit) for autodetecting the encoding of an HTML or XML document, and converting it to Unicode. Much of this code is taken from Mark Pilgrim's Universal Feed Parser. For more than you ever wanted to know about Beautiful Soup, see the documentation: http://www.crummy.com/software/BeautifulSoup/documentation.html Here, have some legalese: Copyright (c) 2004-2010, Leonard Richardson All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the the Beautiful Soup Consortium and All Night Kosher Bakery nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE, DAMMIT. """ from __future__ import generators __author__ = "Leonard Richardson (leonardr@segfault.org)" __version__ = "3.0.8.1" __copyright__ = "Copyright (c) 2004-2010 Leonard Richardson" __license__ = "New-style BSD" from sgmllib import SGMLParser, SGMLParseError import codecs import markupbase import types import re import sgmllib try: from htmlentitydefs import name2codepoint except ImportError: name2codepoint = {} try: set except NameError: from sets import Set as set #These hacks make Beautiful Soup able to parse XML with namespaces sgmllib.tagfind = re.compile('[a-zA-Z][-_.:a-zA-Z0-9]*') markupbase._declname_match = re.compile(r'[a-zA-Z][-_.:a-zA-Z0-9]*\s*').match DEFAULT_OUTPUT_ENCODING = "utf-8" def _match_css_class(str): """Build a RE to match the given CSS class.""" return re.compile(r"(^|.*\s)%s($|\s)" % str) # First, the classes that represent markup elements. class PageElement(object): """Contains the navigational information for some part of the page (either a tag or a piece of text)""" def setup(self, parent=None, previous=None): """Sets up the initial relations between this element and other elements.""" self.parent = parent self.previous = previous self.next = None self.previousSibling = None self.nextSibling = None if self.parent and self.parent.contents: self.previousSibling = self.parent.contents[-1] self.previousSibling.nextSibling = self def replaceWith(self, replaceWith): oldParent = self.parent myIndex = self.parent.index(self) if hasattr(replaceWith, "parent")\ and replaceWith.parent is self.parent: # We're replacing this element with one of its siblings. 
index = replaceWith.parent.index(replaceWith) if index and index < myIndex: # Furthermore, it comes before this element. That # means that when we extract it, the index of this # element will change. myIndex = myIndex - 1 self.extract() oldParent.insert(myIndex, replaceWith) def replaceWithChildren(self): myParent = self.parent myIndex = self.parent.index(self) self.extract() reversedChildren = list(self.contents) reversedChildren.reverse() for child in reversedChildren: myParent.insert(myIndex, child) def extract(self): """Destructively rips this element out of the tree.""" if self.parent: try: del self.parent.contents[self.parent.index(self)] except ValueError: pass #Find the two elements that would be next to each other if #this element (and any children) hadn't been parsed. Connect #the two. lastChild = self._lastRecursiveChild() nextElement = lastChild.next if self.previous: self.previous.next = nextElement if nextElement: nextElement.previous = self.previous self.previous = None lastChild.next = None self.parent = None if self.previousSibling: self.previousSibling.nextSibling = self.nextSibling if self.nextSibling: self.nextSibling.previousSibling = self.previousSibling self.previousSibling = self.nextSibling = None return self def _lastRecursiveChild(self): "Finds the last element beneath this object to be parsed." lastChild = self while hasattr(lastChild, 'contents') and lastChild.contents: lastChild = lastChild.contents[-1] return lastChild def insert(self, position, newChild): if isinstance(newChild, basestring) \ and not isinstance(newChild, NavigableString): newChild = NavigableString(newChild) position = min(position, len(self.contents)) if hasattr(newChild, 'parent') and newChild.parent is not None: # We're 'inserting' an element that's already one # of this object's children. if newChild.parent is self: index = self.index(newChild) if index > position: # Furthermore we're moving it further down the # list of this object's children. That means that # when we extract this element, our target index # will jump down one. position = position - 1 newChild.extract() newChild.parent = self previousChild = None if position == 0: newChild.previousSibling = None newChild.previous = self else: previousChild = self.contents[position-1] newChild.previousSibling = previousChild newChild.previousSibling.nextSibling = newChild newChild.previous = previousChild._lastRecursiveChild() if newChild.previous: newChild.previous.next = newChild newChildsLastElement = newChild._lastRecursiveChild() if position >= len(self.contents): newChild.nextSibling = None parent = self parentsNextSibling = None while not parentsNextSibling: parentsNextSibling = parent.nextSibling parent = parent.parent if not parent: # This is the last element in the document. 
break if parentsNextSibling: newChildsLastElement.next = parentsNextSibling else: newChildsLastElement.next = None else: nextChild = self.contents[position] newChild.nextSibling = nextChild if newChild.nextSibling: newChild.nextSibling.previousSibling = newChild newChildsLastElement.next = nextChild if newChildsLastElement.next: newChildsLastElement.next.previous = newChildsLastElement self.contents.insert(position, newChild) def append(self, tag): """Appends the given tag to the contents of this tag.""" self.insert(len(self.contents), tag) def findNext(self, name=None, attrs={}, text=None, **kwargs): """Returns the first item that matches the given criteria and appears after this Tag in the document.""" return self._findOne(self.findAllNext, name, attrs, text, **kwargs) def findAllNext(self, name=None, attrs={}, text=None, limit=None, **kwargs): """Returns all items that match the given criteria and appear after this Tag in the document.""" return self._findAll(name, attrs, text, limit, self.nextGenerator, **kwargs) def findNextSibling(self, name=None, attrs={}, text=None, **kwargs): """Returns the closest sibling to this Tag that matches the given criteria and appears after this Tag in the document.""" return self._findOne(self.findNextSiblings, name, attrs, text, **kwargs) def findNextSiblings(self, name=None, attrs={}, text=None, limit=None, **kwargs): """Returns the siblings of this Tag that match the given criteria and appear after this Tag in the document.""" return self._findAll(name, attrs, text, limit, self.nextSiblingGenerator, **kwargs) fetchNextSiblings = findNextSiblings # Compatibility with pre-3.x def findPrevious(self, name=None, attrs={}, text=None, **kwargs): """Returns the first item that matches the given criteria and appears before this Tag in the document.""" return self._findOne(self.findAllPrevious, name, attrs, text, **kwargs) def findAllPrevious(self, name=None, attrs={}, text=None, limit=None, **kwargs): """Returns all items that match the given criteria and appear before this Tag in the document.""" return self._findAll(name, attrs, text, limit, self.previousGenerator, **kwargs) fetchPrevious = findAllPrevious # Compatibility with pre-3.x def findPreviousSibling(self, name=None, attrs={}, text=None, **kwargs): """Returns the closest sibling to this Tag that matches the given criteria and appears before this Tag in the document.""" return self._findOne(self.findPreviousSiblings, name, attrs, text, **kwargs) def findPreviousSiblings(self, name=None, attrs={}, text=None, limit=None, **kwargs): """Returns the siblings of this Tag that match the given criteria and appear before this Tag in the document.""" return self._findAll(name, attrs, text, limit, self.previousSiblingGenerator, **kwargs) fetchPreviousSiblings = findPreviousSiblings # Compatibility with pre-3.x def findParent(self, name=None, attrs={}, **kwargs): """Returns the closest parent of this Tag that matches the given criteria.""" # NOTE: We can't use _findOne because findParents takes a different # set of arguments. r = None l = self.findParents(name, attrs, 1) if l: r = l[0] return r def findParents(self, name=None, attrs={}, limit=None, **kwargs): """Returns the parents of this Tag that match the given criteria.""" return self._findAll(name, attrs, None, limit, self.parentGenerator, **kwargs) fetchParents = findParents # Compatibility with pre-3.x #These methods do the real heavy lifting. 
def _findOne(self, method, name, attrs, text, **kwargs): r = None l = method(name, attrs, text, 1, **kwargs) if l: r = l[0] return r def _findAll(self, name, attrs, text, limit, generator, **kwargs): "Iterates over a generator looking for things that match." if isinstance(name, SoupStrainer): strainer = name # (Possibly) special case some findAll*(...) searches elif text is None and not limit and not attrs and not kwargs: # findAll*(True) if name is True: return [element for element in generator() if isinstance(element, Tag)] # findAll*('tag-name') elif isinstance(name, basestring): return [element for element in generator() if isinstance(element, Tag) and element.name == name] else: strainer = SoupStrainer(name, attrs, text, **kwargs) # Build a SoupStrainer else: strainer = SoupStrainer(name, attrs, text, **kwargs) results = ResultSet(strainer) g = generator() while True: try: i = g.next() except StopIteration: break if i: found = strainer.search(i) if found: results.append(found) if limit and len(results) >= limit: break return results #These Generators can be used to navigate starting from both #NavigableStrings and Tags. def nextGenerator(self): i = self while i is not None: i = i.next yield i def nextSiblingGenerator(self): i = self while i is not None: i = i.nextSibling yield i def previousGenerator(self): i = self while i is not None: i = i.previous yield i def previousSiblingGenerator(self): i = self while i is not None: i = i.previousSibling yield i def parentGenerator(self): i = self while i is not None: i = i.parent yield i # Utility methods def substituteEncoding(self, str, encoding=None): encoding = encoding or "utf-8" return str.replace("%SOUP-ENCODING%", encoding) def toEncoding(self, s, encoding=None): """Encodes an object to a string in some encoding, or to Unicode. .""" if isinstance(s, unicode): if encoding: s = s.encode(encoding) elif isinstance(s, str): if encoding: s = s.encode(encoding) else: s = unicode(s) else: if encoding: s = self.toEncoding(str(s), encoding) else: s = unicode(s) return s class NavigableString(unicode, PageElement): def __new__(cls, value): """Create a new NavigableString. When unpickling a NavigableString, this method is called with the string in DEFAULT_OUTPUT_ENCODING. That encoding needs to be passed in to the superclass's __new__ or the superclass won't know how to handle non-ASCII characters. """ if isinstance(value, unicode): return unicode.__new__(cls, value) return unicode.__new__(cls, value, DEFAULT_OUTPUT_ENCODING) def __getnewargs__(self): return (NavigableString.__str__(self),) def __getattr__(self, attr): """text.string gives you text. 
This is for backwards compatibility for Navigable*String, but for CData* it lets you get the string without the CData wrapper.""" if attr == 'string': return self else: raise AttributeError, "'%s' object has no attribute '%s'" % (self.__class__.__name__, attr) def __unicode__(self): return str(self).decode(DEFAULT_OUTPUT_ENCODING) def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING): if encoding: return self.encode(encoding) else: return self class CData(NavigableString): def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING): return "" % NavigableString.__str__(self, encoding) class ProcessingInstruction(NavigableString): def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING): output = self if "%SOUP-ENCODING%" in output: output = self.substituteEncoding(output, encoding) return "" % self.toEncoding(output, encoding) class Comment(NavigableString): def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING): return "" % NavigableString.__str__(self, encoding) class Declaration(NavigableString): def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING): return "" % NavigableString.__str__(self, encoding) class Tag(PageElement): """Represents a found HTML tag with its attributes and contents.""" def _invert(h): "Cheap function to invert a hash." i = {} for k,v in h.items(): i[v] = k return i XML_ENTITIES_TO_SPECIAL_CHARS = { "apos" : "'", "quot" : '"', "amp" : "&", "lt" : "<", "gt" : ">" } XML_SPECIAL_CHARS_TO_ENTITIES = _invert(XML_ENTITIES_TO_SPECIAL_CHARS) def _convertEntities(self, match): """Used in a call to re.sub to replace HTML, XML, and numeric entities with the appropriate Unicode characters. If HTML entities are being converted, any unrecognized entities are escaped.""" x = match.group(1) if self.convertHTMLEntities and x in name2codepoint: return unichr(name2codepoint[x]) elif x in self.XML_ENTITIES_TO_SPECIAL_CHARS: if self.convertXMLEntities: return self.XML_ENTITIES_TO_SPECIAL_CHARS[x] else: return u'&%s;' % x elif len(x) > 0 and x[0] == '#': # Handle numeric entities if len(x) > 1 and x[1] == 'x': return unichr(int(x[2:], 16)) else: return unichr(int(x[1:])) elif self.escapeUnrecognizedEntities: return u'&%s;' % x else: return u'&%s;' % x def __init__(self, parser, name, attrs=None, parent=None, previous=None): "Basic constructor." # We don't actually store the parser object: that lets extracted # chunks be garbage-collected self.parserClass = parser.__class__ self.isSelfClosing = parser.isSelfClosingTag(name) self.name = name if attrs is None: attrs = [] self.attrs = attrs self.contents = [] self.setup(parent, previous) self.hidden = False self.containsSubstitutions = False self.convertHTMLEntities = parser.convertHTMLEntities self.convertXMLEntities = parser.convertXMLEntities self.escapeUnrecognizedEntities = parser.escapeUnrecognizedEntities # Convert any HTML, XML, or numeric entities in the attribute values. 
convert = lambda(k, val): (k, re.sub("&(#\d+|#x[0-9a-fA-F]+|\w+);", self._convertEntities, val)) self.attrs = map(convert, self.attrs) def getString(self): if (len(self.contents) == 1 and isinstance(self.contents[0], NavigableString)): return self.contents[0] def setString(self, string): """Replace the contents of the tag with a string""" self.clear() self.append(string) string = property(getString, setString) def getText(self, separator=u""): if not len(self.contents): return u"" stopNode = self._lastRecursiveChild().next strings = [] current = self.contents[0] while current is not stopNode: if isinstance(current, NavigableString): strings.append(current.strip()) current = current.next return separator.join(strings) text = property(getText) def get(self, key, default=None): """Returns the value of the 'key' attribute for the tag, or the value given for 'default' if it doesn't have that attribute.""" return self._getAttrMap().get(key, default) def clear(self): """Extract all children.""" for child in self.contents[:]: child.extract() def index(self, element): for i, child in enumerate(self.contents): if child is element: return i raise ValueError("Tag.index: element not in tag") def has_key(self, key): return self._getAttrMap().has_key(key) def __getitem__(self, key): """tag[key] returns the value of the 'key' attribute for the tag, and throws an exception if it's not there.""" return self._getAttrMap()[key] def __iter__(self): "Iterating over a tag iterates over its contents." return iter(self.contents) def __len__(self): "The length of a tag is the length of its list of contents." return len(self.contents) def __contains__(self, x): return x in self.contents def __nonzero__(self): "A tag is non-None even if it has no contents." return True def __setitem__(self, key, value): """Setting tag[key] sets the value of the 'key' attribute for the tag.""" self._getAttrMap() self.attrMap[key] = value found = False for i in range(0, len(self.attrs)): if self.attrs[i][0] == key: self.attrs[i] = (key, value) found = True if not found: self.attrs.append((key, value)) self._getAttrMap()[key] = value def __delitem__(self, key): "Deleting tag[key] deletes all 'key' attributes for the tag." for item in self.attrs: if item[0] == key: self.attrs.remove(item) #We don't break because bad HTML can define the same #attribute multiple times. self._getAttrMap() if self.attrMap.has_key(key): del self.attrMap[key] def __call__(self, *args, **kwargs): """Calling a tag like a function is the same as calling its findAll() method. Eg. tag('a') returns a list of all the A tags found within this tag.""" return apply(self.findAll, args, kwargs) def __getattr__(self, tag): #print "Getattr %s.%s" % (self.__class__, tag) if len(tag) > 3 and tag.rfind('Tag') == len(tag)-3: return self.find(tag[:-3]) elif tag.find('__') != 0: return self.find(tag) raise AttributeError, "'%s' object has no attribute '%s'" % (self.__class__, tag) def __eq__(self, other): """Returns true iff this tag has the same name, the same attributes, and the same contents (recursively) as the given tag. NOTE: right now this will return false if two tags have the same attributes in a different order. 
Should this be fixed?""" if other is self: return True if not hasattr(other, 'name') or not hasattr(other, 'attrs') or not hasattr(other, 'contents') or self.name != other.name or self.attrs != other.attrs or len(self) != len(other): return False for i in range(0, len(self.contents)): if self.contents[i] != other.contents[i]: return False return True def __ne__(self, other): """Returns true iff this tag is not identical to the other tag, as defined in __eq__.""" return not self == other def __repr__(self, encoding=DEFAULT_OUTPUT_ENCODING): """Renders this tag as a string.""" return self.__str__(encoding) def __unicode__(self): return self.__str__(None) BARE_AMPERSAND_OR_BRACKET = re.compile("([<>]|" + "&(?!#\d+;|#x[0-9a-fA-F]+;|\w+;)" + ")") def _sub_entity(self, x): """Used with a regular expression to substitute the appropriate XML entity for an XML special character.""" return "&" + self.XML_SPECIAL_CHARS_TO_ENTITIES[x.group(0)[0]] + ";" def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING, prettyPrint=False, indentLevel=0): """Returns a string or Unicode representation of this tag and its contents. To get Unicode, pass None for encoding. NOTE: since Python's HTML parser consumes whitespace, this method is not certain to reproduce the whitespace present in the original string.""" encodedName = self.toEncoding(self.name, encoding) attrs = [] if self.attrs: for key, val in self.attrs: fmt = '%s="%s"' if isinstance(val, basestring): if self.containsSubstitutions and '%SOUP-ENCODING%' in val: val = self.substituteEncoding(val, encoding) # The attribute value either: # # * Contains no embedded double quotes or single quotes. # No problem: we enclose it in double quotes. # * Contains embedded single quotes. No problem: # double quotes work here too. # * Contains embedded double quotes. No problem: # we enclose it in single quotes. # * Embeds both single _and_ double quotes. This # can't happen naturally, but it can happen if # you modify an attribute value after parsing # the document. Now we have a bit of a # problem. We solve it by enclosing the # attribute in single quotes, and escaping any # embedded single quotes to XML entities. if '"' in val: fmt = "%s='%s'" if "'" in val: # TODO: replace with apos when # appropriate. val = val.replace("'", "&squot;") # Now we're okay w/r/t quotes. But the attribute # value might also contain angle brackets, or # ampersands that aren't part of entities. We need # to escape those to XML entities too. 
val = self.BARE_AMPERSAND_OR_BRACKET.sub(self._sub_entity, val) attrs.append(fmt % (self.toEncoding(key, encoding), self.toEncoding(val, encoding))) close = '' closeTag = '' if self.isSelfClosing: close = ' /' else: closeTag = '' % encodedName indentTag, indentContents = 0, 0 if prettyPrint: indentTag = indentLevel space = (' ' * (indentTag-1)) indentContents = indentTag + 1 contents = self.renderContents(encoding, prettyPrint, indentContents) if self.hidden: s = contents else: s = [] attributeString = '' if attrs: attributeString = ' ' + ' '.join(attrs) if prettyPrint: s.append(space) s.append('<%s%s%s>' % (encodedName, attributeString, close)) if prettyPrint: s.append("\n") s.append(contents) if prettyPrint and contents and contents[-1] != "\n": s.append("\n") if prettyPrint and closeTag: s.append(space) s.append(closeTag) if prettyPrint and closeTag and self.nextSibling: s.append("\n") s = ''.join(s) return s def decompose(self): """Recursively destroys the contents of this tree.""" self.extract() if len(self.contents) == 0: return current = self.contents[0] while current is not None: next = current.next if isinstance(current, Tag): del current.contents[:] current.parent = None current.previous = None current.previousSibling = None current.next = None current.nextSibling = None current = next def prettify(self, encoding=DEFAULT_OUTPUT_ENCODING): return self.__str__(encoding, True) def renderContents(self, encoding=DEFAULT_OUTPUT_ENCODING, prettyPrint=False, indentLevel=0): """Renders the contents of this tag as a string in the given encoding. If encoding is None, returns a Unicode string..""" s=[] for c in self: text = None if isinstance(c, NavigableString): text = c.__str__(encoding) elif isinstance(c, Tag): s.append(c.__str__(encoding, prettyPrint, indentLevel)) if text and prettyPrint: text = text.strip() if text: if prettyPrint: s.append(" " * (indentLevel-1)) s.append(text) if prettyPrint: s.append("\n") return ''.join(s) #Soup methods def find(self, name=None, attrs={}, recursive=True, text=None, **kwargs): """Return only the first child of this Tag matching the given criteria.""" r = None l = self.findAll(name, attrs, recursive, text, 1, **kwargs) if l: r = l[0] return r findChild = find def findAll(self, name=None, attrs={}, recursive=True, text=None, limit=None, **kwargs): """Extracts a list of Tag objects that match the given criteria. You can specify the name of the Tag and any attributes you want the Tag to have. The value of a key-value pair in the 'attrs' map can be a string, a list of strings, a regular expression object, or a callable that takes a string and returns whether or not the string matches for some custom definition of 'matches'. 
The same is true of the tag name.""" generator = self.recursiveChildGenerator if not recursive: generator = self.childGenerator return self._findAll(name, attrs, text, limit, generator, **kwargs) findChildren = findAll # Pre-3.x compatibility methods first = find fetch = findAll def fetchText(self, text=None, recursive=True, limit=None): return self.findAll(text=text, recursive=recursive, limit=limit) def firstText(self, text=None, recursive=True): return self.find(text=text, recursive=recursive) #Private methods def _getAttrMap(self): """Initializes a map representation of this tag's attributes, if not already initialized.""" if not getattr(self, 'attrMap'): self.attrMap = {} for (key, value) in self.attrs: self.attrMap[key] = value return self.attrMap #Generator methods def childGenerator(self): # Just use the iterator from the contents return iter(self.contents) def recursiveChildGenerator(self): if not len(self.contents): raise StopIteration stopNode = self._lastRecursiveChild().next current = self.contents[0] while current is not stopNode: yield current current = current.next # Next, a couple classes to represent queries and their results. class SoupStrainer: """Encapsulates a number of ways of matching a markup element (tag or text).""" def __init__(self, name=None, attrs={}, text=None, **kwargs): self.name = name if isinstance(attrs, basestring): kwargs['class'] = _match_css_class(attrs) attrs = None if kwargs: if attrs: attrs = attrs.copy() attrs.update(kwargs) else: attrs = kwargs self.attrs = attrs self.text = text def __str__(self): if self.text: return self.text else: return "%s|%s" % (self.name, self.attrs) def searchTag(self, markupName=None, markupAttrs={}): found = None markup = None if isinstance(markupName, Tag): markup = markupName markupAttrs = markup callFunctionWithTagData = callable(self.name) \ and not isinstance(markupName, Tag) if (not self.name) \ or callFunctionWithTagData \ or (markup and self._matches(markup, self.name)) \ or (not markup and self._matches(markupName, self.name)): if callFunctionWithTagData: match = self.name(markupName, markupAttrs) else: match = True markupAttrMap = None for attr, matchAgainst in self.attrs.items(): if not markupAttrMap: if hasattr(markupAttrs, 'get'): markupAttrMap = markupAttrs else: markupAttrMap = {} for k,v in markupAttrs: markupAttrMap[k] = v attrValue = markupAttrMap.get(attr) if not self._matches(attrValue, matchAgainst): match = False break if match: if markup: found = markup else: found = markupName return found def search(self, markup): #print 'looking for %s in %s' % (self, markup) found = None # If given a list of items, scan it for a text element that # matches. if hasattr(markup, "__iter__") \ and not isinstance(markup, Tag): for element in markup: if isinstance(element, NavigableString) \ and self.search(element): found = element break # If it's a Tag, make sure its name or attributes match. # Don't bother with Tags if we're searching for text. elif isinstance(markup, Tag): if not self.text: found = self.searchTag(markup) # If it's text, make sure the text matches. 
elif isinstance(markup, NavigableString) or \ isinstance(markup, basestring): if self._matches(markup, self.text): found = markup else: raise Exception, "I don't know how to match against a %s" \ % markup.__class__ return found def _matches(self, markup, matchAgainst): #print "Matching %s against %s" % (markup, matchAgainst) result = False if matchAgainst is True: result = markup is not None elif callable(matchAgainst): result = matchAgainst(markup) else: #Custom match methods take the tag as an argument, but all #other ways of matching match the tag name as a string. if isinstance(markup, Tag): markup = markup.name if markup and not isinstance(markup, basestring): markup = unicode(markup) #Now we know that chunk is either a string, or None. if hasattr(matchAgainst, 'match'): # It's a regexp object. result = markup and matchAgainst.search(markup) elif hasattr(matchAgainst, '__iter__'): # list-like result = markup in matchAgainst elif hasattr(matchAgainst, 'items'): result = markup.has_key(matchAgainst) elif matchAgainst and isinstance(markup, basestring): if isinstance(markup, unicode): matchAgainst = unicode(matchAgainst) else: matchAgainst = str(matchAgainst) if not result: result = matchAgainst == markup return result class ResultSet(list): """A ResultSet is just a list that keeps track of the SoupStrainer that created it.""" def __init__(self, source): list.__init__([]) self.source = source # Now, some helper functions. def buildTagMap(default, *args): """Turns a list of maps, lists, or scalars into a single map. Used to build the SELF_CLOSING_TAGS, NESTABLE_TAGS, and NESTING_RESET_TAGS maps out of lists and partial maps.""" built = {} for portion in args: if hasattr(portion, 'items'): #It's a map. Merge it. for k,v in portion.items(): built[k] = v elif hasattr(portion, '__iter__'): # is a list #It's a list. Map each item to the default. for k in portion: built[k] = default else: #It's a scalar. Map it to the default. built[portion] = default return built # Now, the parser classes. class BeautifulStoneSoup(Tag, SGMLParser): """This class contains the basic parser and search code. It defines a parser that knows nothing about tag behavior except for the following: You can't close a tag without closing all the tags it encloses. That is, "" actually means "". [Another possible explanation is "", but since this class defines no SELF_CLOSING_TAGS, it will never use that explanation.] This class is useful for parsing XML or made-up markup languages, or when BeautifulSoup makes an assumption counter to what you were expecting.""" SELF_CLOSING_TAGS = {} NESTABLE_TAGS = {} RESET_NESTING_TAGS = {} QUOTE_TAGS = {} PRESERVE_WHITESPACE_TAGS = [] MARKUP_MASSAGE = [(re.compile('(<[^<>]*)/>'), lambda x: x.group(1) + ' />'), (re.compile(']*)>'), lambda x: '') ] ROOT_TAG_NAME = u'[document]' HTML_ENTITIES = "html" XML_ENTITIES = "xml" XHTML_ENTITIES = "xhtml" # TODO: This only exists for backwards-compatibility ALL_ENTITIES = XHTML_ENTITIES # Used when determining whether a text node is all whitespace and # can be replaced with a single space. A text node that contains # fancy Unicode spaces (usually non-breaking) should be left # alone. 
STRIP_ASCII_SPACES = { 9: None, 10: None, 12: None, 13: None, 32: None, } def __init__(self, markup="", parseOnlyThese=None, fromEncoding=None, markupMassage=True, smartQuotesTo=XML_ENTITIES, convertEntities=None, selfClosingTags=None, isHTML=False): """The Soup object is initialized as the 'root tag', and the provided markup (which can be a string or a file-like object) is fed into the underlying parser. sgmllib will process most bad HTML, and the BeautifulSoup class has some tricks for dealing with some HTML that kills sgmllib, but Beautiful Soup can nonetheless choke or lose data if your data uses self-closing tags or declarations incorrectly. By default, Beautiful Soup uses regexes to sanitize input, avoiding the vast majority of these problems. If the problems don't apply to you, pass in False for markupMassage, and you'll get better performance. The default parser massage techniques fix the two most common instances of invalid HTML that choke sgmllib:
(No space between name of closing tag and tag close) (Extraneous whitespace in declaration) You can pass in a custom list of (RE object, replace method) tuples to get Beautiful Soup to scrub your input the way you want.""" self.parseOnlyThese = parseOnlyThese self.fromEncoding = fromEncoding self.smartQuotesTo = smartQuotesTo self.convertEntities = convertEntities # Set the rules for how we'll deal with the entities we # encounter if self.convertEntities: # It doesn't make sense to convert encoded characters to # entities even while you're converting entities to Unicode. # Just convert it all to Unicode. self.smartQuotesTo = None if convertEntities == self.HTML_ENTITIES: self.convertXMLEntities = False self.convertHTMLEntities = True self.escapeUnrecognizedEntities = True elif convertEntities == self.XHTML_ENTITIES: self.convertXMLEntities = True self.convertHTMLEntities = True self.escapeUnrecognizedEntities = False elif convertEntities == self.XML_ENTITIES: self.convertXMLEntities = True self.convertHTMLEntities = False self.escapeUnrecognizedEntities = False else: self.convertXMLEntities = False self.convertHTMLEntities = False self.escapeUnrecognizedEntities = False self.instanceSelfClosingTags = buildTagMap(None, selfClosingTags) SGMLParser.__init__(self) if hasattr(markup, 'read'): # It's a file-type object. markup = markup.read() self.markup = markup self.markupMassage = markupMassage try: self._feed(isHTML=isHTML) except StopParsing: pass self.markup = None # The markup can now be GCed def convert_charref(self, name): """This method fixes a bug in Python's SGMLParser.""" try: n = int(name) except ValueError: return if not 0 <= n <= 127 : # ASCII ends at 127, not 255 return return self.convert_codepoint(n) def _feed(self, inDocumentEncoding=None, isHTML=False): # Convert the document to Unicode. markup = self.markup if isinstance(markup, unicode): if not hasattr(self, 'originalEncoding'): self.originalEncoding = None else: dammit = UnicodeDammit\ (markup, [self.fromEncoding, inDocumentEncoding], smartQuotesTo=self.smartQuotesTo, isHTML=isHTML) markup = dammit.unicode self.originalEncoding = dammit.originalEncoding self.declaredHTMLEncoding = dammit.declaredHTMLEncoding if markup: if self.markupMassage: if not hasattr(self.markupMassage, "__iter__"): self.markupMassage = self.MARKUP_MASSAGE for fix, m in self.markupMassage: markup = fix.sub(m, markup) # TODO: We get rid of markupMassage so that the # soup object can be deepcopied later on. Some # Python installations can't copy regexes. If anyone # was relying on the existence of markupMassage, this # might cause problems. del(self.markupMassage) self.reset() SGMLParser.feed(self, markup) # Close out any unfinished strings and close all the open tags. 
self.endData() while self.currentTag.name != self.ROOT_TAG_NAME: self.popTag() def __getattr__(self, methodName): """This method routes method call requests to either the SGMLParser superclass or the Tag superclass, depending on the method name.""" #print "__getattr__ called on %s.%s" % (self.__class__, methodName) if methodName.startswith('start_') or methodName.startswith('end_') \ or methodName.startswith('do_'): return SGMLParser.__getattr__(self, methodName) elif not methodName.startswith('__'): return Tag.__getattr__(self, methodName) else: raise AttributeError def isSelfClosingTag(self, name): """Returns true iff the given string is the name of a self-closing tag according to this parser.""" return self.SELF_CLOSING_TAGS.has_key(name) \ or self.instanceSelfClosingTags.has_key(name) def reset(self): Tag.__init__(self, self, self.ROOT_TAG_NAME) self.hidden = 1 SGMLParser.reset(self) self.currentData = [] self.currentTag = None self.tagStack = [] self.quoteStack = [] self.pushTag(self) def popTag(self): tag = self.tagStack.pop() #print "Pop", tag.name if self.tagStack: self.currentTag = self.tagStack[-1] return self.currentTag def pushTag(self, tag): #print "Push", tag.name if self.currentTag: self.currentTag.contents.append(tag) self.tagStack.append(tag) self.currentTag = self.tagStack[-1] def endData(self, containerClass=NavigableString): if self.currentData: currentData = u''.join(self.currentData) if (currentData.translate(self.STRIP_ASCII_SPACES) == '' and not set([tag.name for tag in self.tagStack]).intersection( self.PRESERVE_WHITESPACE_TAGS)): if '\n' in currentData: currentData = '\n' else: currentData = ' ' self.currentData = [] if self.parseOnlyThese and len(self.tagStack) <= 1 and \ (not self.parseOnlyThese.text or \ not self.parseOnlyThese.search(currentData)): return o = containerClass(currentData) o.setup(self.currentTag, self.previous) if self.previous: self.previous.next = o self.previous = o self.currentTag.contents.append(o) def _popToTag(self, name, inclusivePop=True): """Pops the tag stack up to and including the most recent instance of the given tag. If inclusivePop is false, pops the tag stack up to but *not* including the most recent instqance of the given tag.""" #print "Popping to %s" % name if name == self.ROOT_TAG_NAME: return numPops = 0 mostRecentTag = None for i in range(len(self.tagStack)-1, 0, -1): if name == self.tagStack[i].name: numPops = len(self.tagStack)-i break if not inclusivePop: numPops = numPops - 1 for i in range(0, numPops): mostRecentTag = self.popTag() return mostRecentTag def _smartPop(self, name): """We need to pop up to the previous tag of this type, unless one of this tag's nesting reset triggers comes between this tag and the previous tag of this type, OR unless this tag is a generic nesting trigger and another generic nesting trigger comes between this tag and the previous tag of this type. Examples:

         <p>Foo<b>Bar *<p> should pop to 'p', not 'b'.
         <p>Foo<table>Bar *<p> should pop to 'table', not 'p'.
         <p>Foo<table><tr>Bar *<p> should pop to 'tr', not 'p'.

         <li><ul><li> *<li> should pop to 'ul', not the first 'li'.
         <tr><table><tr> *<tr> should pop to 'table', not the first 'tr'
         <td><tr><td>
    ** should pop to 'tr', not the first 'td' """ nestingResetTriggers = self.NESTABLE_TAGS.get(name) isNestable = nestingResetTriggers != None isResetNesting = self.RESET_NESTING_TAGS.has_key(name) popTo = None inclusive = True for i in range(len(self.tagStack)-1, 0, -1): p = self.tagStack[i] if (not p or p.name == name) and not isNestable: #Non-nestable tags get popped to the top or to their #last occurance. popTo = name break if (nestingResetTriggers is not None and p.name in nestingResetTriggers) \ or (nestingResetTriggers is None and isResetNesting and self.RESET_NESTING_TAGS.has_key(p.name)): #If we encounter one of the nesting reset triggers #peculiar to this tag, or we encounter another tag #that causes nesting to reset, pop up to but not #including that tag. popTo = p.name inclusive = False break p = p.parent if popTo: self._popToTag(popTo, inclusive) def unknown_starttag(self, name, attrs, selfClosing=0): #print "Start tag %s: %s" % (name, attrs) if self.quoteStack: #This is not a real tag. #print "<%s> is not real!" % name attrs = ''.join([' %s="%s"' % (x, y) for x, y in attrs]) self.handle_data('<%s%s>' % (name, attrs)) return self.endData() if not self.isSelfClosingTag(name) and not selfClosing: self._smartPop(name) if self.parseOnlyThese and len(self.tagStack) <= 1 \ and (self.parseOnlyThese.text or not self.parseOnlyThese.searchTag(name, attrs)): return tag = Tag(self, name, attrs, self.currentTag, self.previous) if self.previous: self.previous.next = tag self.previous = tag self.pushTag(tag) if selfClosing or self.isSelfClosingTag(name): self.popTag() if name in self.QUOTE_TAGS: #print "Beginning quote (%s)" % name self.quoteStack.append(name) self.literal = 1 return tag def unknown_endtag(self, name): #print "End tag %s" % name if self.quoteStack and self.quoteStack[-1] != name: #This is not a real end tag. #print " is not real!" % name self.handle_data('' % name) return self.endData() self._popToTag(name) if self.quoteStack and self.quoteStack[-1] == name: self.quoteStack.pop() self.literal = (len(self.quoteStack) > 0) def handle_data(self, data): self.currentData.append(data) def _toStringSubclass(self, text, subclass): """Adds a certain piece of text to the tree as a NavigableString subclass.""" self.endData() self.handle_data(text) self.endData(subclass) def handle_pi(self, text): """Handle a processing instruction as a ProcessingInstruction object, possibly one with a %SOUP-ENCODING% slot into which an encoding will be plugged later.""" if text[:3] == "xml": text = u"xml version='1.0' encoding='%SOUP-ENCODING%'" self._toStringSubclass(text, ProcessingInstruction) def handle_comment(self, text): "Handle comments as Comment objects." self._toStringSubclass(text, Comment) def handle_charref(self, ref): "Handle character references as data." if self.convertEntities: data = unichr(int(ref)) else: data = '&#%s;' % ref self.handle_data(data) def handle_entityref(self, ref): """Handle entity references as data, possibly converting known HTML and/or XML entity references to the corresponding Unicode characters.""" data = None if self.convertHTMLEntities: try: data = unichr(name2codepoint[ref]) except KeyError: pass if not data and self.convertXMLEntities: data = self.XML_ENTITIES_TO_SPECIAL_CHARS.get(ref) if not data and self.convertHTMLEntities and \ not self.XML_ENTITIES_TO_SPECIAL_CHARS.get(ref): # TODO: We've got a problem here. We're told this is # an entity reference, but it's not an XML entity # reference or an HTML entity reference. 
Nonetheless, # the logical thing to do is to pass it through as an # unrecognized entity reference. # # Except: when the input is "&carol;" this function # will be called with input "carol". When the input is # "AT&T", this function will be called with input # "T". We have no way of knowing whether a semicolon # was present originally, so we don't know whether # this is an unknown entity or just a misplaced # ampersand. # # The more common case is a misplaced ampersand, so I # escape the ampersand and omit the trailing semicolon. data = "&%s" % ref if not data: # This case is different from the one above, because we # haven't already gone through a supposedly comprehensive # mapping of entities to Unicode characters. We might not # have gone through any mapping at all. So the chances are # very high that this is a real entity, and not a # misplaced ampersand. data = "&%s;" % ref self.handle_data(data) def handle_decl(self, data): "Handle DOCTYPEs and the like as Declaration objects." self._toStringSubclass(data, Declaration) def parse_declaration(self, i): """Treat a bogus SGML declaration as raw data. Treat a CDATA declaration as a CData object.""" j = None if self.rawdata[i:i+9] == '', i) if k == -1: k = len(self.rawdata) data = self.rawdata[i+9:k] j = k+3 self._toStringSubclass(data, CData) else: try: j = SGMLParser.parse_declaration(self, i) except SGMLParseError: toHandle = self.rawdata[i:] self.handle_data(toHandle) j = i + len(toHandle) return j class BeautifulSoup(BeautifulStoneSoup): """This parser knows the following facts about HTML: * Some tags have no closing tag and should be interpreted as being closed as soon as they are encountered. * The text inside some tags (ie. 'script') may contain tags which are not really part of the document and which should be parsed as text, not tags. If you want to parse the text as tags, you can always fetch it and parse it explicitly. * Tag nesting rules: Most tags can't be nested at all. For instance, the occurance of a

      <p> tag should implicitly close the previous <p> tag.

       <p>Para1<p>Para2
        should be transformed into:
       <p>Para1</p><p>Para2

      Some tags can be nested arbitrarily. For instance, the occurance
      of a <blockquote> tag should _not_ implicitly close the previous
      <blockquote> tag.

       Alice said: <blockquote>Bob said: <blockquote>Blah
        should NOT be transformed into:
       Alice said: <blockquote>Bob said: </blockquote><blockquote>Blah

      Some tags can be nested, but the nesting is reset by the
      interposition of other tags. For instance, a <tr> tag should
      implicitly close the previous <tr> tag within the same <table>,
      but not close a <tr> tag in another table.

       <table><tr>Blah<tr>Blah
        should be transformed into:
       <table><tr>Blah</tr><tr>Blah
       but,
       <tr>Blah<table><tr>Blah
        should NOT be transformed into
       <tr>Blah<table></tr><tr>
    Blah Differing assumptions about tag nesting rules are a major source of problems with the BeautifulSoup class. If BeautifulSoup is not treating as nestable a tag your page author treats as nestable, try ICantBelieveItsBeautifulSoup, MinimalSoup, or BeautifulStoneSoup before writing your own subclass.""" def __init__(self, *args, **kwargs): if not kwargs.has_key('smartQuotesTo'): kwargs['smartQuotesTo'] = self.HTML_ENTITIES kwargs['isHTML'] = True BeautifulStoneSoup.__init__(self, *args, **kwargs) SELF_CLOSING_TAGS = buildTagMap(None, ('br' , 'hr', 'input', 'img', 'meta', 'spacer', 'link', 'frame', 'base', 'col')) PRESERVE_WHITESPACE_TAGS = set(['pre', 'textarea']) QUOTE_TAGS = {'script' : None, 'textarea' : None} #According to the HTML standard, each of these inline tags can #contain another tag of the same type. Furthermore, it's common #to actually use these tags this way. NESTABLE_INLINE_TAGS = ('span', 'font', 'q', 'object', 'bdo', 'sub', 'sup', 'center') #According to the HTML standard, these block tags can contain #another tag of the same type. Furthermore, it's common #to actually use these tags this way. NESTABLE_BLOCK_TAGS = ('blockquote', 'div', 'fieldset', 'ins', 'del') #Lists can contain other lists, but there are restrictions. NESTABLE_LIST_TAGS = { 'ol' : [], 'ul' : [], 'li' : ['ul', 'ol'], 'dl' : [], 'dd' : ['dl'], 'dt' : ['dl'] } #Tables can contain other tables, but there are restrictions. NESTABLE_TABLE_TAGS = {'table' : [], 'tr' : ['table', 'tbody', 'tfoot', 'thead'], 'td' : ['tr'], 'th' : ['tr'], 'thead' : ['table'], 'tbody' : ['table'], 'tfoot' : ['table'], } NON_NESTABLE_BLOCK_TAGS = ('address', 'form', 'p', 'pre') #If one of these tags is encountered, all tags up to the next tag of #this type are popped. RESET_NESTING_TAGS = buildTagMap(None, NESTABLE_BLOCK_TAGS, 'noscript', NON_NESTABLE_BLOCK_TAGS, NESTABLE_LIST_TAGS, NESTABLE_TABLE_TAGS) NESTABLE_TAGS = buildTagMap([], NESTABLE_INLINE_TAGS, NESTABLE_BLOCK_TAGS, NESTABLE_LIST_TAGS, NESTABLE_TABLE_TAGS) # Used to detect the charset in a META tag; see start_meta CHARSET_RE = re.compile("((^|;)\s*charset=)([^;]*)", re.M) def start_meta(self, attrs): """Beautiful Soup can detect a charset included in a META tag, try to convert the document to that charset, and re-parse the document from the beginning.""" httpEquiv = None contentType = None contentTypeIndex = None tagNeedsEncodingSubstitution = False for i in range(0, len(attrs)): key, value = attrs[i] key = key.lower() if key == 'http-equiv': httpEquiv = value elif key == 'content': contentType = value contentTypeIndex = i if httpEquiv and contentType: # It's an interesting meta tag. match = self.CHARSET_RE.search(contentType) if match: if (self.declaredHTMLEncoding is not None or self.originalEncoding == self.fromEncoding): # An HTML encoding was sniffed while converting # the document to Unicode, or an HTML encoding was # sniffed during a previous pass through the # document, or an encoding was specified # explicitly and it worked. Rewrite the meta tag. def rewrite(match): return match.group(1) + "%SOUP-ENCODING%" newAttr = self.CHARSET_RE.sub(rewrite, contentType) attrs[contentTypeIndex] = (attrs[contentTypeIndex][0], newAttr) tagNeedsEncodingSubstitution = True else: # This is our first pass through the document. # Go through it again with the encoding information. 
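                    # Re-parse from the beginning using the charset declared in
                    # the meta tag; raising StopParsing aborts the current
                    # (possibly mis-decoded) pass.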
newCharset = match.group(3) if newCharset and newCharset != self.originalEncoding: self.declaredHTMLEncoding = newCharset self._feed(self.declaredHTMLEncoding) raise StopParsing pass tag = self.unknown_starttag("meta", attrs) if tag and tagNeedsEncodingSubstitution: tag.containsSubstitutions = True class StopParsing(Exception): pass class ICantBelieveItsBeautifulSoup(BeautifulSoup): """The BeautifulSoup class is oriented towards skipping over common HTML errors like unclosed tags. However, sometimes it makes errors of its own. For instance, consider this fragment: FooBar This is perfectly valid (if bizarre) HTML. However, the BeautifulSoup class will implicitly close the first b tag when it encounters the second 'b'. It will think the author wrote "FooBar", and didn't close the first 'b' tag, because there's no real-world reason to bold something that's already bold. When it encounters '' it will close two more 'b' tags, for a grand total of three tags closed instead of two. This can throw off the rest of your document structure. The same is true of a number of other tags, listed below. It's much more common for someone to forget to close a 'b' tag than to actually use nested 'b' tags, and the BeautifulSoup class handles the common case. This class handles the not-co-common case: where you can't believe someone wrote what they did, but it's valid HTML and BeautifulSoup screwed up by assuming it wouldn't be.""" I_CANT_BELIEVE_THEYRE_NESTABLE_INLINE_TAGS = \ ('em', 'big', 'i', 'small', 'tt', 'abbr', 'acronym', 'strong', 'cite', 'code', 'dfn', 'kbd', 'samp', 'strong', 'var', 'b', 'big') I_CANT_BELIEVE_THEYRE_NESTABLE_BLOCK_TAGS = ('noscript',) NESTABLE_TAGS = buildTagMap([], BeautifulSoup.NESTABLE_TAGS, I_CANT_BELIEVE_THEYRE_NESTABLE_BLOCK_TAGS, I_CANT_BELIEVE_THEYRE_NESTABLE_INLINE_TAGS) class MinimalSoup(BeautifulSoup): """The MinimalSoup class is for parsing HTML that contains pathologically bad markup. 
It makes no assumptions about tag nesting, but it does know which tags are self-closing, that """) self.assertEqual(get_meta_refresh(r1), (5.0, 'http://example.org/newpage')) self.assertEqual(get_meta_refresh(r2), (None, None)) self.assertEqual(get_meta_refresh(r3), (None, None)) if __name__ == "__main__": unittest.main() Scrapy-0.14.4/scrapy/tests/test_downloader_handlers.py0000600000016101777760000002573111754531743023245 0ustar buildbotnogroupimport os from twisted.trial import unittest from twisted.protocols.policies import WrappingFactory from twisted.python.filepath import FilePath from twisted.internet import reactor, defer from twisted.web import server, static, util, resource from twisted.web.test.test_webclient import ForeverTakingResource, \ NoLengthResource, HostHeaderResource, \ PayloadResource, BrokenDownloadResource from w3lib.url import path_to_file_uri from scrapy.core.downloader.handlers.file import FileDownloadHandler from scrapy.core.downloader.handlers.http import HttpDownloadHandler from scrapy.core.downloader.handlers.s3 import S3DownloadHandler from scrapy.spider import BaseSpider from scrapy.http import Request from scrapy import optional_features class FileTestCase(unittest.TestCase): def setUp(self): self.tmpname = self.mktemp() fd = open(self.tmpname + '^', 'w') fd.write('0123456789') fd.close() self.download_request = FileDownloadHandler().download_request def test_download(self): def _test(response): self.assertEquals(response.url, request.url) self.assertEquals(response.status, 200) self.assertEquals(response.body, '0123456789') request = Request(path_to_file_uri(self.tmpname + '^')) assert request.url.upper().endswith('%5E') return self.download_request(request, BaseSpider('foo')).addCallback(_test) def test_non_existent(self): request = Request('file://%s' % self.mktemp()) d = self.download_request(request, BaseSpider('foo')) return self.assertFailure(d, IOError) class HttpTestCase(unittest.TestCase): def setUp(self): name = self.mktemp() os.mkdir(name) FilePath(name).child("file").setContent("0123456789") r = static.File(name) r.putChild("redirect", util.Redirect("/file")) r.putChild("wait", ForeverTakingResource()) r.putChild("nolength", NoLengthResource()) r.putChild("host", HostHeaderResource()) r.putChild("payload", PayloadResource()) r.putChild("broken", BrokenDownloadResource()) self.site = server.Site(r, timeout=None) self.wrapper = WrappingFactory(self.site) self.port = reactor.listenTCP(0, self.wrapper, interface='127.0.0.1') self.portno = self.port.getHost().port self.download_request = HttpDownloadHandler().download_request def tearDown(self): return self.port.stopListening() def getURL(self, path): return "http://127.0.0.1:%d/%s" % (self.portno, path) def test_download(self): request = Request(self.getURL('file')) d = self.download_request(request, BaseSpider('foo')) d.addCallback(lambda r: r.body) d.addCallback(self.assertEquals, "0123456789") return d def test_download_head(self): request = Request(self.getURL('file'), method='HEAD') d = self.download_request(request, BaseSpider('foo')) d.addCallback(lambda r: r.body) d.addCallback(self.assertEquals, '') return d def test_redirect_status(self): request = Request(self.getURL('redirect')) d = self.download_request(request, BaseSpider('foo')) d.addCallback(lambda r: r.status) d.addCallback(self.assertEquals, 302) return d def test_redirect_status_head(self): request = Request(self.getURL('redirect'), method='HEAD') d = self.download_request(request, BaseSpider('foo')) d.addCallback(lambda r: 
r.status) d.addCallback(self.assertEquals, 302) return d def test_timeout_download_from_spider(self): request = Request(self.getURL('wait'), meta=dict(download_timeout=0.000001)) d = self.download_request(request, BaseSpider('foo')) return self.assertFailure(d, defer.TimeoutError) def test_host_header_not_in_request_headers(self): def _test(response): self.assertEquals(response.body, '127.0.0.1:%d' % self.portno) self.assertEquals(request.headers, {}) request = Request(self.getURL('host')) return self.download_request(request, BaseSpider('foo')).addCallback(_test) def test_host_header_seted_in_request_headers(self): def _test(response): self.assertEquals(response.body, 'example.com') self.assertEquals(request.headers.get('Host'), 'example.com') request = Request(self.getURL('host'), headers={'Host': 'example.com'}) return self.download_request(request, BaseSpider('foo')).addCallback(_test) d = self.download_request(request, BaseSpider('foo')) d.addCallback(lambda r: r.body) d.addCallback(self.assertEquals, 'example.com') return d def test_payload(self): body = '1'*100 # PayloadResource requires body length to be 100 request = Request(self.getURL('payload'), method='POST', body=body) d = self.download_request(request, BaseSpider('foo')) d.addCallback(lambda r: r.body) d.addCallback(self.assertEquals, body) return d class UriResource(resource.Resource): """Return the full uri that was requested""" def getChild(self, path, request): return self def render(self, request): return request.uri class HttpProxyTestCase(unittest.TestCase): def setUp(self): site = server.Site(UriResource(), timeout=None) wrapper = WrappingFactory(site) self.port = reactor.listenTCP(0, wrapper, interface='127.0.0.1') self.portno = self.port.getHost().port self.download_request = HttpDownloadHandler().download_request def tearDown(self): return self.port.stopListening() def getURL(self, path): return "http://127.0.0.1:%d/%s" % (self.portno, path) def test_download_with_proxy(self): def _test(response): self.assertEquals(response.status, 200) self.assertEquals(response.url, request.url) self.assertEquals(response.body, 'https://example.com') http_proxy = self.getURL('') request = Request('https://example.com', meta={'proxy': http_proxy}) return self.download_request(request, BaseSpider('foo')).addCallback(_test) def test_download_without_proxy(self): def _test(response): self.assertEquals(response.status, 200) self.assertEquals(response.url, request.url) self.assertEquals(response.body, '/path/to/resource') request = Request(self.getURL('path/to/resource')) return self.download_request(request, BaseSpider('foo')).addCallback(_test) class HttpDownloadHandlerMock(object): def download_request(self, request, spider): return request class S3TestCase(unittest.TestCase): skip = 'boto' not in optional_features and 'missing boto library' # test use same example keys than amazon developer guide # http://s3.amazonaws.com/awsdocs/S3/20060301/s3-dg-20060301.pdf # and the tests described here are the examples from that manual AWS_ACCESS_KEY_ID = '0PN5J17HBGZHT7JJ3X82' AWS_SECRET_ACCESS_KEY = 'uV3F3YluFJax1cknvbcGwgjvx4QpvB+leU8dUj2o' def setUp(self): s3reqh = S3DownloadHandler(self.AWS_ACCESS_KEY_ID, \ self.AWS_SECRET_ACCESS_KEY, \ httpdownloadhandler=HttpDownloadHandlerMock) self.download_request = s3reqh.download_request self.spider = BaseSpider('foo') def test_request_signing1(self): # gets an object from the johnsmith bucket. 
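        # The expected Authorization value comes from the worked example in the
        # S3 developer guide; the fixed Date header keeps the HMAC signature
        # reproducible.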
req = Request('s3://johnsmith/photos/puppy.jpg', headers={'Date': 'Tue, 27 Mar 2007 19:36:42 +0000'}) httpreq = self.download_request(req, self.spider) self.assertEqual(httpreq.headers['Authorization'], \ 'AWS 0PN5J17HBGZHT7JJ3X82:xXjDGYUmKxnwqr5KXNPGldn5LbA=') def test_request_signing2(self): # puts an object into the johnsmith bucket. req = Request('s3://johnsmith/photos/puppy.jpg', method='PUT', headers={ 'Content-Type': 'image/jpeg', 'Date': 'Tue, 27 Mar 2007 21:15:45 +0000', 'Content-Length': '94328', }) httpreq = self.download_request(req, self.spider) self.assertEqual(httpreq.headers['Authorization'], \ 'AWS 0PN5J17HBGZHT7JJ3X82:hcicpDDvL9SsO6AkvxqmIWkmOuQ=') def test_request_signing3(self): # lists the content of the johnsmith bucket. req = Request('s3://johnsmith/?prefix=photos&max-keys=50&marker=puppy', \ method='GET', headers={ 'User-Agent': 'Mozilla/5.0', 'Date': 'Tue, 27 Mar 2007 19:42:41 +0000', }) httpreq = self.download_request(req, self.spider) self.assertEqual(httpreq.headers['Authorization'], \ 'AWS 0PN5J17HBGZHT7JJ3X82:jsRt/rhG+Vtp88HrYL706QhE4w4=') def test_request_signing4(self): # fetches the access control policy sub-resource for the 'johnsmith' bucket. req = Request('s3://johnsmith/?acl', \ method='GET', headers={'Date': 'Tue, 27 Mar 2007 19:44:46 +0000'}) httpreq = self.download_request(req, self.spider) self.assertEqual(httpreq.headers['Authorization'], \ 'AWS 0PN5J17HBGZHT7JJ3X82:thdUi9VAkzhkniLj96JIrOPGi0g=') def test_request_signing5(self): # deletes an object from the 'johnsmith' bucket using the # path-style and Date alternative. req = Request('s3://johnsmith/photos/puppy.jpg', \ method='DELETE', headers={ 'Date': 'Tue, 27 Mar 2007 21:20:27 +0000', 'x-amz-date': 'Tue, 27 Mar 2007 21:20:26 +0000', }) httpreq = self.download_request(req, self.spider) self.assertEqual(httpreq.headers['Authorization'], \ 'AWS 0PN5J17HBGZHT7JJ3X82:k3nL7gH3+PadhTEVn5Ip83xlYzk=') def test_request_signing6(self): # uploads an object to a CNAME style virtual hosted bucket with metadata. 
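        # The x-amz-* metadata headers and Content-MD5 are part of the canonical
        # string that gets signed, so the expected signature depends on them too.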
req = Request('s3://static.johnsmith.net:8080/db-backup.dat.gz', \ method='PUT', headers={ 'User-Agent': 'curl/7.15.5', 'Host': 'static.johnsmith.net:8080', 'Date': 'Tue, 27 Mar 2007 21:06:08 +0000', 'x-amz-acl': 'public-read', 'content-type': 'application/x-download', 'Content-MD5': '4gJE4saaMU4BqNR0kLY+lw==', 'X-Amz-Meta-ReviewedBy': 'joe@johnsmith.net,jane@johnsmith.net', 'X-Amz-Meta-FileChecksum': '0x02661779', 'X-Amz-Meta-ChecksumAlgorithm': 'crc32', 'Content-Disposition': 'attachment; filename=database.dat', 'Content-Encoding': 'gzip', 'Content-Length': '5913339', }) httpreq = self.download_request(req, self.spider) self.assertEqual(httpreq.headers['Authorization'], \ 'AWS 0PN5J17HBGZHT7JJ3X82:C0FlOtU8Ylb9KDTpZqYkZPX91iI=') Scrapy-0.14.4/scrapy/tests/test_contrib_feedexport.py0000600000016101777760000000675211754531743023116 0ustar buildbotnogroupimport os, urlparse from cStringIO import StringIO from zope.interface.verify import verifyObject from twisted.trial import unittest from twisted.internet import defer from w3lib.url import path_to_file_uri from scrapy.spider import BaseSpider from scrapy.contrib.feedexport import IFeedStorage, FileFeedStorage, FTPFeedStorage, S3FeedStorage, StdoutFeedStorage from scrapy.utils.test import assert_aws_environ class FileFeedStorageTest(unittest.TestCase): def test_store_file_uri(self): path = os.path.abspath(self.mktemp()) uri = path_to_file_uri(path) return self._assert_stores(FileFeedStorage(uri), path) def test_store_file_uri_makedirs(self): path = os.path.abspath(self.mktemp()) path = os.path.join(path, 'more', 'paths', 'file.txt') uri = path_to_file_uri(path) return self._assert_stores(FileFeedStorage(uri), path) def test_store_direct_path(self): path = os.path.abspath(self.mktemp()) return self._assert_stores(FileFeedStorage(path), path) def test_store_direct_path_relative(self): path = self.mktemp() return self._assert_stores(FileFeedStorage(path), path) def test_interface(self): path = self.mktemp() st = FileFeedStorage(path) verifyObject(IFeedStorage, st) @defer.inlineCallbacks def _assert_stores(self, storage, path): spider = BaseSpider("default") file = storage.open(spider) file.write("content") yield storage.store(file) self.failUnless(os.path.exists(path)) self.failUnlessEqual(open(path).read(), "content") class FTPFeedStorageTest(unittest.TestCase): def test_store(self): uri = os.environ.get('FEEDTEST_FTP_URI') path = os.environ.get('FEEDTEST_FTP_PATH') if not (uri and path): raise unittest.SkipTest("No FTP server available for testing") st = FTPFeedStorage(uri) verifyObject(IFeedStorage, st) return self._assert_stores(st, path) @defer.inlineCallbacks def _assert_stores(self, storage, path): spider = BaseSpider("default") file = storage.open(spider) file.write("content") yield storage.store(file) self.failUnless(os.path.exists(path)) self.failUnlessEqual(open(path).read(), "content") # again, to check s3 objects are overwritten yield storage.store(StringIO("new content")) self.failUnlessEqual(open(path).read(), "new content") class S3FeedStorageTest(unittest.TestCase): @defer.inlineCallbacks def test_store(self): assert_aws_environ() uri = os.environ.get('FEEDTEST_S3_URI') if not uri: raise unittest.SkipTest("No S3 URI available for testing") from boto import connect_s3 storage = S3FeedStorage(uri) verifyObject(IFeedStorage, storage) file = storage.open(BaseSpider("default")) file.write("content") yield storage.store(file) u = urlparse.urlparse(uri) key = connect_s3().get_bucket(u.hostname, validate=False).get_key(u.path) 
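        # Read the object back through boto to confirm the feed really reached S3.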
self.failUnlessEqual(key.get_contents_as_string(), "content") class StdoutFeedStorageTest(unittest.TestCase): @defer.inlineCallbacks def test_store(self): out = StringIO() storage = StdoutFeedStorage('stdout:', _stdout=out) file = storage.open(BaseSpider("default")) file.write("content") yield storage.store(file) self.assertEqual(out.getvalue(), "content") Scrapy-0.14.4/scrapy/tests/test_utils_conf.py0000600000016101777760000000132311754531743021363 0ustar buildbotnogroupimport unittest from scrapy.utils.conf import build_component_list, arglist_to_dict class UtilsConfTestCase(unittest.TestCase): def test_build_component_list(self): base = {'one': 1, 'two': 2, 'three': 3, 'five': 5, 'six': None} custom = {'two': None, 'three': 8, 'four': 4} self.assertEqual(build_component_list(base, custom), ['one', 'four', 'five', 'three']) custom = ['a', 'b', 'c'] self.assertEqual(build_component_list(base, custom), custom) def test_arglist_to_dict(self): self.assertEqual(arglist_to_dict(['arg1=val1', 'arg2=val2']), {'arg1': 'val1', 'arg2': 'val2'}) if __name__ == "__main__": unittest.main() Scrapy-0.14.4/scrapy/tests/__init__.py0000600000016101777760000000066411754531743017725 0ustar buildbotnogroup""" scrapy.tests: this package contains all Scrapy unittests To run all Scrapy unittests go to Scrapy main dir and type: bin/runtests.sh If you're in windows use runtests.bat instead. """ import os tests_datadir = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'sample_data') def get_testdata(*paths): """Return test data""" path = os.path.join(tests_datadir, *paths) return open(path, 'rb').read() Scrapy-0.14.4/scrapy/tests/test_http_response.py0000600000016101777760000003475511754531743022132 0ustar buildbotnogroupimport unittest from scrapy.http import Request, Response, TextResponse, HtmlResponse, XmlResponse, Headers from scrapy.utils.encoding import resolve_encoding class BaseResponseTest(unittest.TestCase): response_class = Response def test_init(self): # Response requires url in the consturctor self.assertRaises(Exception, self.response_class) self.assertTrue(isinstance(self.response_class('http://example.com/'), self.response_class)) # body can be str or None self.assertTrue(isinstance(self.response_class('http://example.com/', body=''), self.response_class)) self.assertTrue(isinstance(self.response_class('http://example.com/', body='body'), self.response_class)) # test presence of all optional parameters self.assertTrue(isinstance(self.response_class('http://example.com/', headers={}, status=200, body=''), self.response_class)) r = self.response_class("http://www.example.com") assert isinstance(r.url, str) self.assertEqual(r.url, "http://www.example.com") self.assertEqual(r.status, 200) assert isinstance(r.headers, Headers) self.assertEqual(r.headers, {}) headers = {"caca": "coco"} body = "a body" r = self.response_class("http://www.example.com", headers=headers, body=body) assert r.headers is not headers self.assertEqual(r.headers["caca"], "coco") r = self.response_class("http://www.example.com", status=301) self.assertEqual(r.status, 301) r = self.response_class("http://www.example.com", status='301') self.assertEqual(r.status, 301) self.assertRaises(ValueError, self.response_class, "http://example.com", status='lala200') def test_copy(self): """Test Response copy""" r1 = self.response_class("http://www.example.com", body="Some body") r1.flags.append('cached') r2 = r1.copy() self.assertEqual(r1.status, r2.status) self.assertEqual(r1.body, r2.body) # make sure flags list is shallow copied 
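        # (the copied flags must be a different list object with equal contents)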
assert r1.flags is not r2.flags, "flags must be a shallow copy, not identical" self.assertEqual(r1.flags, r2.flags) # make sure headers attribute is shallow copied assert r1.headers is not r2.headers, "headers must be a shallow copy, not identical" self.assertEqual(r1.headers, r2.headers) def test_copy_meta(self): req = Request("http://www.example.com") req.meta['foo'] = 'bar' r1 = self.response_class("http://www.example.com", body="Some body", request=req) assert r1.meta is req.meta def test_copy_inherited_classes(self): """Test Response children copies preserve their class""" class CustomResponse(self.response_class): pass r1 = CustomResponse('http://www.example.com') r2 = r1.copy() assert type(r2) is CustomResponse def test_replace(self): """Test Response.replace() method""" hdrs = Headers({"key": "value"}) r1 = self.response_class("http://www.example.com") r2 = r1.replace(status=301, body="New body", headers=hdrs) assert r1.body == '' self.assertEqual(r1.url, r2.url) self.assertEqual((r1.status, r2.status), (200, 301)) self.assertEqual((r1.body, r2.body), ('', "New body")) self.assertEqual((r1.headers, r2.headers), ({}, hdrs)) # Empty attributes (which may fail if not compared properly) r3 = self.response_class("http://www.example.com", flags=['cached']) r4 = r3.replace(body='', flags=[]) self.assertEqual(r4.body, '') self.assertEqual(r4.flags, []) def _assert_response_values(self, response, encoding, body): if isinstance(body, unicode): body_unicode = body body_str = body.encode(encoding) else: body_unicode = body.decode(encoding) body_str = body assert isinstance(response.body, str) self._assert_response_encoding(response, encoding) self.assertEqual(response.body, body_str) self.assertEqual(response.body_as_unicode(), body_unicode) def _assert_response_encoding(self, response, encoding): self.assertEqual(response.encoding, resolve_encoding(encoding)) class ResponseText(BaseResponseTest): def test_no_unicode_url(self): self.assertRaises(TypeError, self.response_class, u'http://www.example.com') class TextResponseTest(BaseResponseTest): response_class = TextResponse def test_replace(self): super(TextResponseTest, self).test_replace() r1 = self.response_class("http://www.example.com", body="hello", encoding="cp852") r2 = r1.replace(url="http://www.example.com/other") r3 = r1.replace(url="http://www.example.com/other", encoding="latin1") assert isinstance(r2, self.response_class) self.assertEqual(r2.url, "http://www.example.com/other") self._assert_response_encoding(r2, "cp852") self.assertEqual(r3.url, "http://www.example.com/other") self.assertEqual(r3._declared_encoding(), "latin1") def test_unicode_url(self): # instantiate with unicode url without encoding (should set default encoding) resp = self.response_class(u"http://www.example.com/") self._assert_response_encoding(resp, self.response_class._DEFAULT_ENCODING) # make sure urls are converted to str resp = self.response_class(url=u"http://www.example.com/", encoding='utf-8') assert isinstance(resp.url, str) resp = self.response_class(url=u"http://www.example.com/price/\xa3", encoding='utf-8') self.assertEqual(resp.url, 'http://www.example.com/price/\xc2\xa3') resp = self.response_class(url=u"http://www.example.com/price/\xa3", encoding='latin-1') self.assertEqual(resp.url, 'http://www.example.com/price/\xa3') resp = self.response_class(u"http://www.example.com/price/\xa3", headers={"Content-type": ["text/html; charset=utf-8"]}) self.assertEqual(resp.url, 'http://www.example.com/price/\xc2\xa3') resp = 
self.response_class(u"http://www.example.com/price/\xa3", headers={"Content-type": ["text/html; charset=iso-8859-1"]}) self.assertEqual(resp.url, 'http://www.example.com/price/\xa3') def test_unicode_body(self): unicode_string = u'\u043a\u0438\u0440\u0438\u043b\u043b\u0438\u0447\u0435\u0441\u043a\u0438\u0439 \u0442\u0435\u043a\u0441\u0442' self.assertRaises(TypeError, self.response_class, 'http://www.example.com', body=u'unicode body') original_string = unicode_string.encode('cp1251') r1 = self.response_class('http://www.example.com', body=original_string, encoding='cp1251') # check body_as_unicode self.assertTrue(isinstance(r1.body_as_unicode(), unicode)) self.assertEqual(r1.body_as_unicode(), unicode_string) def test_encoding(self): r1 = self.response_class("http://www.example.com", headers={"Content-type": ["text/html; charset=utf-8"]}, body="\xc2\xa3") r2 = self.response_class("http://www.example.com", encoding='utf-8', body=u"\xa3") r3 = self.response_class("http://www.example.com", headers={"Content-type": ["text/html; charset=iso-8859-1"]}, body="\xa3") r4 = self.response_class("http://www.example.com", body="\xa2\xa3") r5 = self.response_class("http://www.example.com", headers={"Content-type": ["text/html; charset=None"]}, body="\xc2\xa3") r6 = self.response_class("http://www.example.com", headers={"Content-type": ["text/html; charset=gb2312"]}, body="\xa8D") r7 = self.response_class("http://www.example.com", headers={"Content-type": ["text/html; charset=gbk"]}, body="\xa8D") self.assertEqual(r1._headers_encoding(), "utf-8") self.assertEqual(r2._headers_encoding(), None) self.assertEqual(r2._declared_encoding(), 'utf-8') self._assert_response_encoding(r2, 'utf-8') self.assertEqual(r3._headers_encoding(), "iso-8859-1") self.assertEqual(r3._declared_encoding(), "iso-8859-1") self.assertEqual(r4._headers_encoding(), None) self.assertEqual(r5._headers_encoding(), None) self._assert_response_encoding(r5, "utf-8") assert r4._body_inferred_encoding() is not None and r4._body_inferred_encoding() != 'ascii' self._assert_response_values(r1, 'utf-8', u"\xa3") self._assert_response_values(r2, 'utf-8', u"\xa3") self._assert_response_values(r3, 'iso-8859-1', u"\xa3") self._assert_response_values(r6, 'gb18030', u"\u2015") self._assert_response_values(r7, 'gb18030', u"\u2015") # TextResponse (and subclasses) must be passed a encoding when instantiating with unicode bodies self.assertRaises(TypeError, self.response_class, "http://www.example.com", body=u"\xa3") def test_declared_encoding_invalid(self): """Check that unknown declared encodings are ignored""" r = self.response_class("http://www.example.com", headers={"Content-type": ["text/html; charset=UKNOWN"]}, body="\xc2\xa3") self.assertEqual(r._declared_encoding(), None) self._assert_response_values(r, 'utf-8', u"\xa3") def test_utf16(self): """Test utf-16 because UnicodeDammit is known to have problems with""" r = self.response_class("http://www.example.com", body='\xff\xfeh\x00i\x00', encoding='utf-16') self._assert_response_values(r, 'utf-16', u"hi") def test_invalid_utf8_encoded_body_with_valid_utf8_BOM(self): r6 = self.response_class("http://www.example.com", headers={"Content-type": ["text/html; charset=utf-8"]}, body="\xef\xbb\xbfWORD\xe3\xab") self.assertEqual(r6.encoding, 'utf-8') self.assertEqual(r6.body_as_unicode(), u'\ufeffWORD\ufffd\ufffd') def test_replace_wrong_encoding(self): """Test invalid chars are replaced properly""" r = self.response_class("http://www.example.com", encoding='utf-8', body='PREFIX\xe3\xabSUFFIX') # XXX: 
Policy for replacing invalid chars may suffer minor variations # but it should always contain the unicode replacement char (u'\ufffd') assert u'\ufffd' in r.body_as_unicode(), repr(r.body_as_unicode()) assert u'PREFIX' in r.body_as_unicode(), repr(r.body_as_unicode()) assert u'SUFFIX' in r.body_as_unicode(), repr(r.body_as_unicode()) # Do not destroy html tags due to encoding bugs r = self.response_class("http://example.com", encoding='utf-8', \ body='\xf0value') assert u'value' in r.body_as_unicode(), repr(r.body_as_unicode()) # FIXME: This test should pass once we stop using BeautifulSoup's UnicodeDammit in TextResponse #r = self.response_class("http://www.example.com", body='PREFIX\xe3\xabSUFFIX') #assert u'\ufffd' in r.body_as_unicode(), repr(r.body_as_unicode()) class HtmlResponseTest(TextResponseTest): response_class = HtmlResponse def test_html_encoding(self): body = """Some pagePrice: \xa3100' """ r1 = self.response_class("http://www.example.com", body=body) self._assert_response_values(r1, 'iso-8859-1', body) body = """ Price: \xa3100 """ r2 = self.response_class("http://www.example.com", body=body) self._assert_response_values(r2, 'iso-8859-1', body) # for conflicting declarations headers must take precedence body = """Some pagePrice: \xa3100' """ r3 = self.response_class("http://www.example.com", headers={"Content-type": ["text/html; charset=iso-8859-1"]}, body=body) self._assert_response_values(r3, 'iso-8859-1', body) # make sure replace() preserves the encoding of the original response body = "New body \xa3" r4 = r3.replace(body=body) self._assert_response_values(r4, 'iso-8859-1', body) def test_html5_meta_charset(self): body = """Some pagebla bla""" r1 = self.response_class("http://www.example.com", body=body) self._assert_response_values(r1, 'gb2312', body) def test_httpequiv_meta(self): body = '''''' response = self.response_class('http://example.com', body=body) self._assert_response_values(response, 'gb18030', body) body = '''''' response = self.response_class('http://example.com', body=body) self._assert_response_values(response, 'gb18030', body) class XmlResponseTest(TextResponseTest): response_class = XmlResponse def test_xml_encoding(self): body = "" r1 = self.response_class("http://www.example.com", body=body) self._assert_response_values(r1, self.response_class._DEFAULT_ENCODING, body) body = """""" r2 = self.response_class("http://www.example.com", body=body) self._assert_response_values(r2, 'iso-8859-1', body) # make sure replace() preserves the explicit encoding passed in the constructor body = """""" r3 = self.response_class("http://www.example.com", body=body, encoding='utf-8') body2 = "New body" r4 = r3.replace(body=body2) self._assert_response_values(r4, 'utf-8', body2) def test_replace_encoding(self): # make sure replace() keeps the previous encoding unless overridden explicitly body = """""" body2 = """""" r5 = self.response_class("http://www.example.com", body=body) r6 = r5.replace(body=body2) r7 = r5.replace(body=body2, encoding='utf-8') self._assert_response_values(r5, 'iso-8859-1', body) self._assert_response_values(r6, 'iso-8859-1', body2) self._assert_response_values(r7, 'utf-8', body2) if __name__ == "__main__": unittest.main() Scrapy-0.14.4/scrapy/tests/test_spider.py0000600000016101777760000000275611754531743020517 0ustar buildbotnogroupfrom __future__ import with_statement import warnings from twisted.trial import unittest from scrapy.spider import BaseSpider from scrapy.contrib.spiders.init import InitSpider from scrapy.contrib.spiders import 
CrawlSpider, XMLFeedSpider, CSVFeedSpider, SitemapSpider class BaseSpiderTest(unittest.TestCase): spider_class = BaseSpider def setUp(self): warnings.simplefilter("always") def tearDown(self): warnings.resetwarnings() def test_base_spider(self): spider = self.spider_class("example.com") self.assertEqual(spider.name, 'example.com') self.assertEqual(spider.start_urls, []) def test_spider_args(self): """Constructor arguments are assigned to spider attributes""" spider = self.spider_class('example.com', foo='bar') self.assertEqual(spider.foo, 'bar') def test_spider_without_name(self): """Constructor arguments are assigned to spider attributes""" self.assertRaises(ValueError, self.spider_class) self.assertRaises(ValueError, self.spider_class, somearg='foo') class InitSpiderTest(BaseSpiderTest): spider_class = InitSpider class XMLFeedSpiderTest(BaseSpiderTest): spider_class = XMLFeedSpider class CSVFeedSpiderTest(BaseSpiderTest): spider_class = CSVFeedSpider class CrawlSpiderTest(BaseSpiderTest): spider_class = CrawlSpider class SitemapSpiderTest(BaseSpiderTest): spider_class = SitemapSpider if __name__ == '__main__': unittest.main() Scrapy-0.14.4/scrapy/tests/test_utils_encoding.py0000600000016101777760000000146311754531743022231 0ustar buildbotnogroupimport unittest from scrapy.utils.encoding import encoding_exists, resolve_encoding class UtilsEncodingTestCase(unittest.TestCase): _ENCODING_ALIASES = { 'foo': 'cp1252', 'bar': 'none', } def test_resolve_encoding(self): self.assertEqual(resolve_encoding('latin1', self._ENCODING_ALIASES), 'latin1') self.assertEqual(resolve_encoding('foo', self._ENCODING_ALIASES), 'cp1252') def test_encoding_exists(self): assert encoding_exists('latin1', self._ENCODING_ALIASES) assert encoding_exists('foo', self._ENCODING_ALIASES) assert not encoding_exists('bar', self._ENCODING_ALIASES) assert not encoding_exists('none', self._ENCODING_ALIASES) if __name__ == "__main__": unittest.main() Scrapy-0.14.4/scrapy/tests/test_http_cookies.py0000600000016101777760000000416411754531743021717 0ustar buildbotnogroupfrom urlparse import urlparse from unittest import TestCase from scrapy.http import Request, Response from scrapy.http.cookies import WrappedRequest, WrappedResponse class WrappedRequestTest(TestCase): def setUp(self): self.request = Request("http://www.example.com/page.html", \ headers={"Content-Type": "text/html"}) self.wrapped = WrappedRequest(self.request) def test_get_full_url(self): self.assertEqual(self.wrapped.get_full_url(), self.request.url) def test_get_host(self): self.assertEqual(self.wrapped.get_host(), urlparse(self.request.url).netloc) def test_get_type(self): self.assertEqual(self.wrapped.get_type(), urlparse(self.request.url).scheme) def test_is_unverifiable(self): self.assertFalse(self.wrapped.is_unverifiable()) def test_is_unverifiable2(self): self.request.meta['is_unverifiable'] = True self.assertTrue(self.wrapped.is_unverifiable()) def test_get_origin_req_host(self): self.assertEqual(self.wrapped.get_origin_req_host(), 'www.example.com') def test_has_header(self): self.assertTrue(self.wrapped.has_header('content-type')) self.assertFalse(self.wrapped.has_header('xxxxx')) def test_get_header(self): self.assertEqual(self.wrapped.get_header('content-type'), 'text/html') self.assertEqual(self.wrapped.get_header('xxxxx', 'def'), 'def') def test_header_items(self): self.assertEqual(self.wrapped.header_items(), [('Content-Type', ['text/html'])]) def test_add_unredirected_header(self): self.wrapped.add_unredirected_header('hello', 'world') 
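        # WrappedRequest writes straight through to the wrapped scrapy Request,
        # so the new header must also be visible on self.request.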
self.assertEqual(self.request.headers['hello'], 'world') class WrappedResponseTest(TestCase): def setUp(self): self.response = Response("http://www.example.com/page.html", headers={"Content-TYpe": "text/html"}) self.wrapped = WrappedResponse(self.response) def test_info(self): self.assert_(self.wrapped.info() is self.wrapped) def test_getheaders(self): self.assertEqual(self.wrapped.getheaders('content-type'), ['text/html']) Scrapy-0.14.4/scrapy/tests/test_http_headers.py0000600000016101777760000001113211754531743021667 0ustar buildbotnogroupimport unittest import copy from scrapy.http import Headers class HeadersTest(unittest.TestCase): def test_basics(self): h = Headers({'Content-Type': 'text/html', 'Content-Length': 1234}) assert h['Content-Type'] assert h['Content-Length'] self.assertRaises(KeyError, h.__getitem__, 'Accept') self.assertEqual(h.get('Accept'), None) self.assertEqual(h.getlist('Accept'), []) self.assertEqual(h.get('Accept', '*/*'), '*/*') self.assertEqual(h.getlist('Accept', '*/*'), ['*/*']) self.assertEqual(h.getlist('Accept', ['text/html', 'images/jpeg']), ['text/html','images/jpeg']) def test_single_value(self): h = Headers() h['Content-Type'] = 'text/html' self.assertEqual(h['Content-Type'], 'text/html') self.assertEqual(h.get('Content-Type'), 'text/html') self.assertEqual(h.getlist('Content-Type'), ['text/html']) def test_multivalue(self): h = Headers() h['X-Forwarded-For'] = hlist = ['ip1', 'ip2'] self.assertEqual(h['X-Forwarded-For'], 'ip2') self.assertEqual(h.get('X-Forwarded-For'), 'ip2') self.assertEqual(h.getlist('X-Forwarded-For'), hlist) assert h.getlist('X-Forwarded-For') is not hlist def test_encode_utf8(self): h = Headers({u'key': u'\xa3'}, encoding='utf-8') key, val = dict(h).items()[0] assert isinstance(key, str), key assert isinstance(val[0], str), val[0] self.assertEqual(val[0], '\xc2\xa3') def test_encode_latin1(self): h = Headers({u'key': u'\xa3'}, encoding='latin1') key, val = dict(h).items()[0] self.assertEqual(val[0], '\xa3') def test_encode_multiple(self): h = Headers({u'key': [u'\xa3']}, encoding='utf-8') key, val = dict(h).items()[0] self.assertEqual(val[0], '\xc2\xa3') def test_delete_and_contains(self): h = Headers() h['Content-Type'] = 'text/html' assert 'Content-Type' in h del h['Content-Type'] assert 'Content-Type' not in h def test_setdefault(self): h = Headers() hlist = ['ip1', 'ip2'] olist = h.setdefault('X-Forwarded-For', hlist) assert h.getlist('X-Forwarded-For') is not hlist assert h.getlist('X-Forwarded-For') is olist h = Headers() olist = h.setdefault('X-Forwarded-For', 'ip1') self.assertEqual(h.getlist('X-Forwarded-For'), ['ip1']) assert h.getlist('X-Forwarded-For') is olist def test_iterables(self): idict = {'Content-Type': 'text/html', 'X-Forwarded-For': ['ip1', 'ip2']} h = Headers(idict) self.assertEqual(dict(h), {'Content-Type': ['text/html'], 'X-Forwarded-For': ['ip1', 'ip2']}) self.assertEqual(h.keys(), ['X-Forwarded-For', 'Content-Type']) self.assertEqual(h.items(), [('X-Forwarded-For', ['ip1', 'ip2']), ('Content-Type', ['text/html'])]) self.assertEqual(list(h.iteritems()), [('X-Forwarded-For', ['ip1', 'ip2']), ('Content-Type', ['text/html'])]) self.assertEqual(h.values(), ['ip2', 'text/html']) def test_update(self): h = Headers() h.update({'Content-Type': 'text/html', 'X-Forwarded-For': ['ip1', 'ip2']}) self.assertEqual(h.getlist('Content-Type'), ['text/html']) self.assertEqual(h.getlist('X-Forwarded-For'), ['ip1', 'ip2']) def test_copy(self): h1 = Headers({'header1': ['value1', 'value2']}) h2 = copy.copy(h1) 
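        # copy.copy() on Headers should clone the multi-value lists rather than
        # share them; the identity and equality checks below verify both.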
self.assertEqual(h1, h2) self.assertEqual(h1.getlist('header1'), h2.getlist('header1')) assert h1.getlist('header1') is not h2.getlist('header1') assert isinstance(h2, Headers) def test_appendlist(self): h1 = Headers({'header1': 'value1'}) h1.appendlist('header1', 'value3') self.assertEqual(h1.getlist('header1'), ['value1', 'value3']) h1 = Headers() h1.appendlist('header1', 'value1') h1.appendlist('header1', 'value3') self.assertEqual(h1.getlist('header1'), ['value1', 'value3']) def test_setlist(self): h1 = Headers({'header1': 'value1'}) self.assertEqual(h1.getlist('header1'), ['value1']) h1.setlist('header1', ['value2', 'value3']) self.assertEqual(h1.getlist('header1'), ['value2', 'value3']) def test_setlistdefault(self): h1 = Headers({'header1': 'value1'}) h1.setlistdefault('header1', ['value2', 'value3']) h1.setlistdefault('header2', ['value2', 'value3']) self.assertEqual(h1.getlist('header1'), ['value1']) self.assertEqual(h1.getlist('header2'), ['value2', 'value3']) Scrapy-0.14.4/scrapy/tests/test_downloadermiddleware_cookies.py0000600000016101777760000000521011754531743025125 0ustar buildbotnogroupfrom __future__ import with_statement from unittest import TestCase from scrapy.http import Response, Request from scrapy.spider import BaseSpider from scrapy.contrib.downloadermiddleware.cookies import CookiesMiddleware class CookiesMiddlewareTest(TestCase): def setUp(self): self.spider = BaseSpider('foo') self.mw = CookiesMiddleware() def tearDown(self): self.mw.spider_closed(self.spider) del self.mw def test_basic(self): headers = {'Set-Cookie': 'C1=value1; path=/'} req = Request('http://scrapytest.org/') assert self.mw.process_request(req, self.spider) is None assert 'Cookie' not in req.headers res = Response('http://scrapytest.org/', headers=headers) assert self.mw.process_response(req, res, self.spider) is res #assert res.cookies req2 = Request('http://scrapytest.org/sub1/') assert self.mw.process_request(req2, self.spider) is None self.assertEquals(req2.headers.get('Cookie'), "C1=value1") def test_dont_merge_cookies(self): # merge some cookies into jar headers = {'Set-Cookie': 'C1=value1; path=/'} req = Request('http://scrapytest.org/') res = Response('http://scrapytest.org/', headers=headers) assert self.mw.process_response(req, res, self.spider) is res # test Cookie header is not seted to request req = Request('http://scrapytest.org/dontmerge', meta={'dont_merge_cookies': 1}) assert self.mw.process_request(req, self.spider) is None assert 'Cookie' not in req.headers # check that returned cookies are not merged back to jar res = Response('http://scrapytest.org/dontmerge', headers={'Set-Cookie': 'dont=mergeme; path=/'}) assert self.mw.process_response(req, res, self.spider) is res req = Request('http://scrapytest.org/mergeme') assert self.mw.process_request(req, self.spider) is None self.assertEquals(req.headers.get('Cookie'), 'C1=value1') def test_merge_request_cookies(self): req = Request('http://scrapytest.org/', cookies={'galleta': 'salada'}) assert self.mw.process_request(req, self.spider) is None self.assertEquals(req.headers.get('Cookie'), 'galleta=salada') headers = {'Set-Cookie': 'C1=value1; path=/'} res = Response('http://scrapytest.org/', headers=headers) assert self.mw.process_response(req, res, self.spider) is res req2 = Request('http://scrapytest.org/sub1/') assert self.mw.process_request(req2, self.spider) is None self.assertEquals(req2.headers.get('Cookie'), "C1=value1; galleta=salada") 
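
# A direct-run entry point in the style of the other test modules in this
# package -- a minimal sketch, assuming the module may be run standalone.
if __name__ == "__main__":
    import unittest
    unittest.main()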
Scrapy-0.14.4/scrapy/tests/test_libxml2.py0000600000016101777760000000105611754531743020572 0ustar buildbotnogroupfrom twisted.trial import unittest from scrapy.utils.test import libxml2debug class Libxml2Test(unittest.TestCase): try: import libxml2 except ImportError, e: skip = str(e) @libxml2debug def test_libxml2_bug_2_6_27(self): # this test will fail in version 2.6.27 but passes on 2.6.29+ html = "" node = self.libxml2.htmlParseDoc(html, 'utf-8') result = [str(r) for r in node.xpathEval('//text()')] self.assertEquals(result, ['1', '2', '3']) node.freeDoc() Scrapy-0.14.4/scrapy/tests/test_downloadermiddleware_retry.py0000600000016101777760000000615611754531743024650 0ustar buildbotnogroupimport unittest from twisted.internet.error import TimeoutError as ServerTimeoutError, DNSLookupError, \ ConnectionRefusedError, ConnectionDone, ConnectError, \ ConnectionLost from scrapy.contrib.downloadermiddleware.retry import RetryMiddleware from scrapy.spider import BaseSpider from scrapy.http import Request, Response class RetryTest(unittest.TestCase): def setUp(self): self.spider = BaseSpider('foo') self.mw = RetryMiddleware() self.mw.max_retry_times = 2 def test_priority_adjust(self): req = Request('http://www.scrapytest.org/503') rsp = Response('http://www.scrapytest.org/503', body='', status=503) req2 = self.mw.process_response(req, rsp, self.spider) assert req2.priority < req.priority def test_404(self): req = Request('http://www.scrapytest.org/404') rsp = Response('http://www.scrapytest.org/404', body='', status=404) # dont retry 404s assert self.mw.process_response(req, rsp, self.spider) is rsp def test_dont_retry(self): req = Request('http://www.scrapytest.org/503', meta={'dont_retry': True}) rsp = Response('http://www.scrapytest.org/503', body='', status=503) # first retry r = self.mw.process_response(req, rsp, self.spider) assert r is rsp def test_dont_retry_exc(self): req = Request('http://www.scrapytest.org/503', meta={'dont_retry': True}) rsp = Response('http://www.scrapytest.org/503', body='', status=503) r = self.mw.process_exception(req, DNSLookupError(), self.spider) assert r is None def test_503(self): req = Request('http://www.scrapytest.org/503') rsp = Response('http://www.scrapytest.org/503', body='', status=503) # first retry req = self.mw.process_response(req, rsp, self.spider) assert isinstance(req, Request) self.assertEqual(req.meta['retry_times'], 1) # second retry req = self.mw.process_response(req, rsp, self.spider) assert isinstance(req, Request) self.assertEqual(req.meta['retry_times'], 2) # discard it assert self.mw.process_response(req, rsp, self.spider) is rsp def test_twistederrors(self): for exc in (ServerTimeoutError, DNSLookupError, ConnectionRefusedError, ConnectionDone, ConnectError, ConnectionLost): req = Request('http://www.scrapytest.org/%s' % exc.__name__) self._test_retry_exception(req, exc()) def _test_retry_exception(self, req, exception): # first retry req = self.mw.process_exception(req, exception, self.spider) assert isinstance(req, Request) self.assertEqual(req.meta['retry_times'], 1) # second retry req = self.mw.process_exception(req, exception, self.spider) assert isinstance(req, Request) self.assertEqual(req.meta['retry_times'], 2) # discard it req = self.mw.process_exception(req, exception, self.spider) self.assertEqual(req, None) if __name__ == "__main__": unittest.main() Scrapy-0.14.4/scrapy/tests/test_middleware.py0000600000016101777760000000444511754531743021343 0ustar buildbotnogroupfrom twisted.trial import unittest from scrapy.settings 
import Settings from scrapy.exceptions import NotConfigured from scrapy.middleware import MiddlewareManager class M1(object): def open_spider(self, spider): pass def close_spider(self, spider): pass def process(self, response, request, spider): pass class M2(object): def open_spider(self, spider): pass def close_spider(self, spider): pass pass class M3(object): def process(self, response, request, spider): pass class MOff(object): def open_spider(self, spider): pass def close_spider(self, spider): pass def __init__(self): raise NotConfigured class TestMiddlewareManager(MiddlewareManager): @classmethod def _get_mwlist_from_settings(cls, settings): return ['scrapy.tests.test_middleware.%s' % x for x in ['M1', 'MOff', 'M3']] def _add_middleware(self, mw): super(TestMiddlewareManager, self)._add_middleware(mw) if hasattr(mw, 'process'): self.methods['process'].append(mw.process) class MiddlewareManagerTest(unittest.TestCase): def test_init(self): m1, m2, m3 = M1(), M2(), M3() mwman = TestMiddlewareManager(m1, m2, m3) self.assertEqual(mwman.methods['open_spider'], [m1.open_spider, m2.open_spider]) self.assertEqual(mwman.methods['close_spider'], [m2.close_spider, m1.close_spider]) self.assertEqual(mwman.methods['process'], [m1.process, m3.process]) def test_methods(self): mwman = TestMiddlewareManager(M1(), M2(), M3()) self.assertEqual([x.im_class for x in mwman.methods['open_spider']], [M1, M2]) self.assertEqual([x.im_class for x in mwman.methods['close_spider']], [M2, M1]) self.assertEqual([x.im_class for x in mwman.methods['process']], [M1, M3]) def test_enabled(self): m1, m2, m3 = M1(), M2(), M3() mwman = MiddlewareManager(m1, m2, m3) self.failUnlessEqual(mwman.middlewares, (m1, m2, m3)) def test_enabled_from_settings(self): settings = Settings() mwman = TestMiddlewareManager.from_settings(settings) classes = [x.__class__ for x in mwman.middlewares] self.failUnlessEqual(classes, [M1, M3]) Scrapy-0.14.4/scrapy/tests/test_utils_iterators.py0000600000016101777760000002521711754531743022462 0ustar buildbotnogroupimport os from twisted.trial import unittest from scrapy.utils.iterators import csviter, xmliter from scrapy.contrib_exp.iterators import xmliter_lxml from scrapy.http import XmlResponse, TextResponse, Response from scrapy.tests import get_testdata class XmliterTestCase(unittest.TestCase): xmliter = staticmethod(xmliter) def test_xmliter(self): body = """\ \ \ Type 1\ Name 1\ \ \ Type 2\ Name 2\ \ """ response = XmlResponse(url="http://example.com", body=body) attrs = [] for x in self.xmliter(response, 'product'): attrs.append((x.select("@id").extract(), x.select("name/text()").extract(), x.select("./type/text()").extract())) self.assertEqual(attrs, [(['001'], ['Name 1'], ['Type 1']), (['002'], ['Name 2'], ['Type 2'])]) def test_xmliter_text(self): body = u"""onetwo""" self.assertEqual([x.select("text()").extract() for x in self.xmliter(body, 'product')], [[u'one'], [u'two']]) def test_xmliter_namespaces(self): body = """\ My Dummy Company http://www.mydummycompany.com This is a dummy company. We do nothing. 
Item 1 This is item 1 http://www.mydummycompany.com/items/1 http://www.mydummycompany.com/images/item1.jpg ITEM_1 400 """ response = XmlResponse(url='http://mydummycompany.com', body=body) my_iter = self.xmliter(response, 'item') node = my_iter.next() node.register_namespace('g', 'http://base.google.com/ns/1.0') self.assertEqual(node.select('title/text()').extract(), ['Item 1']) self.assertEqual(node.select('description/text()').extract(), ['This is item 1']) self.assertEqual(node.select('link/text()').extract(), ['http://www.mydummycompany.com/items/1']) self.assertEqual(node.select('g:image_link/text()').extract(), ['http://www.mydummycompany.com/images/item1.jpg']) self.assertEqual(node.select('g:id/text()').extract(), ['ITEM_1']) self.assertEqual(node.select('g:price/text()').extract(), ['400']) self.assertEqual(node.select('image_link/text()').extract(), []) self.assertEqual(node.select('id/text()').extract(), []) self.assertEqual(node.select('price/text()').extract(), []) def test_xmliter_exception(self): body = u"""onetwo""" iter = self.xmliter(body, 'product') iter.next() iter.next() self.assertRaises(StopIteration, iter.next) def test_xmliter_encoding(self): body = '\n\n Some Turkish Characters \xd6\xc7\xde\xdd\xd0\xdc \xfc\xf0\xfd\xfe\xe7\xf6\n\n\n' response = XmlResponse('http://www.example.com', body=body) self.assertEqual( self.xmliter(response, 'item').next().extract(), u'Some Turkish Characters \xd6\xc7\u015e\u0130\u011e\xdc \xfc\u011f\u0131\u015f\xe7\xf6' ) class LxmlXmliterTestCase(XmliterTestCase): xmliter = staticmethod(xmliter_lxml) try: import lxml except ImportError: skip = "lxml not available" def test_xmliter_iterate_namespace(self): body = """\ My Dummy Company http://www.mydummycompany.com This is a dummy company. We do nothing. Item 1 This is item 1 http://www.mydummycompany.com/items/1 http://www.mydummycompany.com/images/item1.jpg http://www.mydummycompany.com/images/item2.jpg """ response = XmlResponse(url='http://mydummycompany.com', body=body) no_namespace_iter = self.xmliter(response, 'image_link') self.assertEqual(len(list(no_namespace_iter)), 0) namespace_iter = self.xmliter(response, 'image_link', 'http://base.google.com/ns/1.0') node = namespace_iter.next() self.assertEqual(node.select('text()').extract(), ['http://www.mydummycompany.com/images/item1.jpg']) node = namespace_iter.next() self.assertEqual(node.select('text()').extract(), ['http://www.mydummycompany.com/images/item2.jpg']) class UtilsCsvTestCase(unittest.TestCase): sample_feeds_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'sample_data', 'feeds') sample_feed_path = os.path.join(sample_feeds_dir, 'feed-sample3.csv') sample_feed2_path = os.path.join(sample_feeds_dir, 'feed-sample4.csv') sample_feed3_path = os.path.join(sample_feeds_dir, 'feed-sample5.csv') def test_csviter_defaults(self): body = get_testdata('feeds', 'feed-sample3.csv') response = TextResponse(url="http://example.com/", body=body) csv = csviter(response) result = [row for row in csv] self.assertEqual(result, [{u'id': u'1', u'name': u'alpha', u'value': u'foobar'}, {u'id': u'2', u'name': u'unicode', u'value': u'\xfan\xedc\xf3d\xe9\u203d'}, {u'id': u'3', u'name': u'multi', u'value': u'foo\nbar'}, {u'id': u'4', u'name': u'empty', u'value': u''}]) # explicit type check cuz' we no like stinkin' autocasting! 
yarrr for result_row in result: self.assert_(all((isinstance(k, unicode) for k in result_row.keys()))) self.assert_(all((isinstance(v, unicode) for v in result_row.values()))) def test_csviter_delimiter(self): body = get_testdata('feeds', 'feed-sample3.csv').replace(',', '\t') response = TextResponse(url="http://example.com/", body=body) csv = csviter(response, delimiter='\t') self.assertEqual([row for row in csv], [{u'id': u'1', u'name': u'alpha', u'value': u'foobar'}, {u'id': u'2', u'name': u'unicode', u'value': u'\xfan\xedc\xf3d\xe9\u203d'}, {u'id': u'3', u'name': u'multi', u'value': u'foo\nbar'}, {u'id': u'4', u'name': u'empty', u'value': u''}]) def test_csviter_delimiter_binary_response_assume_utf8_encoding(self): body = get_testdata('feeds', 'feed-sample3.csv').replace(',', '\t') response = Response(url="http://example.com/", body=body) csv = csviter(response, delimiter='\t') self.assertEqual([row for row in csv], [{u'id': u'1', u'name': u'alpha', u'value': u'foobar'}, {u'id': u'2', u'name': u'unicode', u'value': u'\xfan\xedc\xf3d\xe9\u203d'}, {u'id': u'3', u'name': u'multi', u'value': u'foo\nbar'}, {u'id': u'4', u'name': u'empty', u'value': u''}]) def test_csviter_headers(self): sample = get_testdata('feeds', 'feed-sample3.csv').splitlines() headers, body = sample[0].split(','), '\n'.join(sample[1:]) response = TextResponse(url="http://example.com/", body=body) csv = csviter(response, headers=headers) self.assertEqual([row for row in csv], [{u'id': u'1', u'name': u'alpha', u'value': u'foobar'}, {u'id': u'2', u'name': u'unicode', u'value': u'\xfan\xedc\xf3d\xe9\u203d'}, {u'id': u'3', u'name': u'multi', u'value': u'foo\nbar'}, {u'id': u'4', u'name': u'empty', u'value': u''}]) def test_csviter_falserow(self): body = get_testdata('feeds', 'feed-sample3.csv') body = '\n'.join((body, 'a,b', 'a,b,c,d')) response = TextResponse(url="http://example.com/", body=body) csv = csviter(response) self.assertEqual([row for row in csv], [{u'id': u'1', u'name': u'alpha', u'value': u'foobar'}, {u'id': u'2', u'name': u'unicode', u'value': u'\xfan\xedc\xf3d\xe9\u203d'}, {u'id': u'3', u'name': u'multi', u'value': u'foo\nbar'}, {u'id': u'4', u'name': u'empty', u'value': u''}]) def test_csviter_exception(self): body = get_testdata('feeds', 'feed-sample3.csv') response = TextResponse(url="http://example.com/", body=body) iter = csviter(response) iter.next() iter.next() iter.next() iter.next() self.assertRaises(StopIteration, iter.next) def test_csviter_encoding(self): body1 = get_testdata('feeds', 'feed-sample4.csv') body2 = get_testdata('feeds', 'feed-sample5.csv') response = TextResponse(url="http://example.com/", body=body1, encoding='latin1') csv = csviter(response) self.assertEqual([row for row in csv], [{u'id': u'1', u'name': u'latin1', u'value': u'test'}, {u'id': u'2', u'name': u'something', u'value': u'\xf1\xe1\xe9\xf3'}]) response = TextResponse(url="http://example.com/", body=body2, encoding='cp852') csv = csviter(response) self.assertEqual([row for row in csv], [{u'id': u'1', u'name': u'cp852', u'value': u'test'}, {u'id': u'2', u'name': u'something', u'value': u'\u255a\u2569\u2569\u2569\u2550\u2550\u2557'}]) if __name__ == "__main__": unittest.main() Scrapy-0.14.4/scrapy/tests/test_cmdline/0000700000016101777760000000000011754532077020257 5ustar buildbotnogroupScrapy-0.14.4/scrapy/tests/test_cmdline/extensions.py0000600000016101777760000000046311754531743023034 0ustar buildbotnogroup"""A test extension used to check the settings loading order""" from scrapy.conf import settings 
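# How the expected value 'default + loaded + started' (asserted by the
# CmdlineTest cases in this package's __init__.py) is built up:
#   1. settings.py (or a -s / SCRAPY_TEST1 override) provides the base TEST1 value,
#   2. this module appends 'loaded' at import time (the line below),
#   3. TestExtension.__init__ appends 'started' when the extension is instantiated.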
settings.overrides['TEST1'] = "%s + %s" % (settings['TEST1'], 'loaded') class TestExtension(object): def __init__(self): settings.overrides['TEST1'] = "%s + %s" % (settings['TEST1'], 'started') Scrapy-0.14.4/scrapy/tests/test_cmdline/settings.py0000600000016101777760000000013511754531743022471 0ustar buildbotnogroupEXTENSIONS = [ 'scrapy.tests.test_cmdline.extensions.TestExtension' ] TEST1 = 'default' Scrapy-0.14.4/scrapy/tests/test_cmdline/__init__.py0000600000016101777760000000221611754531743022372 0ustar buildbotnogroupimport sys import os from subprocess import Popen, PIPE import unittest class CmdlineTest(unittest.TestCase): def setUp(self): self.env = os.environ.copy() if 'PYTHONPATH' in os.environ: self.env['PYTHONPATH'] = os.environ['PYTHONPATH'] self.env['SCRAPY_SETTINGS_MODULE'] = 'scrapy.tests.test_cmdline.settings' def _execute(self, *new_args, **kwargs): args = (sys.executable, '-m', 'scrapy.cmdline') + new_args proc = Popen(args, stdout=PIPE, stderr=PIPE, env=self.env, **kwargs) comm = proc.communicate() return comm[0].strip() def test_default_settings(self): self.assertEqual(self._execute('settings', '--get', 'TEST1'), \ 'default + loaded + started') def test_override_settings_using_set_arg(self): self.assertEqual(self._execute('settings', '--get', 'TEST1', '-s', 'TEST1=override'), \ 'override + loaded + started') def test_override_settings_using_envvar(self): self.env['SCRAPY_TEST1'] = 'override' self.assertEqual(self._execute('settings', '--get', 'TEST1'), \ 'override + loaded + started') Scrapy-0.14.4/scrapy/tests/test_http_request.py0000600000016101777760000004352111754531743021753 0ustar buildbotnogroupimport cgi import unittest import xmlrpclib from inspect import getargspec from cStringIO import StringIO from urlparse import urlparse from scrapy.http import Request, FormRequest, XmlRpcRequest, Headers, Response class RequestTest(unittest.TestCase): request_class = Request default_method = 'GET' default_headers = {} default_meta = {} def test_init(self): # Request requires url in the constructor self.assertRaises(Exception, self.request_class) # url argument must be basestring self.assertRaises(TypeError, self.request_class, 123) r = self.request_class('http://www.example.com') r = self.request_class("http://www.example.com") assert isinstance(r.url, str) self.assertEqual(r.url, "http://www.example.com") self.assertEqual(r.method, self.default_method) assert isinstance(r.headers, Headers) self.assertEqual(r.headers, self.default_headers) self.assertEqual(r.meta, self.default_meta) meta = {"lala": "lolo"} headers = {"caca": "coco"} r = self.request_class("http://www.example.com", meta=meta, headers=headers, body="a body") assert r.meta is not meta self.assertEqual(r.meta, meta) assert r.headers is not headers self.assertEqual(r.headers["caca"], "coco") def test_url_no_scheme(self): self.assertRaises(ValueError, self.request_class, 'foo') def test_headers(self): # Different ways of setting headers attribute url = 'http://www.scrapy.org' headers = {'Accept':'gzip', 'Custom-Header':'nothing to tell you'} r = self.request_class(url=url, headers=headers) p = self.request_class(url=url, headers=r.headers) self.assertEqual(r.headers, p.headers) self.assertFalse(r.headers is headers) self.assertFalse(p.headers is r.headers) # headers must not be unicode h = Headers({'key1': u'val1', u'key2': 'val2'}) h[u'newkey'] = u'newval' for k, v in h.iteritems(): self.assert_(isinstance(k, str)) for s in v: self.assert_(isinstance(s, str)) def test_eq(self): url = 'http://www.scrapy.org' 
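# Two requests built from the same URL must not compare equal and must hash as
# distinct objects (no value-based equality is defined), so a set keeps both.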
r1 = self.request_class(url=url) r2 = self.request_class(url=url) self.assertNotEqual(r1, r2) set_ = set() set_.add(r1) set_.add(r2) self.assertEqual(len(set_), 2) def test_url(self): """Request url tests""" r = self.request_class(url="http://www.scrapy.org/path") self.assertEqual(r.url, "http://www.scrapy.org/path") # url quoting on creation r = self.request_class(url="http://www.scrapy.org/blank%20space") self.assertEqual(r.url, "http://www.scrapy.org/blank%20space") r = self.request_class(url="http://www.scrapy.org/blank space") self.assertEqual(r.url, "http://www.scrapy.org/blank%20space") # url encoding r1 = self.request_class(url=u"http://www.scrapy.org/price/\xa3", encoding="utf-8") r2 = self.request_class(url=u"http://www.scrapy.org/price/\xa3", encoding="latin1") self.assertEqual(r1.url, "http://www.scrapy.org/price/%C2%A3") self.assertEqual(r2.url, "http://www.scrapy.org/price/%A3") def test_body(self): r1 = self.request_class(url="http://www.example.com/") assert r1.body == '' r2 = self.request_class(url="http://www.example.com/", body="") assert isinstance(r2.body, str) self.assertEqual(r2.encoding, 'utf-8') # default encoding r3 = self.request_class(url="http://www.example.com/", body=u"Price: \xa3100", encoding='utf-8') assert isinstance(r3.body, str) self.assertEqual(r3.body, "Price: \xc2\xa3100") r4 = self.request_class(url="http://www.example.com/", body=u"Price: \xa3100", encoding='latin1') assert isinstance(r4.body, str) self.assertEqual(r4.body, "Price: \xa3100") def test_ajax_url(self): # ascii url r = self.request_class(url="http://www.example.com/ajax.html#!key=value") self.assertEqual(r.url, "http://www.example.com/ajax.html?_escaped_fragment_=key=value") # unicode url r = self.request_class(url=u"http://www.example.com/ajax.html#!key=value") self.assertEqual(r.url, "http://www.example.com/ajax.html?_escaped_fragment_=key=value") def test_copy(self): """Test Request copy""" def somecallback(): pass r1 = self.request_class("http://www.example.com", callback=somecallback, errback=somecallback) r1.meta['foo'] = 'bar' r2 = r1.copy() # make sure copy does not propagate callbacks assert r1.callback is somecallback assert r1.errback is somecallback assert r2.callback is r1.callback assert r2.errback is r2.errback # make sure meta dict is shallow copied assert r1.meta is not r2.meta, "meta must be a shallow copy, not identical" self.assertEqual(r1.meta, r2.meta) # make sure headers attribute is shallow copied assert r1.headers is not r2.headers, "headers must be a shallow copy, not identical" self.assertEqual(r1.headers, r2.headers) self.assertEqual(r1.encoding, r2.encoding) self.assertEqual(r1.dont_filter, r2.dont_filter) # Request.body can be identical since it's an immutable object (str) def test_copy_inherited_classes(self): """Test Request children copies preserve their class""" class CustomRequest(self.request_class): pass r1 = CustomRequest('http://www.example.com') r2 = r1.copy() assert type(r2) is CustomRequest def test_replace(self): """Test Request.replace() method""" r1 = self.request_class("http://www.example.com", method='GET') hdrs = Headers(dict(r1.headers, key='value')) r2 = r1.replace(method="POST", body="New body", headers=hdrs) self.assertEqual(r1.url, r2.url) self.assertEqual((r1.method, r2.method), ("GET", "POST")) self.assertEqual((r1.body, r2.body), ('', "New body")) self.assertEqual((r1.headers, r2.headers), (self.default_headers, hdrs)) # Empty attributes (which may fail if not compared properly) r3 = self.request_class("http://www.example.com", 
meta={'a': 1}, dont_filter=True) r4 = r3.replace(url="http://www.example.com/2", body='', meta={}, dont_filter=False) self.assertEqual(r4.url, "http://www.example.com/2") self.assertEqual(r4.body, '') self.assertEqual(r4.meta, {}) assert r4.dont_filter is False def test_method_always_str(self): r = self.request_class("http://www.example.com", method=u"POST") assert isinstance(r.method, str) class FormRequestTest(RequestTest): request_class = FormRequest def test_empty_formdata(self): r1 = self.request_class("http://www.example.com", formdata={}) self.assertEqual(r1.body, '') def test_default_encoding(self): # using default encoding (utf-8) data = {'one': 'two', 'price': '\xc2\xa3 100'} r2 = self.request_class("http://www.example.com", formdata=data) self.assertEqual(r2.method, 'POST') self.assertEqual(r2.encoding, 'utf-8') self.assertEqual(r2.body, 'price=%C2%A3+100&one=two') self.assertEqual(r2.headers['Content-Type'], 'application/x-www-form-urlencoded') def test_custom_encoding(self): data = {'price': u'\xa3 100'} r3 = self.request_class("http://www.example.com", formdata=data, encoding='latin1') self.assertEqual(r3.encoding, 'latin1') self.assertEqual(r3.body, 'price=%A3+100') def test_multi_key_values(self): # using multiples values for a single key data = {'price': u'\xa3 100', 'colours': ['red', 'blue', 'green']} r3 = self.request_class("http://www.example.com", formdata=data) self.assertEqual(r3.body, 'colours=red&colours=blue&colours=green&price=%C2%A3+100') def test_from_response_post(self): respbody = """ """ response = Response("http://www.example.com/this/list.html", body=respbody) r1 = self.request_class.from_response(response, formdata={'one': ['two', 'three'], 'six': 'seven'}, callback=lambda x: x) self.assertEqual(r1.method, 'POST') self.assertEqual(r1.headers['Content-type'], 'application/x-www-form-urlencoded') fs = cgi.FieldStorage(StringIO(r1.body), r1.headers, environ={"REQUEST_METHOD": "POST"}) self.assertEqual(r1.url, "http://www.example.com/this/post.php") self.assertEqual(set([f.value for f in fs["test"]]), set(["val1", "val2"])) self.assertEqual(set([f.value for f in fs["one"]]), set(["two", "three"])) self.assertEqual(fs['test2'].value, 'xxx') self.assertEqual(fs['six'].value, 'seven') def test_from_response_extra_headers(self): respbody = """ """ headers = {"Accept-Encoding": "gzip,deflate"} response = Response("http://www.example.com/this/list.html", body=respbody) r1 = self.request_class.from_response(response, formdata={'one': ['two', 'three'], 'six': 'seven'}, headers=headers, callback=lambda x: x) self.assertEqual(r1.method, 'POST') self.assertEqual(r1.headers['Content-type'], 'application/x-www-form-urlencoded') self.assertEqual(r1.headers['Accept-Encoding'], 'gzip,deflate') def test_from_response_get(self): respbody = """ """ response = Response("http://www.example.com/this/list.html", body=respbody) r1 = self.request_class.from_response(response, formdata={'one': ['two', 'three'], 'six': 'seven'}) self.assertEqual(r1.method, 'GET') self.assertEqual(urlparse(r1.url).hostname, "www.example.com") self.assertEqual(urlparse(r1.url).path, "/this/get.php") urlargs = cgi.parse_qs(urlparse(r1.url).query) self.assertEqual(set(urlargs['test']), set(['val1', 'val2'])) self.assertEqual(set(urlargs['one']), set(['two', 'three'])) self.assertEqual(urlargs['test2'], ['xxx']) self.assertEqual(urlargs['six'], ['seven']) def test_from_response_override_params(self): respbody = """ """ response = Response("http://www.example.com/this/list.html", body=respbody) r1 = 
self.request_class.from_response(response, formdata={'two': '2'}) fs = cgi.FieldStorage(StringIO(r1.body), r1.headers, environ={"REQUEST_METHOD": "POST"}) self.assertEqual(fs['one'].value, '1') self.assertEqual(fs['two'].value, '2') def test_from_response_submit_first_clickeable(self): respbody = """ """ response = Response("http://www.example.com/this/list.html", body=respbody) r1 = self.request_class.from_response(response, formdata={'two': '2'}) urlargs = cgi.parse_qs(urlparse(r1.url).query) self.assertEqual(urlargs['clickeable1'], ['clicked1']) self.assertFalse('clickeable2' in urlargs, urlargs) self.assertEqual(urlargs['one'], ['1']) self.assertEqual(urlargs['two'], ['2']) def test_from_response_submit_not_first_clickeable(self): respbody = """ """ response = Response("http://www.example.com/this/list.html", body=respbody) r1 = self.request_class.from_response(response, formdata={'two': '2'}, clickdata={'name': 'clickeable2'}) urlargs = cgi.parse_qs(urlparse(r1.url).query) self.assertEqual(urlargs['clickeable2'], ['clicked2']) self.assertFalse('clickeable1' in urlargs, urlargs) self.assertEqual(urlargs['one'], ['1']) self.assertEqual(urlargs['two'], ['2']) def test_from_response_dont_click(self): respbody = """ """ response = Response("http://www.example.com/this/list.html", body=respbody) r1 = self.request_class.from_response(response, dont_click=True) urlargs = cgi.parse_qs(urlparse(r1.url).query) self.assertFalse('clickeable1' in urlargs, urlargs) self.assertFalse('clickeable2' in urlargs, urlargs) def test_from_response_errors_noform(self): respbody = """""" response = Response("http://www.example.com/lala.html", body=respbody) self.assertRaises(ValueError, self.request_class.from_response, response) def test_from_response_errors_formnumber(self): respbody = """ """ response = Response("http://www.example.com/lala.html", body=respbody) self.assertRaises(IndexError, self.request_class.from_response, response, formnumber=1) def test_from_response_noformname(self): respbody = """ """ response = Response("http://www.example.com/formname.html", body=respbody) r1 = self.request_class.from_response(response, formdata={'two':'3'}, callback=lambda x: x) self.assertEqual(r1.method, 'POST') self.assertEqual(r1.headers['Content-type'], 'application/x-www-form-urlencoded') fs = cgi.FieldStorage(StringIO(r1.body), r1.headers, environ={"REQUEST_METHOD": "POST"}) self.assertEqual(fs['one'].value, '1') self.assertEqual(fs['two'].value, '3') def test_from_response_formname_exists(self): respbody = """ """ response = Response("http://www.example.com/formname.html", body=respbody) r1 = self.request_class.from_response(response, formname="form2", callback=lambda x: x) self.assertEqual(r1.method, 'POST') fs = cgi.FieldStorage(StringIO(r1.body), r1.headers, environ={"REQUEST_METHOD": "POST"}) self.assertEqual(fs['three'].value, "3") self.assertEqual(fs['four'].value, "4") def test_from_response_formname_notexist(self): respbody = """ """ response = Response("http://www.example.com/formname.html", body=respbody) r1 = self.request_class.from_response(response, formname="form3", callback=lambda x: x) self.assertEqual(r1.method, 'POST') fs = cgi.FieldStorage(StringIO(r1.body), r1.headers, environ={"REQUEST_METHOD": "POST"}) self.assertEqual(fs['one'].value, "1") def test_from_response_formname_errors_formnumber(self): respbody = """ """ response = Response("http://www.example.com/formname.html", body=respbody) self.assertRaises(IndexError, self.request_class.from_response, response, formname="form3", 
formnumber=2) class XmlRpcRequestTest(RequestTest): request_class = XmlRpcRequest default_method = 'POST' default_headers = {'Content-Type': ['text/xml']} def _test_request(self, **kwargs): r = self.request_class('http://scrapytest.org/rpc2', **kwargs) self.assertEqual(r.headers['Content-Type'], 'text/xml') self.assertEqual(r.body, xmlrpclib.dumps(**kwargs)) self.assertEqual(r.method, 'POST') self.assertEqual(r.encoding, kwargs.get('encoding', 'utf-8')) self.assertTrue(r.dont_filter, True) def test_xmlrpc_dumps(self): self._test_request(params=('value',)) self._test_request(params=('username', 'password'), methodname='login') self._test_request(params=('response', ), methodresponse='login') self._test_request(params=(u'pas\xa3',), encoding='utf-8') self._test_request(params=(u'pas\xa3',), encoding='latin') self._test_request(params=(None,), allow_none=1) self.assertRaises(TypeError, self._test_request) self.assertRaises(TypeError, self._test_request, params=(None,)) if __name__ == "__main__": unittest.main() Scrapy-0.14.4/scrapy/tests/test_utils_signal.py0000600000016101777760000000566311754531743021726 0ustar buildbotnogroupfrom twisted.trial import unittest from twisted.python import log as txlog from twisted.python.failure import Failure from twisted.internet import defer, reactor from scrapy.xlib.pydispatch import dispatcher from scrapy.utils.signal import send_catch_log, send_catch_log_deferred from scrapy import log class SendCatchLogTest(unittest.TestCase): @defer.inlineCallbacks def test_send_catch_log(self): test_signal = object() handlers_called = set() def log_received(event): handlers_called.add(log_received) assert "error_handler" in event['message'][0] assert event['logLevel'] == log.ERROR txlog.addObserver(log_received) dispatcher.connect(self.error_handler, signal=test_signal) dispatcher.connect(self.ok_handler, signal=test_signal) result = yield defer.maybeDeferred(self._get_result, test_signal, arg='test', \ handlers_called=handlers_called) assert self.error_handler in handlers_called assert self.ok_handler in handlers_called assert log_received in handlers_called self.assertEqual(result[0][0], self.error_handler) self.assert_(isinstance(result[0][1], Failure)) self.assertEqual(result[1], (self.ok_handler, "OK")) txlog.removeObserver(log_received) self.flushLoggedErrors() dispatcher.disconnect(self.error_handler, signal=test_signal) dispatcher.disconnect(self.ok_handler, signal=test_signal) def _get_result(self, signal, *a, **kw): return send_catch_log(signal, *a, **kw) def error_handler(self, arg, handlers_called): handlers_called.add(self.error_handler) a = 1/0 def ok_handler(self, arg, handlers_called): handlers_called.add(self.ok_handler) assert arg == 'test' return "OK" class SendCatchLogDeferredTest(SendCatchLogTest): def _get_result(self, signal, *a, **kw): return send_catch_log_deferred(signal, *a, **kw) class SendCatchLogDeferredTest2(SendCatchLogTest): def ok_handler(self, arg, handlers_called): handlers_called.add(self.ok_handler) assert arg == 'test' d = defer.Deferred() reactor.callLater(0, d.callback, "OK") return d def _get_result(self, signal, *a, **kw): return send_catch_log_deferred(signal, *a, **kw) class SendCatchLogTest2(unittest.TestCase): def test_error_logged_if_deferred_not_supported(self): test_signal = object() test_handler = lambda: defer.Deferred() log_events = [] txlog.addObserver(log_events.append) dispatcher.connect(test_handler, test_signal) send_catch_log(test_signal) self.failUnless(log_events) self.failUnless("Cannot return deferreds 
from signal handler" in str(log_events)) txlog.removeObserver(log_events.append) self.flushLoggedErrors() dispatcher.disconnect(test_handler, test_signal) Scrapy-0.14.4/scrapy/tests/test_spidermiddleware_offsite.py0000600000016101777760000000323611754531743024266 0ustar buildbotnogroupfrom unittest import TestCase from scrapy.http import Response, Request from scrapy.spider import BaseSpider from scrapy.contrib.spidermiddleware.offsite import OffsiteMiddleware class TestOffsiteMiddleware(TestCase): def setUp(self): self.spider = self._get_spider() self.mw = OffsiteMiddleware() self.mw.spider_opened(self.spider) def _get_spider(self): return BaseSpider('foo', allowed_domains=['scrapytest.org', 'scrapy.org']) def test_process_spider_output(self): res = Response('http://scrapytest.org') onsite_reqs = [Request('http://scrapytest.org/1'), Request('http://scrapy.org/1'), Request('http://sub.scrapy.org/1'), Request('http://offsite.tld/letmepass', dont_filter=True)] offsite_reqs = [Request('http://scrapy2.org'), Request('http://offsite.tld/')] reqs = onsite_reqs + offsite_reqs out = list(self.mw.process_spider_output(res, reqs, self.spider)) self.assertEquals(out, onsite_reqs) def tearDown(self): self.mw.spider_closed(self.spider) class TestOffsiteMiddleware2(TestOffsiteMiddleware): def _get_spider(self): return BaseSpider('foo', allowed_domains=None) def test_process_spider_output(self): res = Response('http://scrapytest.org') reqs = [Request('http://a.com/b.html'), Request('http://b.com/1')] out = list(self.mw.process_spider_output(res, reqs, self.spider)) self.assertEquals(out, reqs) class TestOffsiteMiddleware3(TestOffsiteMiddleware2): def _get_spider(self): return BaseSpider('foo') Scrapy-0.14.4/scrapy/tests/test_downloadermiddleware_httpcompression.py0000600000016101777760000001312111754531743026732 0ustar buildbotnogroupfrom __future__ import with_statement from unittest import TestCase from os.path import join, abspath, dirname from cStringIO import StringIO from gzip import GzipFile from scrapy.spider import BaseSpider from scrapy.http import Response, Request, HtmlResponse from scrapy.contrib.downloadermiddleware.httpcompression import HttpCompressionMiddleware from scrapy.tests import tests_datadir from scrapy.utils.encoding import resolve_encoding SAMPLEDIR = join(tests_datadir, 'compressed') FORMAT = { 'gzip': ('html-gzip.bin', 'gzip'), 'x-gzip': ('html-gzip.bin', 'gzip'), 'rawdeflate': ('html-rawdeflate.bin', 'deflate'), 'zlibdeflate': ('html-zlibdeflate.bin', 'deflate'), } class HttpCompressionTest(TestCase): def setUp(self): self.spider = BaseSpider('foo') self.mw = HttpCompressionMiddleware() def _getresponse(self, coding): if coding not in FORMAT: raise ValueError() samplefile, contentencoding = FORMAT[coding] with open(join(SAMPLEDIR, samplefile), 'rb') as sample: body = sample.read() headers = { 'Server': 'Yaws/1.49 Yet Another Web Server', 'Date': 'Sun, 08 Mar 2009 00:41:03 GMT', 'Content-Length': len(body), 'Content-Type': 'text/html', 'Content-Encoding': contentencoding, } response = Response('http://scrapytest.org/', body=body, headers=headers) response.request = Request('http://scrapytest.org', headers={'Accept-Encoding': 'gzip,deflate'}) return response def test_process_request(self): request = Request('http://scrapytest.org') assert 'Accept-Encoding' not in request.headers self.mw.process_request(request, self.spider) self.assertEqual(request.headers.get('Accept-Encoding'), 'x-gzip,gzip,deflate') def test_process_response_gzip(self): response = self._getresponse('gzip') 
request = response.request self.assertEqual(response.headers['Content-Encoding'], 'gzip') newresponse = self.mw.process_response(request, response, self.spider) assert newresponse is not response assert newresponse.body.startswith('Some page""" zf = GzipFile(fileobj=f, mode='wb') zf.write(plainbody) zf.close() response = Response("http;//www.example.com/", headers=headers, body=f.getvalue()) request = Request("http://www.example.com/") newresponse = self.mw.process_response(request, response, self.spider) assert isinstance(newresponse, HtmlResponse) self.assertEqual(newresponse.body, plainbody) self.assertEqual(newresponse.encoding, resolve_encoding('gb2312')) def test_process_response_force_recalculate_encoding(self): headers = { 'Content-Type': 'text/html', 'Content-Encoding': 'gzip', } f = StringIO() plainbody = """Some page""" zf = GzipFile(fileobj=f, mode='wb') zf.write(plainbody) zf.close() response = HtmlResponse("http;//www.example.com/page.html", headers=headers, body=f.getvalue()) request = Request("http://www.example.com/") newresponse = self.mw.process_response(request, response, self.spider) assert isinstance(newresponse, HtmlResponse) self.assertEqual(newresponse.body, plainbody) self.assertEqual(newresponse.encoding, resolve_encoding('gb2312')) Scrapy-0.14.4/scrapy/tests/test_downloadermiddleware_httpcache.py0000600000016101777760000001511511754531743025441 0ustar buildbotnogroupimport unittest, tempfile, shutil, time from scrapy.http import Response, HtmlResponse, Request from scrapy.spider import BaseSpider from scrapy.contrib.downloadermiddleware.httpcache import FilesystemCacheStorage, HttpCacheMiddleware from scrapy.settings import Settings from scrapy.exceptions import IgnoreRequest class HttpCacheMiddlewareTest(unittest.TestCase): storage_class = FilesystemCacheStorage def setUp(self): self.spider = BaseSpider('example.com') self.tmpdir = tempfile.mkdtemp() self.request = Request('http://www.example.com', headers={'User-Agent': 'test'}) self.response = Response('http://www.example.com', headers={'Content-Type': 'text/html'}, body='test body', status=202) def tearDown(self): shutil.rmtree(self.tmpdir) def _get_settings(self, **new_settings): settings = { 'HTTPCACHE_ENABLED': True, 'HTTPCACHE_DIR': self.tmpdir, 'HTTPCACHE_EXPIRATION_SECS': 1, 'HTTPCACHE_IGNORE_HTTP_CODES': [], } settings.update(new_settings) return Settings(settings) def _get_storage(self, **new_settings): return self.storage_class(self._get_settings(**new_settings)) def _get_middleware(self, **new_settings): return HttpCacheMiddleware(self._get_settings(**new_settings)) def test_storage(self): storage = self._get_storage() request2 = self.request.copy() assert storage.retrieve_response(self.spider, request2) is None storage.store_response(self.spider, self.request, self.response) response2 = storage.retrieve_response(self.spider, request2) assert isinstance(response2, HtmlResponse) # inferred from content-type header self.assertEqualResponse(self.response, response2) time.sleep(2) # wait for cache to expire assert storage.retrieve_response(self.spider, request2) is None def test_storage_never_expire(self): storage = self._get_storage(HTTPCACHE_EXPIRATION_SECS=0) assert storage.retrieve_response(self.spider, self.request) is None storage.store_response(self.spider, self.request, self.response) time.sleep(0.5) # give the chance to expire assert storage.retrieve_response(self.spider, self.request) def test_middleware(self): mw = HttpCacheMiddleware(self._get_settings()) assert 
mw.process_request(self.request, self.spider) is None mw.process_response(self.request, self.response, self.spider) response = mw.process_request(self.request, self.spider) assert isinstance(response, HtmlResponse) self.assertEqualResponse(self.response, response) assert 'cached' in response.flags def test_different_request_response_urls(self): mw = HttpCacheMiddleware(self._get_settings()) req = Request('http://host.com/path') res = Response('http://host2.net/test.html') assert mw.process_request(req, self.spider) is None mw.process_response(req, res, self.spider) cached = mw.process_request(req, self.spider) assert isinstance(cached, Response) self.assertEqualResponse(res, cached) assert 'cached' in cached.flags def test_middleware_ignore_missing(self): mw = self._get_middleware(HTTPCACHE_IGNORE_MISSING=True) self.assertRaises(IgnoreRequest, mw.process_request, self.request, self.spider) mw.process_response(self.request, self.response, self.spider) response = mw.process_request(self.request, self.spider) assert isinstance(response, HtmlResponse) self.assertEqualResponse(self.response, response) assert 'cached' in response.flags def test_middleware_ignore_schemes(self): # http responses are cached by default req, res = Request('http://test.com/'), Response('http://test.com/') mw = self._get_middleware() assert mw.process_request(req, self.spider) is None mw.process_response(req, res, self.spider) cached = mw.process_request(req, self.spider) assert isinstance(cached, Response), type(cached) self.assertEqualResponse(res, cached) assert 'cached' in cached.flags # file response is not cached by default req, res = Request('file:///tmp/t.txt'), Response('file:///tmp/t.txt') mw = self._get_middleware() assert mw.process_request(req, self.spider) is None mw.process_response(req, res, self.spider) assert mw.storage.retrieve_response(self.spider, req) is None assert mw.process_request(req, self.spider) is None # s3 scheme response is cached by default req, res = Request('s3://bucket/key'), Response('http://bucket/key') mw = self._get_middleware() assert mw.process_request(req, self.spider) is None mw.process_response(req, res, self.spider) cached = mw.process_request(req, self.spider) assert isinstance(cached, Response), type(cached) self.assertEqualResponse(res, cached) assert 'cached' in cached.flags # ignore s3 scheme req, res = Request('s3://bucket/key2'), Response('http://bucket/key2') mw = self._get_middleware(HTTPCACHE_IGNORE_SCHEMES=['s3']) assert mw.process_request(req, self.spider) is None mw.process_response(req, res, self.spider) assert mw.storage.retrieve_response(self.spider, req) is None assert mw.process_request(req, self.spider) is None def test_middleware_ignore_http_codes(self): # test response is not cached mw = self._get_middleware(HTTPCACHE_IGNORE_HTTP_CODES=[202]) assert mw.process_request(self.request, self.spider) is None mw.process_response(self.request, self.response, self.spider) assert mw.storage.retrieve_response(self.spider, self.request) is None assert mw.process_request(self.request, self.spider) is None # test response is cached mw = self._get_middleware(HTTPCACHE_IGNORE_HTTP_CODES=[203]) mw.process_response(self.request, self.response, self.spider) response = mw.process_request(self.request, self.spider) assert isinstance(response, HtmlResponse) self.assertEqualResponse(self.response, response) assert 'cached' in response.flags def assertEqualResponse(self, response1, response2): self.assertEqual(response1.url, response2.url) self.assertEqual(response1.status, 
response2.status) self.assertEqual(response1.headers, response2.headers) self.assertEqual(response1.body, response2.body) if __name__ == '__main__': unittest.main() Scrapy-0.14.4/scrapy/tests/test_spidermanager/0000700000016101777760000000000011754532077021465 5ustar buildbotnogroupScrapy-0.14.4/scrapy/tests/test_spidermanager/test_spiders/0000700000016101777760000000000011754532077024175 5ustar buildbotnogroupScrapy-0.14.4/scrapy/tests/test_spidermanager/test_spiders/spider1.py0000600000016101777760000000021311754531743026113 0ustar buildbotnogroupfrom scrapy.spider import BaseSpider class Spider1(BaseSpider): name = "spider1" allowed_domains = ["scrapy1.org", "scrapy3.org"] Scrapy-0.14.4/scrapy/tests/test_spidermanager/test_spiders/spider2.py0000600000016101777760000000021311754531743026114 0ustar buildbotnogroupfrom scrapy.spider import BaseSpider class Spider2(BaseSpider): name = "spider2" allowed_domains = ["scrapy2.org", "scrapy3.org"] Scrapy-0.14.4/scrapy/tests/test_spidermanager/test_spiders/spider3.py0000600000016101777760000000036111754531743026121 0ustar buildbotnogroupfrom scrapy.spider import BaseSpider class Spider3(BaseSpider): name = "spider3" allowed_domains = ['spider3.com'] @classmethod def handles_request(cls, request): return request.url == 'http://spider3.com/onlythis' Scrapy-0.14.4/scrapy/tests/test_spidermanager/test_spiders/spider0.py0000600000016101777760000000016611754531743026121 0ustar buildbotnogroupfrom scrapy.spider import BaseSpider class Spider0(BaseSpider): allowed_domains = ["scrapy1.org", "scrapy3.org"] Scrapy-0.14.4/scrapy/tests/test_spidermanager/test_spiders/__init__.py0000600000016101777760000000000011754531743026275 0ustar buildbotnogroupScrapy-0.14.4/scrapy/tests/test_spidermanager/__init__.py0000600000016101777760000000503611754531743023603 0ustar buildbotnogroupimport sys import os import weakref import shutil from zope.interface.verify import verifyObject from twisted.trial import unittest # ugly hack to avoid cyclic imports of scrapy.spider when running this test # alone import scrapy.spider from scrapy.interfaces import ISpiderManager from scrapy.spidermanager import SpiderManager from scrapy.http import Request module_dir = os.path.dirname(os.path.abspath(__file__)) class SpiderManagerTest(unittest.TestCase): def setUp(self): orig_spiders_dir = os.path.join(module_dir, 'test_spiders') self.tmpdir = self.mktemp() os.mkdir(self.tmpdir) self.spiders_dir = os.path.join(self.tmpdir, 'test_spiders_xxx') shutil.copytree(orig_spiders_dir, self.spiders_dir) sys.path.append(self.tmpdir) self.spiderman = SpiderManager(['test_spiders_xxx']) def tearDown(self): del self.spiderman sys.path.remove(self.tmpdir) def test_interface(self): verifyObject(ISpiderManager, self.spiderman) def test_list(self): self.assertEqual(set(self.spiderman.list()), set(['spider1', 'spider2', 'spider3'])) def test_create(self): spider1 = self.spiderman.create("spider1") self.assertEqual(spider1.__class__.__name__, 'Spider1') spider2 = self.spiderman.create("spider2", foo="bar") self.assertEqual(spider2.__class__.__name__, 'Spider2') self.assertEqual(spider2.foo, 'bar') def test_find_by_request(self): self.assertEqual(self.spiderman.find_by_request(Request('http://scrapy1.org/test')), ['spider1']) self.assertEqual(self.spiderman.find_by_request(Request('http://scrapy2.org/test')), ['spider2']) self.assertEqual(set(self.spiderman.find_by_request(Request('http://scrapy3.org/test'))), set(['spider1', 'spider2'])) 
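# http://scrapy3.org appears in the allowed_domains of both spider1 and
# spider2 (see test_spiders/spider1.py and spider2.py below), so both names
# are returned; the following assertions cover unknown hosts and spider3,
# which only claims the single URL accepted by its handles_request() classmethod.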
self.assertEqual(self.spiderman.find_by_request(Request('http://scrapy999.org/test')), []) self.assertEqual(self.spiderman.find_by_request(Request('http://spider3.com')), []) self.assertEqual(self.spiderman.find_by_request(Request('http://spider3.com/onlythis')), ['spider3']) def test_load_spider_module(self): self.spiderman = SpiderManager(['scrapy.tests.test_spidermanager.test_spiders.spider1']) assert len(self.spiderman._spiders) == 1 def test_load_base_spider(self): self.spiderman = SpiderManager(['scrapy.tests.test_spidermanager.test_spiders.spider0']) assert len(self.spiderman._spiders) == 0 Scrapy-0.14.4/scrapy/tests/test_utils_httpobj.py0000600000016101777760000000124411754531743022112 0ustar buildbotnogroupimport unittest import urlparse from scrapy.http import Request from scrapy.utils.httpobj import urlparse_cached class HttpobjUtilsTest(unittest.TestCase): def test_urlparse_cached(self): url = "http://www.example.com/index.html" request1 = Request(url) request2 = Request(url) req1a = urlparse_cached(request1) req1b = urlparse_cached(request1) req2 = urlparse_cached(request2) urlp = urlparse.urlparse(url) assert req1a == req2 assert req1a == urlp assert req1a is req1b assert req1a is not req2 assert req1a is not req2 if __name__ == "__main__": unittest.main() Scrapy-0.14.4/scrapy/signals.py0000600000016101777760000000111111754531743016450 0ustar buildbotnogroup""" Scrapy signals These signals are documented in docs/topics/signals.rst. Please don't add new signals here without documenting them there. """ engine_started = object() engine_stopped = object() spider_opened = object() spider_idle = object() spider_closed = object() spider_error = object() request_received = object() response_received = object() response_downloaded = object() item_scraped = object() item_dropped = object() stats_spider_opened = object() stats_spider_closing = object() stats_spider_closed = object() item_passed = item_scraped # for backwards compatibility Scrapy-0.14.4/scrapy/mail.py0000600000016101777760000000715611754531743015751 0ustar buildbotnogroup""" Mail sending helpers See documentation in docs/topics/email.rst """ from cStringIO import StringIO from email.MIMEMultipart import MIMEMultipart from email.MIMENonMultipart import MIMENonMultipart from email.MIMEBase import MIMEBase from email.MIMEText import MIMEText from email.Utils import COMMASPACE, formatdate from email import Encoders from twisted.internet import defer, reactor from twisted.mail.smtp import ESMTPSenderFactory from scrapy import log from scrapy.exceptions import NotConfigured from scrapy.conf import settings from scrapy.utils.signal import send_catch_log # signal sent when message is sent # args: to, subject, body, cc, attach, msg mail_sent = object() class MailSender(object): def __init__(self, smtphost=None, mailfrom=None, smtpuser=None, smtppass=None, \ smtpport=None, debug=False): self.smtphost = smtphost or settings['MAIL_HOST'] self.smtpport = smtpport or settings.getint('MAIL_PORT') self.smtpuser = smtpuser or settings['MAIL_USER'] self.smtppass = smtppass or settings['MAIL_PASS'] self.mailfrom = mailfrom or settings['MAIL_FROM'] self.debug = debug if not self.smtphost or not self.mailfrom: raise NotConfigured("MAIL_HOST and MAIL_FROM settings are required") def send(self, to, subject, body, cc=None, attachs=()): if attachs: msg = MIMEMultipart() else: msg = MIMENonMultipart('text', 'plain') msg['From'] = self.mailfrom msg['To'] = COMMASPACE.join(to) msg['Date'] = formatdate(localtime=True) msg['Subject'] = subject rcpts 
= to[:] if cc: rcpts.extend(cc) msg['Cc'] = COMMASPACE.join(cc) if attachs: msg.attach(MIMEText(body)) for attach_name, mimetype, f in attachs: part = MIMEBase(*mimetype.split('/')) part.set_payload(f.read()) Encoders.encode_base64(part) part.add_header('Content-Disposition', 'attachment; filename="%s"' \ % attach_name) msg.attach(part) else: msg.set_payload(body) send_catch_log(signal=mail_sent, to=to, subject=subject, body=body, cc=cc, attach=attachs, msg=msg) if self.debug: log.msg('Debug mail sent OK: To=%s Cc=%s Subject="%s" Attachs=%d' % \ (to, cc, subject, len(attachs)), level=log.DEBUG) return dfd = self._sendmail(rcpts, msg.as_string()) dfd.addCallbacks(self._sent_ok, self._sent_failed, callbackArgs=[to, cc, subject, len(attachs)], errbackArgs=[to, cc, subject, len(attachs)]) reactor.addSystemEventTrigger('before', 'shutdown', lambda: dfd) return dfd def _sent_ok(self, result, to, cc, subject, nattachs): log.msg('Mail sent OK: To=%s Cc=%s Subject="%s" Attachs=%d' % \ (to, cc, subject, nattachs)) def _sent_failed(self, failure, to, cc, subject, nattachs): errstr = str(failure.value) log.msg('Unable to send mail: To=%s Cc=%s Subject="%s" Attachs=%d - %s' % \ (to, cc, subject, nattachs, errstr), level=log.ERROR) def _sendmail(self, to_addrs, msg): msg = StringIO(msg) d = defer.Deferred() factory = ESMTPSenderFactory(self.smtpuser, self.smtppass, self.mailfrom, \ to_addrs, msg, d, heloFallback=True, requireAuthentication=False, \ requireTransportSecurity=False) factory.noisy = False reactor.connectTCP(self.smtphost, self.smtpport, factory) return d Scrapy-0.14.4/scrapy/logformatter.py0000600000016101777760000000153311754531743017525 0ustar buildbotnogroupimport os from twisted.python.failure import Failure class LogFormatter(object): """Class for generating log messages for different actions. 
All methods must return a plain string which doesn't include the log level or the timestamp """ def crawled(self, request, response, spider): referer = request.headers.get('Referer') flags = ' %s' % str(response.flags) if response.flags else '' return u"Crawled (%d) %s (referer: %s)%s" % (response.status, \ request, referer, flags) def scraped(self, item, response, spider): src = response.getErrorMessage() if isinstance(response, Failure) else response return u"Scraped from %s%s%s" % (src, os.linesep, item) def dropped(self, item, exception, response, spider): return u"Dropped: %s%s%s" % (exception, os.linesep, item) Scrapy-0.14.4/scrapy/squeue.py0000600000016101777760000000212611754531743016326 0ustar buildbotnogroup""" Scheduler queues """ import marshal, cPickle as pickle from scrapy.utils import queue def _serializable_queue(queue_class, serialize, deserialize): class SerializableQueue(queue_class): def push(self, obj): s = serialize(obj) super(SerializableQueue, self).push(s) def pop(self): s = super(SerializableQueue, self).pop() if s: return deserialize(s) return SerializableQueue def _pickle_serialize(obj): try: return pickle.dumps(obj, protocol=2) except pickle.PicklingError, e: raise ValueError(str(e)) PickleFifoDiskQueue = _serializable_queue(queue.FifoDiskQueue, \ _pickle_serialize, pickle.loads) PickleLifoDiskQueue = _serializable_queue(queue.LifoDiskQueue, \ _pickle_serialize, pickle.loads) MarshalFifoDiskQueue = _serializable_queue(queue.FifoDiskQueue, \ marshal.dumps, marshal.loads) MarshalLifoDiskQueue = _serializable_queue(queue.LifoDiskQueue, \ marshal.dumps, marshal.loads) FifoMemoryQueue = queue.FifoMemoryQueue LifoMemoryQueue = queue.LifoMemoryQueue Scrapy-0.14.4/scrapy/shell.py0000600000016101777760000001121311754531743016123 0ustar buildbotnogroup""" Scrapy Shell See documentation in docs/topics/shell.rst """ import signal from twisted.internet import reactor, threads from w3lib.url import any_to_uri from scrapy.item import BaseItem from scrapy.spider import BaseSpider from scrapy.selector import XPathSelector, XmlXPathSelector, HtmlXPathSelector from scrapy.utils.spider import create_spider_for_request from scrapy.utils.misc import load_object from scrapy.utils.request import request_deferred from scrapy.utils.response import open_in_browser from scrapy.utils.console import start_python_console from scrapy.settings import Settings from scrapy.http import Request, Response, HtmlResponse, XmlResponse class Shell(object): relevant_classes = (BaseSpider, Request, Response, BaseItem, \ XPathSelector, Settings) def __init__(self, crawler, update_vars=None, inthread=False, code=None): self.crawler = crawler self.update_vars = update_vars or (lambda x: None) self.item_class = load_object(crawler.settings['DEFAULT_ITEM_CLASS']) self.spider = None self.inthread = inthread self.code = code self.vars = {} def start(self, *a, **kw): # disable accidental Ctrl-C key press from shutting down the engine signal.signal(signal.SIGINT, signal.SIG_IGN) if self.inthread: return threads.deferToThread(self._start, *a, **kw) else: self._start(*a, **kw) def _start(self, url=None, request=None, response=None, spider=None): if url: self.fetch(url, spider) elif request: self.fetch(request, spider) elif response: request = response.request self.populate_vars(response, request, spider) else: self.populate_vars() if self.code: print eval(self.code, globals(), self.vars) else: start_python_console(self.vars) def _schedule(self, request, spider): spider = self._open_spider(request, spider) d = 
request_deferred(request) d.addCallback(lambda x: (x, spider)) self.crawler.engine.crawl(request, spider) return d def _open_spider(self, request, spider): if self.spider: return self.spider if spider is None: spider = create_spider_for_request(self.crawler.spiders, request, \ BaseSpider('default'), log_multiple=True) spider.set_crawler(self.crawler) self.crawler.engine.open_spider(spider, close_if_idle=False) self.spider = spider return spider def fetch(self, request_or_url, spider=None): if isinstance(request_or_url, Request): request = request_or_url url = request.url else: url = any_to_uri(request_or_url) request = Request(url, dont_filter=True) request.meta['handle_httpstatus_all'] = True response = None response, spider = threads.blockingCallFromThread(reactor, \ self._schedule, request, spider) self.populate_vars(response, request, spider) def populate_vars(self, response=None, request=None, spider=None): self.vars['item'] = self.item_class() self.vars['settings'] = self.crawler.settings self.vars['spider'] = spider self.vars['request'] = request self.vars['response'] = response self.vars['xxs'] = XmlXPathSelector(response) \ if isinstance(response, XmlResponse) else None self.vars['hxs'] = HtmlXPathSelector(response) \ if isinstance(response, HtmlResponse) else None if self.inthread: self.vars['fetch'] = self.fetch self.vars['view'] = open_in_browser self.vars['shelp'] = self.print_help self.update_vars(self.vars) if not self.code: self.print_help() def print_help(self): self.p("Available Scrapy objects:") for k, v in sorted(self.vars.iteritems()): if self._is_relevant(v): self.p(" %-10s %s" % (k, v)) self.p("Useful shortcuts:") self.p(" shelp() Shell help (print this help)") if self.inthread: self.p(" fetch(req_or_url) Fetch request (or URL) and update local objects") self.p(" view(response) View response in a browser") def p(self, line=''): print "[s] %s" % line def _is_relevant(self, value): return isinstance(value, self.relevant_classes) def inspect_response(response, spider=None): """Open a shell to inspect the given response""" from scrapy.project import crawler Shell(crawler).start(response=response, spider=spider) Scrapy-0.14.4/scrapy/utils/0000700000016101777760000000000011754532100015566 5ustar buildbotnogroupScrapy-0.14.4/scrapy/utils/http.py0000600000016101777760000000112711754531743017136 0ustar buildbotnogroup""" Transitional module for moving to the w3lib library. For new code, always import from w3lib.http instead of this module """ from w3lib.http import * def decode_chunked_transfer(chunked_body): """Parsed body received with chunked transfer encoding, and return the decoded body. For more info see: http://en.wikipedia.org/wiki/Chunked_transfer_encoding """ body, h, t = '', '', chunked_body while t: h, t = t.split('\r\n', 1) if h == '0': break size = int(h, 16) body += t[:size] t = t[size+2:] return body Scrapy-0.14.4/scrapy/utils/test.py0000600000016101777760000000352411754531743017141 0ustar buildbotnogroup""" This module contains some assorted functions used in tests """ import os from twisted.trial.unittest import SkipTest from scrapy.crawler import Crawler from scrapy.settings import CrawlerSettings def libxml2debug(testfunction): """Decorator for debugging libxml2 memory leaks inside a function. We've found libxml2 memory leaks are something very weird, and can happen sometimes depending on the order where tests are run. So this decorator enables libxml2 memory leaks debugging only when the environment variable LIBXML2_DEBUGLEAKS is set. 
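
    For example (illustrative only), the leak checks can be enabled for a
    test run by setting the variable for that command, e.g.:

        LIBXML2_DEBUGLEAKS=1 bin/runtests.sh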
""" try: import libxml2 except ImportError: return testfunction def newfunc(*args, **kwargs): libxml2.debugMemory(1) testfunction(*args, **kwargs) libxml2.cleanupParser() leaked_bytes = libxml2.debugMemory(0) assert leaked_bytes == 0, "libxml2 memory leak detected: %d bytes" % leaked_bytes if 'LIBXML2_DEBUGLEAKS' in os.environ: return newfunc else: return testfunction def assert_aws_environ(): """Asserts the current environment is suitable for running AWS testsi. Raises SkipTest with the reason if it's not. """ try: import boto except ImportError, e: raise SkipTest(str(e)) if 'AWS_ACCESS_KEY_ID' not in os.environ: raise SkipTest("AWS keys not found") def get_crawler(settings_dict=None): """Return an unconfigured Crawler object. If settings_dict is given, it will be used as the settings present in the settings module of the CrawlerSettings. """ class SettingsModuleMock(object): pass settings_module = SettingsModuleMock() if settings_dict: for k, v in settings_dict.items(): setattr(settings_module, k, v) settings = CrawlerSettings(settings_module) return Crawler(settings) Scrapy-0.14.4/scrapy/utils/httpobj.py0000600000016101777760000000076611754531743017641 0ustar buildbotnogroup"""Helper functions for scrapy.http objects (Request, Response)""" import weakref from urlparse import urlparse _urlparse_cache = weakref.WeakKeyDictionary() def urlparse_cached(request_or_response): """Return urlparse.urlparse caching the result, where the argument can be a Request or Response object """ if request_or_response not in _urlparse_cache: _urlparse_cache[request_or_response] = urlparse(request_or_response.url) return _urlparse_cache[request_or_response] Scrapy-0.14.4/scrapy/utils/project.py0000600000016101777760000000270211754531743017625 0ustar buildbotnogroupfrom os.path import join, dirname, abspath, isabs, exists from os import makedirs, environ import warnings from scrapy.utils.conf import closest_scrapy_cfg, get_config from scrapy.utils.python import is_writable from scrapy.exceptions import NotConfigured DATADIR_CFG_SECTION = 'datadir' def inside_project(): scrapy_module = environ.get('SCRAPY_SETTINGS_MODULE') if scrapy_module is not None: try: __import__(scrapy_module) except ImportError: warnings.warn("Cannot import scrapy settings module %s" % scrapy_module) else: return True return bool(closest_scrapy_cfg()) def project_data_dir(project='default'): """Return the current project data dir, creating it if it doesn't exist""" if not inside_project(): raise NotConfigured("Not inside a project") cfg = get_config() if cfg.has_option(DATADIR_CFG_SECTION, project): d = cfg.get(DATADIR_CFG_SECTION, project) else: scrapy_cfg = closest_scrapy_cfg() if not scrapy_cfg: raise NotConfigured("Unable to find scrapy.cfg file to infer project data dir") d = abspath(join(dirname(scrapy_cfg), '.scrapy')) if not exists(d): makedirs(d) return d def data_path(path): """If path is relative, return the given path inside the project data dir, otherwise return the path unmodified """ return path if isabs(path) else join(project_data_dir(), path) Scrapy-0.14.4/scrapy/utils/job.py0000600000016101777760000000022411754531743016726 0ustar buildbotnogroupimport os def job_dir(settings): path = settings['JOBDIR'] if path and not os.path.exists(path): os.makedirs(path) return path Scrapy-0.14.4/scrapy/utils/pqueue.py0000600000016101777760000000343511754531743017467 0ustar buildbotnogroupclass PriorityQueue(object): """A priority queue implemented using multiple internal queues (typically, FIFO queues). 
The internal queue must implement the following methods: * push(obj) * pop() * close() * __len__() The constructor receives a qfactory argument, which is a callable used to instantiate a new (internal) queue when a new priority is allocated. The qfactory function is called with the priority number as first and only argument. Only integer priorities should be used. Lower numbers are higher priorities. """ def __init__(self, qfactory, startprios=()): self.queues = {} self.qfactory = qfactory for p in startprios: self.queues[p] = self.qfactory(p) self.curprio = min(startprios) if startprios else None def push(self, obj, priority=0): if priority not in self.queues: self.queues[priority] = self.qfactory(priority) q = self.queues[priority] q.push(obj) # this may fail (eg. serialization error) if priority < self.curprio or self.curprio is None: self.curprio = priority def pop(self): if self.curprio is None: return q = self.queues[self.curprio] m = q.pop() if len(q) == 0: del self.queues[self.curprio] q.close() prios = [p for p, q in self.queues.items() if len(q) > 0] self.curprio = min(prios) if prios else None return m def close(self): active = [] for p, q in self.queues.items(): if len(q): active.append(p) q.close() return active def __len__(self): return sum(len(x) for x in self.queues.values()) if self.queues else 0 Scrapy-0.14.4/scrapy/utils/memory.py0000600000016101777760000000416611754531743017475 0ustar buildbotnogroupfrom __future__ import with_statement import os import sys import struct _vmvalue_scale = {'kB': 1024, 'mB': 1024*1024, 'KB': 1024, 'MB': 1024*1024} def get_vmvalue_from_procfs(vmkey='VmSize', pid=None): """Return virtual memory value (in bytes) for the given pid using the /proc filesystem. If pid is not given, it default to the current process pid. Available keys are: VmSize, VmRSS (default), VmStk """ if pid is None: pid = os.getpid() try: t = open('/proc/%d/status' % pid) except IOError: raise RuntimeError("/proc filesystem not supported") if sys.platform == "sunos5": return _vmvalue_solaris(vmkey, pid) else: v = t.read() t.close() # get vmkey line e.g. 'VmRSS: 9999 kB\n ...' i = v.index(vmkey + ':') v = v[i:].split(None, 3) # whitespace if len(v) < 3: return 0 # invalid format? # convert Vm value to bytes return int(v[1]) * _vmvalue_scale[v[2]] def procfs_supported(): try: open('/proc/%d/status' % os.getpid()) except IOError: return False else: return True def _vmvalue_solaris(vmkey, pid): # Memory layout for struct psinfo. # http://docs.sun.com/app/docs/doc/816-5174/proc-4?l=en&a=view _psinfo_struct_format = ( "10i" # pr_flag [0] through pr_egid [9] "5L" # pr_addr [10] through pr_ttyydev [14] "2H" # pr_pctcpu [15] and pr_pctmem [16] "6l" # pr_start [17-18] through pr_ctime [21-22] "16s" # pr_fname [23] "80s" # pr_psargs [24] "2i" # pr_wstat[25] and pr_argc [26] "2L" # pr_argv [27] and pr_envp [28] "b3x" # pr_dmodel[29] and pr_pad2 "7i" # pr_taskid [30] through pr_filler "20i6l" # pr_lwp ) psinfo_file = os.path.join("/proc", str(pid), "psinfo") with open(psinfo_file) as f: parts = struct.unpack(_psinfo_struct_format, f.read()) vmkey_index = { 'VmSize' : 11, # pr_size 'VmRSS' : 12, # pr_rssize } vm_in_kB = parts[vmkey_index[vmkey]] return vm_in_kB * 1024 Scrapy-0.14.4/scrapy/utils/reqser.py0000600000016101777760000000404111754531743017456 0ustar buildbotnogroup""" Helper functions for serializing (and deserializing) requests. """ from scrapy.http import Request def request_to_dict(request, spider=None): """Convert Request object to a dict. 
If a spider is given, it will try to find out the name of the spider method used in the callback and store that as the callback. """ cb = request.callback if callable(cb): cb = _find_method(spider, cb) eb = request.errback if callable(eb): eb = _find_method(spider, eb) d = { 'url': request.url.decode('ascii'), # urls should be safe (safe_string_url) 'callback': cb, 'errback': eb, 'method': request.method, 'headers': dict(request.headers), 'body': request.body, 'cookies': request.cookies, 'meta': request.meta, '_encoding': request._encoding, 'priority': request.priority, 'dont_filter': request.dont_filter, } return d def request_from_dict(d, spider=None): """Create Request object from a dict. If a spider is given, it will try to resolve the callbacks looking at the spider for methods with the same name. """ cb = d['callback'] if cb and spider: cb = _get_method(spider, cb) eb = d['errback'] if eb and spider: eb = _get_method(spider, eb) return Request( url=d['url'].encode('ascii'), callback=cb, errback=eb, method=d['method'], headers=d['headers'], body=d['body'], cookies=d['cookies'], meta=d['meta'], encoding=d['_encoding'], priority=d['priority'], dont_filter=d['dont_filter']) def _find_method(obj, func): if obj and hasattr(func, 'im_self') and func.im_self is obj: return func.im_func.__name__ else: raise ValueError("Function %s is not a method of: %s" % (func, obj)) def _get_method(obj, name): name = str(name) try: return getattr(obj, name) except AttributeError: raise ValueError("Method %r not found in: %s" % (name, obj)) Scrapy-0.14.4/scrapy/utils/datatypes.py0000600000016101777760000002034411754531743020157 0ustar buildbotnogroup""" This module contains data types used by Scrapy which are not included in the Python Standard Library. This module must not depend on any module outside the Standard Library. """ import copy from collections import deque, defaultdict from itertools import chain from scrapy.utils.py27 import OrderedDict class MultiValueDictKeyError(KeyError): pass class MultiValueDict(dict): """ A subclass of dictionary customized to handle multiple values for the same key. >>> d = MultiValueDict({'name': ['Adrian', 'Simon'], 'position': ['Developer']}) >>> d['name'] 'Simon' >>> d.getlist('name') ['Adrian', 'Simon'] >>> d.get('lastname', 'nonexistent') 'nonexistent' >>> d.setlist('lastname', ['Holovaty', 'Willison']) This class exists to solve the irritating problem raised by cgi.parse_qs, which returns a list for every key, even though most Web forms submit single name-value pairs. """ def __init__(self, key_to_list_mapping=()): dict.__init__(self, key_to_list_mapping) def __repr__(self): return "<%s: %s>" % (self.__class__.__name__, dict.__repr__(self)) def __getitem__(self, key): """ Returns the last data value for this key, or [] if it's an empty list; raises KeyError if not found. 
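    A small illustrative doctest (values are hypothetical):

    >>> d = MultiValueDict({'name': ['Adrian', 'Simon'], 'empty': []})
    >>> d['name']
    'Simon'
    >>> d['empty']
    []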
""" try: list_ = dict.__getitem__(self, key) except KeyError: raise MultiValueDictKeyError, "Key %r not found in %r" % (key, self) try: return list_[-1] except IndexError: return [] def __setitem__(self, key, value): dict.__setitem__(self, key, [value]) def __copy__(self): return self.__class__(dict.items(self)) def __deepcopy__(self, memo=None): if memo is None: memo = {} result = self.__class__() memo[id(self)] = result for key, value in dict.items(self): dict.__setitem__(result, copy.deepcopy(key, memo), copy.deepcopy(value, memo)) return result def get(self, key, default=None): "Returns the default value if the requested data doesn't exist" try: val = self[key] except KeyError: return default if val == []: return default return val def getlist(self, key): "Returns an empty list if the requested data doesn't exist" try: return dict.__getitem__(self, key) except KeyError: return [] def setlist(self, key, list_): dict.__setitem__(self, key, list_) def setdefault(self, key, default=None): if key not in self: self[key] = default return self[key] def setlistdefault(self, key, default_list=()): if key not in self: self.setlist(key, default_list) return self.getlist(key) def appendlist(self, key, value): "Appends an item to the internal list associated with key" self.setlistdefault(key, []) dict.__setitem__(self, key, self.getlist(key) + [value]) def items(self): """ Returns a list of (key, value) pairs, where value is the last item in the list associated with the key. """ return [(key, self[key]) for key in self.keys()] def lists(self): "Returns a list of (key, list) pairs." return dict.items(self) def values(self): "Returns a list of the last value on every key list." return [self[key] for key in self.keys()] def copy(self): "Returns a copy of this object." return self.__deepcopy__() def update(self, *args, **kwargs): "update() extends rather than replaces existing key lists. Also accepts keyword args." 
if len(args) > 1: raise TypeError, "update expected at most 1 arguments, got %d" % len(args) if args: other_dict = args[0] if isinstance(other_dict, MultiValueDict): for key, value_list in other_dict.lists(): self.setlistdefault(key, []).extend(value_list) else: try: for key, value in other_dict.items(): self.setlistdefault(key, []).append(value) except TypeError: raise ValueError, "MultiValueDict.update() takes either a MultiValueDict or dictionary" for key, value in kwargs.iteritems(): self.setlistdefault(key, []).append(value) class SiteNode(object): """Class to represent a site node (page, image or any other file)""" def __init__(self, url): self.url = url self.itemnames = [] self.children = [] self.parent = None def add_child(self, node): self.children.append(node) node.parent = self def to_string(self, level=0): s = "%s%s\n" % (' '*level, self.url) if self.itemnames: for n in self.itemnames: s += "%sScraped: %s\n" % (' '*(level+1), n) for node in self.children: s += node.to_string(level+1) return s class CaselessDict(dict): __slots__ = () def __init__(self, seq=None): super(CaselessDict, self).__init__() if seq: self.update(seq) def __getitem__(self, key): return dict.__getitem__(self, self.normkey(key)) def __setitem__(self, key, value): dict.__setitem__(self, self.normkey(key), self.normvalue(value)) def __delitem__(self, key): dict.__delitem__(self, self.normkey(key)) def __contains__(self, key): return dict.__contains__(self, self.normkey(key)) has_key = __contains__ def __copy__(self): return self.__class__(self) copy = __copy__ def normkey(self, key): """Method to normalize dictionary key access""" return key.lower() def normvalue(self, value): """Method to normalize values prior to be setted""" return value def get(self, key, def_val=None): return dict.get(self, self.normkey(key), self.normvalue(def_val)) def setdefault(self, key, def_val=None): return dict.setdefault(self, self.normkey(key), self.normvalue(def_val)) def update(self, seq): seq = seq.iteritems() if isinstance(seq, dict) else seq iseq = ((self.normkey(k), self.normvalue(v)) for k, v in seq) super(CaselessDict, self).update(iseq) @classmethod def fromkeys(cls, keys, value=None): return cls((k, value) for k in keys) def pop(self, key, *args): return dict.pop(self, self.normkey(key), *args) class MergeDict(object): """ A simple class for creating new "virtual" dictionaries that actually look up values in more than one dictionary, passed in the constructor. If a key appears in more than one of the given dictionaries, only the first occurrence will be used. """ def __init__(self, *dicts): self.dicts = dicts def __getitem__(self, key): for dict_ in self.dicts: try: return dict_[key] except KeyError: pass raise KeyError def __copy__(self): return self.__class__(*self.dicts) def get(self, key, default=None): try: return self[key] except KeyError: return default def getlist(self, key): for dict_ in self.dicts: if key in dict_.keys(): return dict_.getlist(key) return [] def items(self): item_list = [] for dict_ in self.dicts: item_list.extend(dict_.items()) return item_list def has_key(self, key): for dict_ in self.dicts: if key in dict_: return True return False __contains__ = has_key def copy(self): """Returns a copy of this object.""" return self.__copy__() class LocalCache(OrderedDict): """Dictionary with a finite number of keys. Older items expires first. 
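    A minimal illustrative example (hypothetical keys):

    >>> cache = LocalCache(limit=2)
    >>> cache['a'] = 1; cache['b'] = 2; cache['c'] = 3
    >>> cache.keys()
    ['b', 'c']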
""" def __init__(self, limit=None): super(LocalCache, self).__init__() self.limit = limit def __setitem__(self, key, value): while len(self) >= self.limit: self.popitem(last=False) super(LocalCache, self).__setitem__(key, value) Scrapy-0.14.4/scrapy/utils/signal.py0000600000016101777760000000506411754531743017440 0ustar buildbotnogroup"""Helper functinos for working with signals""" from twisted.internet.defer import maybeDeferred, DeferredList, Deferred from twisted.python.failure import Failure from scrapy.xlib.pydispatch.dispatcher import Any, Anonymous, liveReceivers, \ getAllReceivers, disconnect from scrapy.xlib.pydispatch.robustapply import robustApply from scrapy import log def send_catch_log(signal=Any, sender=Anonymous, *arguments, **named): """Like pydispatcher.robust.sendRobust but it also logs errors and returns Failures instead of exceptions. """ dont_log = named.pop('dont_log', None) spider = named.get('spider', None) responses = [] for receiver in liveReceivers(getAllReceivers(sender, signal)): try: response = robustApply(receiver, signal=signal, sender=sender, *arguments, **named) if isinstance(response, Deferred): log.msg("Cannot return deferreds from signal handler: %s" % \ receiver, log.ERROR, spider=spider) except dont_log: result = Failure() except Exception: result = Failure() log.err(result, "Error caught on signal handler: %s" % receiver, \ spider=spider) else: result = response responses.append((receiver, result)) return responses def send_catch_log_deferred(signal=Any, sender=Anonymous, *arguments, **named): """Like send_catch_log but supports returning deferreds on signal handlers. Returns a deferred that gets fired once all signal handlers deferreds were fired. """ def logerror(failure, recv): if dont_log is None or not isinstance(failure.value, dont_log): log.err(failure, "Error caught on signal handler: %s" % recv, \ spider=spider) return failure dont_log = named.pop('dont_log', None) spider = named.get('spider', None) dfds = [] for receiver in liveReceivers(getAllReceivers(sender, signal)): d = maybeDeferred(robustApply, receiver, signal=signal, sender=sender, *arguments, **named) d.addErrback(logerror, receiver) d.addBoth(lambda result: (receiver, result)) dfds.append(d) d = DeferredList(dfds) d.addCallback(lambda out: [x[1] for x in out]) return d def disconnect_all(signal=Any, sender=Any): """Disconnect all signal handlers. Useful for cleaning up after running tests """ for receiver in liveReceivers(getAllReceivers(sender, signal)): disconnect(receiver, signal=signal, sender=sender) Scrapy-0.14.4/scrapy/utils/python.py0000600000016101777760000001667511754531743017516 0ustar buildbotnogroup""" This module contains essential stuff that should've come with Python itself ;) It also contains functions (or functionality) which is in Python versions higher than 2.5 which is the lowest version supported by Scrapy. """ import os import re import inspect import weakref from functools import wraps from sgmllib import SGMLParser class FixedSGMLParser(SGMLParser): """The SGMLParser that comes with Python has a bug in the convert_charref() method. 
This is the same class with the bug fixed""" def convert_charref(self, name): """This method fixes a bug in Python's SGMLParser.""" try: n = int(name) except ValueError: return if not 0 <= n <= 127 : # ASCII ends at 127, not 255 return return self.convert_codepoint(n) def flatten(x): """flatten(sequence) -> list Returns a single, flat list which contains all elements retrieved from the sequence and all recursively contained sub-sequences (iterables). Examples: >>> [1, 2, [3,4], (5,6)] [1, 2, [3, 4], (5, 6)] >>> flatten([[[1,2,3], (42,None)], [4,5], [6], 7, (8,9,10)]) [1, 2, 3, 42, None, 4, 5, 6, 7, 8, 9, 10]""" result = [] for el in x: if hasattr(el, "__iter__"): result.extend(flatten(el)) else: result.append(el) return result def unique(list_, key=lambda x: x): """efficient function to uniquify a list preserving item order""" seen = {} result = [] for item in list_: seenkey = key(item) if seenkey in seen: continue seen[seenkey] = 1 result.append(item) return result def str_to_unicode(text, encoding=None, errors='strict'): """Return the unicode representation of text in the given encoding. Unlike .encode(encoding) this function can be applied directly to a unicode object without the risk of double-decoding problems (which can happen if you don't use the default 'ascii' encoding) """ if encoding is None: encoding = 'utf-8' if isinstance(text, str): return text.decode(encoding, errors) elif isinstance(text, unicode): return text else: raise TypeError('str_to_unicode must receive a str or unicode object, got %s' % type(text).__name__) def unicode_to_str(text, encoding=None, errors='strict'): """Return the str representation of text in the given encoding. Unlike .encode(encoding) this function can be applied directly to a str object without the risk of double-decoding problems (which can happen if you don't use the default 'ascii' encoding) """ if encoding is None: encoding = 'utf-8' if isinstance(text, unicode): return text.encode(encoding, errors) elif isinstance(text, str): return text else: raise TypeError('unicode_to_str must receive a unicode or str object, got %s' % type(text).__name__) def re_rsearch(pattern, text, chunk_size=1024): """ This function does a reverse search in a text using a regular expression given in the attribute 'pattern'. Since the re module does not provide this functionality, we have to find for the expression into chunks of text extracted from the end (for the sake of efficiency). At first, a chunk of 'chunk_size' kilobytes is extracted from the end, and searched for the pattern. If the pattern is not found, another chunk is extracted, and another search is performed. This process continues until a match is found, or until the whole file is read. In case the pattern wasn't found, None is returned, otherwise it returns a tuple containing the start position of the match, and the ending (regarding the entire text). 
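    A short illustrative example (hypothetical pattern and text):

    >>> re_rsearch(r'</body>', '<html><body>hello</body></html>')
    (17, 24)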
""" def _chunk_iter(): offset = len(text) while True: offset -= (chunk_size * 1024) if offset <= 0: break yield (text[offset:], offset) yield (text, 0) pattern = re.compile(pattern) if isinstance(pattern, basestring) else pattern for chunk, offset in _chunk_iter(): matches = [match for match in pattern.finditer(chunk)] if matches: return (offset + matches[-1].span()[0], offset + matches[-1].span()[1]) return None def memoizemethod_noargs(method): """Decorator to cache the result of a method (without arguments) using a weak reference to its object """ cache = weakref.WeakKeyDictionary() @wraps(method) def new_method(self, *args, **kwargs): if self not in cache: cache[self] = method(self, *args, **kwargs) return cache[self] return new_method _BINARYCHARS = set(map(chr, range(32))) - set(["\0", "\t", "\n", "\r"]) def isbinarytext(text): """Return True if the given text is considered binary, or false otherwise, by looking for binary bytes at their chars """ assert isinstance(text, str), "text must be str, got '%s'" % type(text).__name__ return any(c in _BINARYCHARS for c in text) def get_func_args(func): """Return the argument name list of a callable""" if inspect.isfunction(func): func_args, _, _, _ = inspect.getargspec(func) elif hasattr(func, '__call__'): try: func_args, _, _, _ = inspect.getargspec(func.__call__) except Exception: func_args = [] else: raise TypeError('%s is not callable' % type(func)) return func_args def equal_attributes(obj1, obj2, attributes): """Compare two objects attributes""" # not attributes given return False by default if not attributes: return False for attr in attributes: # support callables like itemgetter if callable(attr): if not attr(obj1) == attr(obj2): return False else: # check that objects has attribute if not hasattr(obj1, attr): return False if not hasattr(obj2, attr): return False # compare object attributes if not getattr(obj1, attr) == getattr(obj2, attr): return False # all attributes equal return True class WeakKeyCache(object): def __init__(self, default_factory): self.default_factory = default_factory self._weakdict = weakref.WeakKeyDictionary() def __getitem__(self, key): if key not in self._weakdict: self._weakdict[key] = self.default_factory(key) return self._weakdict[key] def stringify_dict(dct_or_tuples, encoding='utf-8', keys_only=True): """Return a (new) dict with the unicode keys (and values if, keys_only is False) of the given dict converted to strings. `dct_or_tuples` can be a dict or a list of tuples, like any dict constructor supports. """ d = {} for k, v in dict(dct_or_tuples).iteritems(): k = k.encode(encoding) if isinstance(k, unicode) else k if not keys_only: v = v.encode(encoding) if isinstance(v, unicode) else v d[k] = v return d def is_writable(path): """Return True if the given path can be written (if it exists) or created (if it doesn't exist) """ if os.path.exists(path): return os.access(path, os.W_OK) else: return os.access(os.path.dirname(path), os.W_OK) def setattr_default(obj, name, value): """Set attribute value, but only if it's not already set. Similar to setdefault() for dicts. 
""" if not hasattr(obj, name): setattr(obj, name, value) Scrapy-0.14.4/scrapy/utils/encoding.py0000600000016101777760000000120611754531743017743 0ustar buildbotnogroupimport codecs from scrapy.conf import settings _ENCODING_ALIASES = dict(settings['ENCODING_ALIASES_BASE']) _ENCODING_ALIASES.update(settings['ENCODING_ALIASES']) def encoding_exists(encoding, _aliases=_ENCODING_ALIASES): """Returns ``True`` if encoding is valid, otherwise returns ``False``""" try: codecs.lookup(resolve_encoding(encoding, _aliases)) except LookupError: return False return True def resolve_encoding(alias, _aliases=_ENCODING_ALIASES): """Return the encoding the given alias maps to, or the alias as passed if no mapping is found. """ return _aliases.get(alias.lower(), alias) Scrapy-0.14.4/scrapy/utils/ossignal.py0000600000016101777760000000167611754531743020007 0ustar buildbotnogroup from __future__ import absolute_import from twisted.internet import reactor import signal signal_names = {} for signame in dir(signal): if signame.startswith("SIG"): signum = getattr(signal, signame) if isinstance(signum, int): signal_names[signum] = signame def install_shutdown_handlers(function, override_sigint=True): """Install the given function as a signal handler for all common shutdown signals (such as SIGINT, SIGTERM, etc). If override_sigint is ``False`` the SIGINT handler won't be install if there is already a handler in place (e.g. Pdb) """ reactor._handleSignals() signal.signal(signal.SIGTERM, function) if signal.getsignal(signal.SIGINT) == signal.default_int_handler or \ override_sigint: signal.signal(signal.SIGINT, function) # Catch Ctrl-Break in windows if hasattr(signal, "SIGBREAK"): signal.signal(signal.SIGBREAK, function) Scrapy-0.14.4/scrapy/utils/testproc.py0000600000016101777760000000263111754531743020023 0ustar buildbotnogroupimport sys import os from twisted.internet import reactor, defer, protocol class ProcessTest(object): command = None prefix = [sys.executable, '-m', 'scrapy.cmdline'] cwd = os.getcwd() # trial chdirs to temp dir def execute(self, args, check_code=True, settings='missing'): env = os.environ.copy() env['SCRAPY_SETTINGS_MODULE'] = settings cmd = self.prefix + [self.command] + list(args) pp = TestProcessProtocol() pp.deferred.addBoth(self._process_finished, cmd, check_code) reactor.spawnProcess(pp, cmd[0], cmd, env=env, path=self.cwd) return pp.deferred def _process_finished(self, pp, cmd, check_code): if pp.exitcode and check_code: msg = "process %s exit with code %d" % (cmd, pp.exitcode) msg += "\n>>> stdout <<<\n%s" % pp.out msg += "\n" msg += "\n>>> stderr <<<\n%s" % pp.err raise RuntimeError(msg) return pp.exitcode, pp.out, pp.err class TestProcessProtocol(protocol.ProcessProtocol): def __init__(self): self.deferred = defer.Deferred() self.out = '' self.err = '' self.exitcode = None def outReceived(self, data): self.out += data def errReceived(self, data): self.err += data def processEnded(self, status): self.exitcode = status.value.exitCode self.deferred.callback(self) Scrapy-0.14.4/scrapy/utils/url.py0000600000016101777760000000524311754531743016764 0ustar buildbotnogroup""" This module contains general purpose URL functions not found in the standard library. Some of the functions that used to be imported from this module have been moved to the w3lib.url module. Always import those from there instead. 
""" import urlparse import urllib import cgi from w3lib.url import * from scrapy.utils.python import unicode_to_str def url_is_from_any_domain(url, domains): """Return True if the url belongs to any of the given domains""" host = parse_url(url).hostname if host: return any(((host == d) or (host.endswith('.%s' % d)) for d in domains)) else: return False def url_is_from_spider(url, spider): """Return True if the url belongs to the given spider""" return url_is_from_any_domain(url, [spider.name] + \ getattr(spider, 'allowed_domains', [])) def url_has_any_extension(url, extensions): return posixpath.splitext(parse_url(url).path)[1].lower() in extensions def canonicalize_url(url, keep_blank_values=True, keep_fragments=False, \ encoding=None): """Canonicalize the given url by applying the following procedures: - sort query arguments, first by key, then by value - percent encode paths and query arguments. non-ASCII characters are percent-encoded using UTF-8 (RFC-3986) - normalize all spaces (in query arguments) '+' (plus symbol) - normalize percent encodings case (%2f -> %2F) - remove query arguments with blank values (unless keep_blank_values is True) - remove fragments (unless keep_fragments is True) The url passed can be a str or unicode, while the url returned is always a str. For examples see the tests in scrapy.tests.test_utils_url """ scheme, netloc, path, params, query, fragment = parse_url(url) keyvals = cgi.parse_qsl(query, keep_blank_values) keyvals.sort() query = urllib.urlencode(keyvals) path = safe_url_string(urllib.unquote(path)) fragment = '' if not keep_fragments else fragment return urlparse.urlunparse((scheme, netloc.lower(), path, params, query, fragment)) def parse_url(url, encoding=None): """Return urlparsed url from the given argument (which could be an already parsed url) """ return url if isinstance(url, urlparse.ParseResult) else \ urlparse.urlparse(unicode_to_str(url, encoding)) def escape_ajax(url): """ Return the crawleable url according to: http://code.google.com/web/ajaxcrawling/docs/getting-started.html TODO: add support for urls with query arguments >>> escape_ajax("www.example.com/ajax.html#!key=value") 'www.example.com/ajax.html?_escaped_fragment_=key=value' """ return url.replace('#!', '?_escaped_fragment_=') Scrapy-0.14.4/scrapy/utils/txweb.py0000600000016101777760000000100211754531743017300 0ustar buildbotnogroupfrom twisted.web import resource from scrapy.utils.py26 import json class JsonResource(resource.Resource): json_encoder = json.JSONEncoder() def render(self, txrequest): r = resource.Resource.render(self, txrequest) return self.render_object(r, txrequest) def render_object(self, obj, txrequest): r = self.json_encoder.encode(obj) + "\n" txrequest.setHeader('Content-Type', 'application/json') txrequest.setHeader('Content-Length', len(r)) return r Scrapy-0.14.4/scrapy/utils/display.py0000600000016101777760000000122411754531743017622 0ustar buildbotnogroup""" pprint and pformat wrappers with colorization support """ import sys from pprint import pformat as pformat_ def _colorize(text, colorize=True): if not colorize or not sys.stdout.isatty(): return text try: from pygments import highlight from pygments.formatters import TerminalFormatter from pygments.lexers import PythonLexer return highlight(text, PythonLexer(), TerminalFormatter()) except ImportError: return text def pformat(obj, *args, **kwargs): return _colorize(pformat_(obj), kwargs.pop('colorize', True)) def pprint(obj, *args, **kwargs): print pformat(obj, *args, **kwargs) 
Scrapy-0.14.4/scrapy/utils/decorator.py0000600000016101777760000000224411754531743020142 0ustar buildbotnogroupimport warnings from functools import wraps from twisted.internet import defer, threads from scrapy.exceptions import ScrapyDeprecationWarning def deprecated(use_instead=None): """This is a decorator which can be used to mark functions as deprecated. It will result in a warning being emitted when the function is used.""" def wrapped(func): @wraps(func) def new_func(*args, **kwargs): message = "Call to deprecated function %s." % func.__name__ if use_instead: message += " Use %s instead." % use_instead warnings.warn(message, category=ScrapyDeprecationWarning, stacklevel=2) return func(*args, **kwargs) return new_func return wrapped def defers(func): """Decorator to make sure a function always returns a deferred""" @wraps(func) def wrapped(*a, **kw): return defer.maybeDeferred(func, *a, **kw) return wrapped def inthread(func): """Decorator to call a function in a thread and return a deferred with the result """ @wraps(func) def wrapped(*a, **kw): return threads.deferToThread(func, *a, **kw) return wrapped Scrapy-0.14.4/scrapy/utils/engine.py0000600000016101777760000000401011754531743017416 0ustar buildbotnogroup"""Some debugging functions for working with the Scrapy engine""" from time import time # used in global tests code def get_engine_status(engine): """Return a report of the current engine status""" global_tests = [ "time()-engine.start_time", "engine.has_capacity()", "engine.downloader.is_idle()", "len(engine.downloader.slots)", "len(engine.downloader.active)", "engine.scraper.is_idle()", "len(engine.scraper.slots)", ] spider_tests = [ "engine.spider_is_idle(spider)", "engine.slots[spider].closing", "len(engine.slots[spider].inprogress)", "len(engine.slots[spider].scheduler.dqs or [])", "len(engine.slots[spider].scheduler.mqs)", "len(engine.scraper.slots[spider].queue)", "len(engine.scraper.slots[spider].active)", "engine.scraper.slots[spider].active_size", "engine.scraper.slots[spider].itemproc_size", "engine.scraper.slots[spider].needs_backout()", ] status = {'global': [], 'spiders': {}} for test in global_tests: try: status['global'] += [(test, eval(test))] except Exception, e: status['global'] += [(test, "%s (exception)" % type(e).__name__)] for spider in engine.slots.keys(): x = [] for test in spider_tests: try: x += [(test, eval(test))] except Exception, e: x += [(test, "%s (exception)" % type(e).__name__)] status['spiders'][spider] = x return status def format_engine_status(engine=None): status = get_engine_status(engine) s = "Execution engine status\n\n" for test, result in status['global']: s += "%-47s : %s\n" % (test, result) s += "\n" for spider, tests in status['spiders'].items(): s += "Spider: %s\n" % spider for test, result in tests: s += " %-50s : %s\n" % (test, result) return s def print_engine_status(engine): print format_engine_status(engine) Scrapy-0.14.4/scrapy/utils/testsite.py0000600000016101777760000000210611754531743020021 0ustar buildbotnogroupimport urlparse from twisted.internet import reactor from twisted.web import server, resource, static, util class SiteTest(object): def setUp(self): self.site = reactor.listenTCP(0, test_site(), interface="127.0.0.1") self.baseurl = "http://localhost:%d/" % self.site.getHost().port def tearDown(self): self.site.stopListening() def url(self, path): return urlparse.urljoin(self.baseurl, path) def test_site(): r = resource.Resource() r.putChild("text", static.Data("Works", "text/plain")) r.putChild("html", 
static.Data("

    Works

    World

    ", "text/html")) r.putChild("enc-gb18030", static.Data("

    gb18030 encoding

    ", "text/html; charset=gb18030")) r.putChild("redirect", util.Redirect("/redirected")) r.putChild("redirected", static.Data("Redirected here", "text/plain")) return server.Site(r) if __name__ == '__main__': port = reactor.listenTCP(0, test_site(), interface="127.0.0.1") print "http://localhost:%d/" % port.getHost().port reactor.run() Scrapy-0.14.4/scrapy/utils/defer.py0000600000016101777760000000614511754531743017251 0ustar buildbotnogroup""" Helper functions for dealing with Twisted deferreds """ from twisted.internet import defer, reactor, task from twisted.python import failure from scrapy.exceptions import IgnoreRequest def defer_fail(_failure): """Same as twisted.internet.defer.fail, but delay calling errback until next reactor loop """ d = defer.Deferred() reactor.callLater(0, d.errback, _failure) return d def defer_succeed(result): """Same as twsited.internet.defer.succed, but delay calling callback until next reactor loop """ d = defer.Deferred() reactor.callLater(0, d.callback, result) return d def defer_result(result): if isinstance(result, defer.Deferred): return result elif isinstance(result, failure.Failure): return defer_fail(result) else: return defer_succeed(result) def mustbe_deferred(f, *args, **kw): """Same as twisted.internet.defer.maybeDeferred, but delay calling callback/errback to next reactor loop """ try: result = f(*args, **kw) # FIXME: Hack to avoid introspecting tracebacks. This to speed up # processing of IgnoreRequest errors which are, by far, the most common # exception in Scrapy - see #125 except IgnoreRequest, e: return defer_fail(failure.Failure(e)) except: return defer_fail(failure.Failure()) else: return defer_result(result) def parallel(iterable, count, callable, *args, **named): """Execute a callable over the objects in the given iterable, in parallel, using no more than ``count`` concurrent calls. Taken from: http://jcalderone.livejournal.com/24285.html """ coop = task.Cooperator() work = (callable(elem, *args, **named) for elem in iterable) return defer.DeferredList([coop.coiterate(work) for i in xrange(count)]) def process_chain(callbacks, input, *a, **kw): """Return a Deferred built by chaining the given callbacks""" d = defer.Deferred() for x in callbacks: d.addCallback(x, *a, **kw) d.callback(input) return d def process_chain_both(callbacks, errbacks, input, *a, **kw): """Return a Deferred built by chaining the given callbacks and errbacks""" d = defer.Deferred() for cb, eb in zip(callbacks, errbacks): d.addCallbacks(cb, eb, callbackArgs=a, callbackKeywords=kw, errbackArgs=a, errbackKeywords=kw) if isinstance(input, failure.Failure): d.errback(input) else: d.callback(input) return d def process_parallel(callbacks, input, *a, **kw): """Return a Deferred with the output of all successful calls to the given callbacks """ dfds = [defer.succeed(input).addCallback(x, *a, **kw) for x in callbacks] d = defer.gatherResults(dfds) d.addErrback(lambda _: _.value.subFailure) return d def iter_errback(iterable, errback, *a, **kw): """Wraps an iterable calling an errback if an error is caught while iterating it. 
""" it = iter(iterable) while 1: try: yield it.next() except StopIteration: break except: errback(failure.Failure(), *a, **kw) Scrapy-0.14.4/scrapy/utils/conf.py0000600000016101777760000000412411754531743017104 0ustar buildbotnogroupimport sys import os from ConfigParser import SafeConfigParser from operator import itemgetter def build_component_list(base, custom): """Compose a component list based on a custom and base dict of components (typically middlewares or extensions), unless custom is already a list, in which case it's returned. """ if isinstance(custom, (list, tuple)): return custom compdict = base.copy() compdict.update(custom) return [k for k, v in sorted(compdict.items(), key=itemgetter(1)) \ if v is not None] def arglist_to_dict(arglist): """Convert a list of arguments like ['arg1=val1', 'arg2=val2', ...] to a dict """ return dict(x.split('=', 1) for x in arglist) def closest_scrapy_cfg(path='.', prevpath=None): """Return the path to the closest scrapy.cfg file by traversing the current directory and its parents """ if path == prevpath: return '' path = os.path.abspath(path) cfgfile = os.path.join(path, 'scrapy.cfg') if os.path.exists(cfgfile): return cfgfile return closest_scrapy_cfg(os.path.dirname(path), path) def init_env(project='default', set_syspath=True): """Initialize environment to use command-line tool from inside a project dir. This sets the Scrapy settings module and modifies the Python path to be able to locate the project module. """ cfg = get_config() if cfg.has_option('settings', project): os.environ['SCRAPY_SETTINGS_MODULE'] = cfg.get('settings', project) closest = closest_scrapy_cfg() if closest: projdir = os.path.dirname(closest) if set_syspath and projdir not in sys.path: sys.path.append(projdir) def get_config(use_closest=True): """Get Scrapy config file as a SafeConfigParser""" sources = get_sources(use_closest) cfg = SafeConfigParser() cfg.read(sources) return cfg def get_sources(use_closest=True): sources = ['/etc/scrapy.cfg', r'c:\scrapy\scrapy.cfg', \ os.path.expanduser('~/.scrapy.cfg')] if use_closest: sources.append(closest_scrapy_cfg()) return sources Scrapy-0.14.4/scrapy/utils/template.py0000600000016101777760000000137311754531743017775 0ustar buildbotnogroup"""Helper functions for working with templates""" from __future__ import with_statement import os import re import string def render_templatefile(path, **kwargs): with open(path, 'rb') as file: raw = file.read() content = string.Template(raw).substitute(**kwargs) with open(path.rstrip('.tmpl'), 'wb') as file: file.write(content) if path.endswith('.tmpl'): os.remove(path) CAMELCASE_INVALID_CHARS = re.compile('[^a-zA-Z\d]') def string_camelcase(string): """ Convert a word to its CamelCase version and remove invalid chars >>> string_camelcase('lost-pound') 'LostPound' >>> string_camelcase('missing_images') 'MissingImages' """ return CAMELCASE_INVALID_CHARS.sub('', string.title()) Scrapy-0.14.4/scrapy/utils/trackref.py0000600000016101777760000000373611754531743017770 0ustar buildbotnogroup"""This module provides some functions and classes to record and report references to live object instances. If you want live objects for a particular class to be tracked, you only have to subclass form object_ref (instead of object). Also, remember to turn on tracking by enabling the TRACK_REFS setting. About performance: This library has a minimal performance impact when enabled, and no performance penalty at all when disabled (as object_ref becomes just an alias to object in that case). 
""" import weakref, os from collections import defaultdict from time import time from operator import itemgetter from types import NoneType from scrapy.conf import settings live_refs = defaultdict(weakref.WeakKeyDictionary) class object_ref(object): """Inherit from this class (instead of object) to a keep a record of live instances""" __slots__ = () def __new__(cls, *args, **kwargs): obj = object.__new__(cls) live_refs[cls][obj] = time() return obj if not settings.getbool('TRACK_REFS'): object_ref = object def format_live_refs(ignore=NoneType): if object_ref is object: return "The trackref module is disabled. Use TRACK_REFS setting to enable it." s = "Live References" + os.linesep + os.linesep now = time() for cls, wdict in live_refs.iteritems(): if not wdict: continue if issubclass(cls, ignore): continue oldest = min(wdict.itervalues()) s += "%-30s %6d oldest: %ds ago" % (cls.__name__, len(wdict), \ now-oldest) + os.linesep return s def print_live_refs(*a, **kw): print format_live_refs(*a, **kw) def get_oldest(class_name): for cls, wdict in live_refs.iteritems(): if cls.__name__ == class_name: if wdict: return min(wdict.iteritems(), key=itemgetter(1))[0] def iter_all(class_name): for cls, wdict in live_refs.iteritems(): if cls.__name__ == class_name: return wdict.iterkeys() Scrapy-0.14.4/scrapy/utils/ftp.py0000600000016101777760000000102111754531743016741 0ustar buildbotnogroupfrom ftplib import error_perm from posixpath import dirname def ftp_makedirs_cwd(ftp, path, first_call=True): """Set the current directory of the FTP connection given in the `ftp` argument (as a ftplib.FTP object), creating all parent directories if they don't exist. The ftplib.FTP object must be already connected and logged in. """ try: ftp.cwd(path) except error_perm: ftp_makedirs_cwd(ftp, dirname(path), False) ftp.mkd(path) if first_call: ftp.cwd(path) Scrapy-0.14.4/scrapy/utils/misc.py0000600000016101777760000000633311754531743017116 0ustar buildbotnogroup"""Helper functions which doesn't fit anywhere else""" import re import hashlib from pkgutil import iter_modules from w3lib.html import remove_entities from scrapy.utils.python import flatten def arg_to_iter(arg): """Convert an argument to an iterable. The argument can be a None, single value, or an iterable. Exception: if arg is a dict, [arg] will be returned """ if arg is None: return [] elif not isinstance(arg, dict) and hasattr(arg, '__iter__'): return arg else: return [arg] def load_object(path): """Load an object given its absolute object path, and return it. object can be a class, function, variable o instance. path ie: 'scrapy.contrib.downloadermiddelware.redirect.RedirectMiddleware' """ try: dot = path.rindex('.') except ValueError: raise ValueError, "Error loading object '%s': not a full path" % path module, name = path[:dot], path[dot+1:] try: mod = __import__(module, {}, {}, ['']) except ImportError, e: raise ImportError, "Error loading object '%s': %s" % (path, e) try: obj = getattr(mod, name) except AttributeError: raise NameError, "Module '%s' doesn't define any object named '%s'" % (module, name) return obj def walk_modules(path, load=False): """Loads a module and all its submodules from a the given module path and returns them. If *any* module throws an exception while importing, that exception is thrown back. For example: walk_modules('scrapy.utils') """ mods = [] mod = __import__(path, {}, {}, ['']) mods.append(mod) if hasattr(mod, '__path__'): for _, subpath, ispkg in iter_modules(mod.__path__): fullpath = path + '.' 
+ subpath if ispkg: mods += walk_modules(fullpath) else: submod = __import__(fullpath, {}, {}, ['']) mods.append(submod) return mods def extract_regex(regex, text, encoding='utf-8'): """Extract a list of unicode strings from the given text/encoding using the following policies: * if the regex contains a named group called "extract" that will be returned * if the regex contains multiple numbered groups, all those will be returned (flattened) * if the regex doesn't contain any group the entire regex matching is returned """ if isinstance(regex, basestring): regex = re.compile(regex) try: strings = [regex.search(text).group('extract')] # named group except: strings = regex.findall(text) # full regex or numbered groups strings = flatten(strings) if isinstance(text, unicode): return [remove_entities(s, keep=['lt', 'amp']) for s in strings] else: return [remove_entities(unicode(s, encoding), keep=['lt', 'amp']) for s in strings] def md5sum(file): """Calculate the md5 checksum of a file-like object without reading its whole content in memory. >>> from StringIO import StringIO >>> md5sum(StringIO('file content to hash')) '784406af91dd5a54fbb9c84c2236595a' """ m = hashlib.md5() while 1: d = file.read(8096) if not d: break m.update(d) return m.hexdigest() Scrapy-0.14.4/scrapy/utils/multipart.py0000600000016101777760000000023311754531743020175 0ustar buildbotnogroup""" Transitional module for moving to the w3lib library. For new code, always import from w3lib.form instead of this module """ from w3lib.form import * Scrapy-0.14.4/scrapy/utils/reactor.py0000600000016101777760000000252011754531743017614 0ustar buildbotnogroupfrom twisted.internet import reactor, error def listen_tcp(portrange, host, factory): """Like reactor.listenTCP but tries different ports in a range.""" assert len(portrange) <= 2, "invalid portrange: %s" % portrange if not hasattr(portrange, '__iter__'): return reactor.listenTCP(portrange, factory, interface=host) if not portrange: return reactor.listenTCP(0, factory, interface=host) if len(portrange) == 1: return reactor.listenTCP(portrange[0], factory, interface=host) for x in range(portrange[0], portrange[1]+1): try: return reactor.listenTCP(x, factory, interface=host) except error.CannotListenError: if x == portrange[1]: raise class CallLaterOnce(object): """Schedule a function to be called in the next reactor loop, but only if it hasn't been already scheduled since the last time it run. """ def __init__(self, func, *a, **kw): self._func = func self._a = a self._kw = kw self._call = None def schedule(self, delay=0): if self._call is None: self._call = reactor.callLater(delay, self) def cancel(self): if self._call: self._call.cancel() def __call__(self): self._call = None return self._func(*self._a, **self._kw) Scrapy-0.14.4/scrapy/utils/spider.py0000600000016101777760000000340411754531743017445 0ustar buildbotnogroupimport inspect from scrapy import log from scrapy.item import BaseItem from scrapy.utils.misc import arg_to_iter def iterate_spider_output(result): return [result] if isinstance(result, BaseItem) else arg_to_iter(result) def iter_spider_classes(module): """Return an iterator over all spider classes defined in the given module that can be instantiated (ie. 
which have name) """ # this needs to be imported here until get rid of the spider manager # singleton in scrapy.spider.spiders from scrapy.spider import BaseSpider for obj in vars(module).itervalues(): if inspect.isclass(obj) and \ issubclass(obj, BaseSpider) and \ obj.__module__ == module.__name__ and \ getattr(obj, 'name', None): yield obj def create_spider_for_request(spidermanager, request, default_spider=None, \ log_none=False, log_multiple=False, **spider_kwargs): """Create a spider to handle the given Request. This will look for the spiders that can handle the given request (using the spider manager) and return a (new) Spider if (and only if) there is only one Spider able to handle the Request. If multiple spiders (or no spider) are found, it will return the default_spider passed. It can optionally log if multiple or no spiders are found. """ snames = spidermanager.find_by_request(request) if len(snames) == 1: return spidermanager.create(snames[0], **spider_kwargs) if len(snames) > 1 and log_multiple: log.msg('More than one spider can handle: %s - %s' % \ (request, ", ".join(snames)), log.ERROR) if len(snames) == 0 and log_none: log.msg('Unable to find spider that handles: %s' % request, log.ERROR) return default_spider Scrapy-0.14.4/scrapy/utils/sitemap.py0000600000016101777760000000225411754531743017623 0ustar buildbotnogroup""" Module for processing Sitemaps. Note: The main purpose of this module is to provide support for the SitemapSpider, its API is subject to change without notice. """ from cStringIO import StringIO from xml.etree.cElementTree import ElementTree class Sitemap(object): """Class to parse Sitemap (type=urlset) and Sitemap Index (type=sitemapindex) files""" def __init__(self, xmltext): tree = ElementTree() tree.parse(StringIO(xmltext)) self._root = tree.getroot() rt = self._root.tag self.type = self._root.tag.split('}', 1)[1] if '}' in rt else rt def __iter__(self): for elem in self._root.getchildren(): d = {} for el in elem.getchildren(): tag = el.tag name = tag.split('}', 1)[1] if '}' in tag else tag d[name] = el.text.strip() if el.text else '' yield d def sitemap_urls_from_robots(robots_text): """Return an iterator over all sitemap urls contained in the given robots.txt file """ for line in robots_text.splitlines(): if line.lstrip().startswith('Sitemap:'): yield line.split(':', 1)[1].strip() Scrapy-0.14.4/scrapy/utils/jsonrpc.py0000600000016101777760000000602611754531743017640 0ustar buildbotnogroup""" This module implements the JSON-RPC 2.0 protocol, as defined in: http://groups.google.com/group/json-rpc/web/json-rpc-2-0 """ import urllib import traceback from scrapy.utils.py26 import json from scrapy.utils.serialize import ScrapyJSONDecoder # JSON-RPC 2.0 errors, as defined in: class jsonrpc_errors: PARSE_ERROR = -32700 INVALID_REQUEST = -32600 METHOD_NOT_FOUND = -32601 INVALID_PARAMS = -32602 INTERNAL_ERROR = -32603 class JsonRpcError(Exception): def __init__(self, code, message, data=None): super(JsonRpcError, self).__init__() self.code = code self.message = message self.data = data def __str__(self): return "JSON-RPC error (code %d): %s" % (self.code, self.message) def jsonrpc_client_call(url, method, *args, **kwargs): """Execute a JSON-RPC call on the given url""" _urllib = kwargs.pop('_urllib', urllib) if args and kwargs: raise ValueError("Pass *args or **kwargs but not both to jsonrpc_client_call") req = {'jsonrpc': '2.0', 'method': method, 'params': args or kwargs, 'id': 1} res = json.loads(_urllib.urlopen(url, json.dumps(req)).read()) if 'result' in 
res: return res['result'] elif 'error' in res: er = res['error'] raise JsonRpcError(er['code'], er['message'], er['data']) else: msg = "JSON-RPC response must contain 'result' or 'error': %s" % res raise ValueError(msg) def jsonrpc_server_call(target, jsonrpc_request, json_decoder=None): """Execute the given JSON-RPC request (as JSON-encoded string) on the given target object and return the JSON-RPC response, as a dict """ if json_decoder is None: json_decoder = ScrapyJSONDecoder() try: req = json_decoder.decode(jsonrpc_request) except Exception, e: return jsonrpc_error(None, jsonrpc_errors.PARSE_ERROR, 'Parse error', \ traceback.format_exc()) try: id, methname = req['id'], req['method'] except KeyError: return jsonrpc_error(None, jsonrpc_errors.INVALID_REQUEST, 'Invalid Request') try: method = getattr(target, methname) except AttributeError: return jsonrpc_error(id, jsonrpc_errors.METHOD_NOT_FOUND, 'Method not found') params = req.get('params', []) a, kw = ([], params) if isinstance(params, dict) else (params, {}) kw = dict([(str(k), v) for k, v in kw.items()]) # convert kw keys to str try: return jsonrpc_result(id, method(*a, **kw)) except Exception, e: return jsonrpc_error(id, jsonrpc_errors.INTERNAL_ERROR, str(e), \ traceback.format_exc()) def jsonrpc_error(id, code, message, data=None): """Create JSON-RPC error response""" return { 'jsonrpc': '2.0', 'error': { 'code': code, 'message': message, 'data': data, }, 'id': id, } def jsonrpc_result(id, result): """Create JSON-RPC result response""" return { 'jsonrpc': '2.0', 'result': result, 'id': id, } Scrapy-0.14.4/scrapy/utils/deprecate.py0000600000016101777760000000066511754531743020121 0ustar buildbotnogroup"""Some helpers for deprecation messages""" import warnings from scrapy.exceptions import ScrapyDeprecationWarning def attribute(obj, oldattr, newattr, version='0.12'): cname = obj.__class__.__name__ warnings.warn("%s.%s attribute is deprecated and will be no longer supported " "in Scrapy %s, use %s.%s attribute instead" % \ (cname, oldattr, version, cname, newattr), ScrapyDeprecationWarning, stacklevel=3) Scrapy-0.14.4/scrapy/utils/response.py0000600000016101777760000000607611754531743020025 0ustar buildbotnogroup""" This module provides some useful functions for working with scrapy.http.Response objects """ import os import re import weakref import webbrowser import tempfile from twisted.web import http from twisted.web.http import RESPONSES from w3lib import html from scrapy.http import Response, HtmlResponse def body_or_str(obj, unicode=True): assert isinstance(obj, (Response, basestring)), \ "obj must be Response or basestring, not %s" % type(obj).__name__ if isinstance(obj, Response): return obj.body_as_unicode() if unicode else obj.body elif isinstance(obj, str): return obj.decode('utf-8') if unicode else obj else: return obj if unicode else obj.encode('utf-8') _baseurl_cache = weakref.WeakKeyDictionary() def get_base_url(response): """Return the base url of the given response, joined with the response url""" if response not in _baseurl_cache: text = response.body_as_unicode()[0:4096] _baseurl_cache[response] = html.get_base_url(text, response.url, \ response.encoding) return _baseurl_cache[response] _noscript_re = re.compile(u'', re.IGNORECASE | re.DOTALL) _script_re = re.compile(u'.*?', re.IGNORECASE | re.DOTALL) _metaref_cache = weakref.WeakKeyDictionary() def get_meta_refresh(response): """Parse the http-equiv refrsh parameter from the given response""" if response not in _metaref_cache: text = 
response.body_as_unicode()[0:4096] text = _noscript_re.sub(u'', text) text = _script_re.sub(u'', text) _metaref_cache[response] = html.get_meta_refresh(text, response.url, \ response.encoding) return _metaref_cache[response] def response_status_message(status): """Return status code plus status text descriptive message >>> response_status_message(200) '200 OK' >>> response_status_message(404) '404 Not Found' """ return '%s %s' % (status, http.responses.get(int(status))) def response_httprepr(response): """Return raw HTTP representation (as string) of the given response. This is provided only for reference, since it's not the exact stream of bytes that was received (that's not exposed by Twisted). """ s = "HTTP/1.1 %d %s\r\n" % (response.status, RESPONSES.get(response.status, '')) if response.headers: s += response.headers.to_string() + "\r\n" s += "\r\n" s += response.body return s def open_in_browser(response, _openfunc=webbrowser.open): """Open the given response in a local web browser, populating the tag for external links to work """ # XXX: this implementation is a bit dirty and could be improved if not isinstance(response, HtmlResponse): raise TypeError("Unsupported response type: %s" % \ response.__class__.__name__) body = response.body if '', '' % response.url) fd, fname = tempfile.mkstemp('.html') os.write(fd, body) os.close(fd) return _openfunc("file://%s" % fname) Scrapy-0.14.4/scrapy/utils/py26.py0000600000016101777760000000775111754531743016770 0ustar buildbotnogroup""" This module provides functions added in Python 2.6, which weren't yet available in Python 2.5. The Python 2.6 function is used when available. """ import sys import os import fnmatch import pkgutil from shutil import copy2, copystat __all__ = ['cpu_count', 'copytree', 'ignore_patterns'] try: import multiprocessing cpu_count = multiprocessing.cpu_count except ImportError: def cpu_count(): ''' Returns the number of CPUs in the system ''' if sys.platform == 'win32': try: num = int(os.environ['NUMBER_OF_PROCESSORS']) except (ValueError, KeyError): num = 0 elif 'bsd' in sys.platform or sys.platform == 'darwin': try: num = int(os.popen('sysctl -n hw.ncpu').read()) except ValueError: num = 0 else: try: num = os.sysconf('SC_NPROCESSORS_ONLN') except (ValueError, OSError, AttributeError): num = 0 if num >= 1: return num else: raise NotImplementedError('cannot determine number of cpus') if sys.version_info >= (2, 6): from shutil import copytree, ignore_patterns else: try: WindowsError except NameError: WindowsError = None class Error(EnvironmentError): pass def ignore_patterns(*patterns): def _ignore_patterns(path, names): ignored_names = [] for pattern in patterns: ignored_names.extend(fnmatch.filter(names, pattern)) return set(ignored_names) return _ignore_patterns def copytree(src, dst, symlinks=False, ignore=None): names = os.listdir(src) if ignore is not None: ignored_names = ignore(src, names) else: ignored_names = set() os.makedirs(dst) errors = [] for name in names: if name in ignored_names: continue srcname = os.path.join(src, name) dstname = os.path.join(dst, name) try: if symlinks and os.path.islink(srcname): linkto = os.readlink(srcname) os.symlink(linkto, dstname) elif os.path.isdir(srcname): copytree(srcname, dstname, symlinks, ignore) else: copy2(srcname, dstname) # XXX What about devices, sockets etc.? 
except (IOError, os.error), why: errors.append((srcname, dstname, str(why))) # catch the Error from the recursive copytree so that we can # continue with other files except Error, err: errors.extend(err.args[0]) try: copystat(src, dst) except OSError, why: if WindowsError is not None and isinstance(why, WindowsError): # Copying file access times may fail on Windows pass else: errors.extend((src, dst, str(why))) if errors: raise Error, errors try: import json except ImportError: import simplejson as json def _get_data(package, resource): loader = pkgutil.get_loader(package) if loader is None or not hasattr(loader, 'get_data'): return None mod = sys.modules.get(package) or loader.load_module(package) if mod is None or not hasattr(mod, '__file__'): return None # Modify the resource name to be compatible with the loader.get_data # signature - an os.path format "filename" starting with the dirname of # the package's __file__ parts = resource.split('/') parts.insert(0, os.path.dirname(mod.__file__)) resource_name = os.path.join(*parts) return loader.get_data(resource_name) # pkgutil.get_data() not available in python 2.5 # see http://docs.python.org/release/2.5/lib/module-pkgutil.html try: get_data = pkgutil.get_data except AttributeError: get_data = _get_data Scrapy-0.14.4/scrapy/utils/request.py0000600000016101777760000000673311754531743017657 0ustar buildbotnogroup""" This module provides some useful functions for working with scrapy.http.Request objects """ import hashlib import weakref from urlparse import urlunparse from twisted.internet.defer import Deferred from w3lib.http import basic_auth_header from scrapy.utils.url import canonicalize_url from scrapy.utils.httpobj import urlparse_cached _fingerprint_cache = weakref.WeakKeyDictionary() def request_fingerprint(request, include_headers=None): """ Return the request fingerprint. The request fingerprint is a hash that uniquely identifies the resource the request points to. For example, take the following two urls: http://www.example.com/query?id=111&cat=222 http://www.example.com/query?cat=222&id=111 Even though those are two different URLs both point to the same resource and are equivalent (ie. they should return the same response). Another example are cookies used to store session ids. Suppose the following page is only accesible to authenticated users: http://www.example.com/members/offers.html Lot of sites use a cookie to store the session id, which adds a random component to the HTTP Request and thus should be ignored when calculating the fingerprint. For this reason, request headers are ignored by default when calculating the fingeprint. If you want to include specific headers use the include_headers argument, which is a list of Request headers to include. 
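    A brief illustrative check with the example URLs above (hypothetical):

    >>> from scrapy.http import Request
    >>> r1 = Request('http://www.example.com/query?id=111&cat=222')
    >>> r2 = Request('http://www.example.com/query?cat=222&id=111')
    >>> request_fingerprint(r1) == request_fingerprint(r2)
    True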
""" if include_headers: include_headers = tuple([h.lower() for h in sorted(include_headers)]) cache = _fingerprint_cache.setdefault(request, {}) if include_headers not in cache: fp = hashlib.sha1() fp.update(request.method) fp.update(canonicalize_url(request.url)) fp.update(request.body or '') if include_headers: for hdr in include_headers: if hdr in request.headers: fp.update(hdr) for v in request.headers.getlist(hdr): fp.update(v) cache[include_headers] = fp.hexdigest() return cache[include_headers] def request_authenticate(request, username, password): """Autenticate the given request (in place) using the HTTP basic access authentication mechanism (RFC 2617) and the given username and password """ request.headers['Authorization'] = basic_auth_header(username, password) def request_httprepr(request): """Return the raw HTTP representation (as string) of the given request. This is provided only for reference since it's not the actual stream of bytes that will be send when performing the request (that's controlled by Twisted). """ parsed = urlparse_cached(request) path = urlunparse(('', '', parsed.path or '/', parsed.params, parsed.query, '')) s = "%s %s HTTP/1.1\r\n" % (request.method, path) s += "Host: %s\r\n" % parsed.hostname if request.headers: s += request.headers.to_string() + "\r\n" s += "\r\n" s += request.body return s def request_deferred(request): """Wrap a request inside a Deferred. This returns a Deferred whose first pair of callbacks are the request callback and errback. The Deferred also triggers when the request callback/errback is executed (ie. when the request is downloaded) """ d = Deferred() if request.callback: d.addCallbacks(request.callback, request.errback) request.callback, request.errback = d.callback, d.errback return d Scrapy-0.14.4/scrapy/utils/__init__.py0000600000016101777760000000000011754531743017703 0ustar buildbotnogroupScrapy-0.14.4/scrapy/utils/console.py0000600000016101777760000000215011754531743017616 0ustar buildbotnogroup def start_python_console(namespace=None, noipython=False): """Start Python console binded to the given namespace. If IPython is available, an IPython console will be started instead, unless `noipython` is True. Also, tab completion will be used on Unix systems. """ if namespace is None: namespace = {} try: try: # use IPython if available if noipython: raise ImportError import IPython try: IPython.embed(user_ns=namespace) except AttributeError: shell = IPython.Shell.IPShellEmbed(argv=[], user_ns=namespace) shell() except ImportError: import code try: # readline module is only available on unix systems import readline except ImportError: pass else: import rlcompleter readline.parse_and_bind("tab:complete") code.interact(banner='', local=namespace) except SystemExit: # raised when using exit() in python code.interact pass Scrapy-0.14.4/scrapy/utils/markup.py0000600000016101777760000000023311754531743017453 0ustar buildbotnogroup""" Transitional module for moving to the w3lib library. 
For new code, always import from w3lib.html instead of this module """ from w3lib.html import * Scrapy-0.14.4/scrapy/utils/py27.py0000600000016101777760000000031211754531743016753 0ustar buildbotnogroup""" Similar to scrapy.utils.py26, but for Python 2.7 """ __all__ = ['OrderedDict'] try: from collections import OrderedDict except ImportError: from scrapy.xlib.ordereddict import OrderedDict Scrapy-0.14.4/scrapy/utils/queue.py0000600000016101777760000001132111754531743017300 0ustar buildbotnogroupfrom __future__ import with_statement import os import struct import glob from collections import deque from scrapy.utils.py26 import json class FifoMemoryQueue(object): """Memory FIFO queue.""" def __init__(self): self.q = deque() def push(self, obj): self.q.appendleft(obj) def pop(self): if self.q: return self.q.pop() def close(self): pass def __len__(self): return len(self.q) class LifoMemoryQueue(FifoMemoryQueue): """Memory LIFO queue.""" def push(self, obj): self.q.append(obj) class FifoDiskQueue(object): """Persistent FIFO queue.""" szhdr_format = ">L" szhdr_size = struct.calcsize(szhdr_format) def __init__(self, path, chunksize=100000): self.path = path if not os.path.exists(path): os.makedirs(path) self.info = self._loadinfo(chunksize) self.chunksize = self.info['chunksize'] self.headf = self._openchunk(self.info['head'][0], 'ab+') self.tailf = self._openchunk(self.info['tail'][0]) self.tailf.seek(self.info['tail'][2]) def push(self, string): hnum, hpos = self.info['head'] hpos += 1 szhdr = struct.pack(self.szhdr_format, len(string)) os.write(self.headf.fileno(), szhdr + string) if hpos == self.chunksize: hpos = 0 hnum += 1 self.headf.close() self.headf = self._openchunk(hnum, 'ab+') self.info['size'] += 1 self.info['head'] = hnum, hpos def _openchunk(self, number, mode='r'): return open(os.path.join(self.path, 'q%05d' % number), mode) def pop(self): tnum, tcnt, toffset = self.info['tail'] if [tnum, tcnt] >= self.info['head']: return tfd = self.tailf.fileno() szhdr = os.read(tfd, self.szhdr_size) if not szhdr: return size, = struct.unpack(self.szhdr_format, szhdr) data = os.read(tfd, size) tcnt += 1 toffset += self.szhdr_size + size if tcnt == self.chunksize and tnum <= self.info['head'][0]: tcnt = toffset = 0 tnum += 1 self.tailf.close() os.remove(self.tailf.name) self.tailf = self._openchunk(tnum) self.info['size'] -= 1 self.info['tail'] = tnum, tcnt, toffset return data def close(self): self.headf.close() self.tailf.close() self._saveinfo(self.info) if len(self) == 0: self._cleanup() def __len__(self): return self.info['size'] def _loadinfo(self, chunksize): infopath = self._infopath() if os.path.exists(infopath): with open(infopath) as f: info = json.load(f) else: info = { 'chunksize': chunksize, 'size': 0, 'tail': [0, 0, 0], 'head': [0, 0], } return info def _saveinfo(self, info): with open(self._infopath(), 'w') as f: json.dump(info, f) def _infopath(self): return os.path.join(self.path, 'info.json') def _cleanup(self): for x in glob.glob(os.path.join(self.path, 'q*')): os.remove(x) os.remove(os.path.join(self.path, 'info.json')) if not os.listdir(self.path): os.rmdir(self.path) class LifoDiskQueue(object): """Persistent LIFO queue.""" SIZE_FORMAT = ">L" SIZE_SIZE = struct.calcsize(SIZE_FORMAT) def __init__(self, path): self.path = path if os.path.exists(path): self.f = open(path, 'rb+') qsize = self.f.read(self.SIZE_SIZE) self.size, = struct.unpack(self.SIZE_FORMAT, qsize) self.f.seek(0, os.SEEK_END) else: self.f = open(path, 'wb+') self.f.write(struct.pack(self.SIZE_FORMAT, 0)) 
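        # Descriptive note on the on-disk layout used by this queue: the file
        # starts with a 4-byte big-endian record counter (written just above
        # for a new file, and rewritten on close()), followed by one
        # (data, 4-byte data length) pair per pushed string. Storing each
        # record's length *after* its data lets pop() read the size from the
        # end of the file, slice the record off and truncate in constant time.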
self.size = 0 def push(self, string): self.f.write(string) ssize = struct.pack(self.SIZE_FORMAT, len(string)) self.f.write(ssize) self.size += 1 def pop(self): if not self.size: return self.f.seek(-self.SIZE_SIZE, os.SEEK_END) size, = struct.unpack(self.SIZE_FORMAT, self.f.read()) self.f.seek(-size-self.SIZE_SIZE, os.SEEK_END) data = self.f.read(size) self.f.seek(-size, os.SEEK_CUR) self.f.truncate() self.size -= 1 return data def close(self): if self.size: self.f.seek(0) self.f.write(struct.pack(self.SIZE_FORMAT, self.size)) self.f.close() if not self.size: os.remove(self.path) def __len__(self): return self.size Scrapy-0.14.4/scrapy/utils/serialize.py0000600000016101777760000001020211754531743020140 0ustar buildbotnogroupimport re import datetime import decimal from twisted.internet import defer from scrapy.spider import BaseSpider from scrapy.http import Request, Response from scrapy.utils.py26 import json class SpiderReferencer(object): """Class to serialize (and deserialize) objects (typically dicts) containing references to running spiders (ie. Spider objects). This is required because simplejson fails to serialize dicts containing non-primitive types as keys, even when you override ScrapyJSONEncoder.default() with a custom encoding mechanism. """ spider_ref_re = re.compile('^spider:([0-9a-f]+)?:?(.+)?$') def __init__(self, crawler): self.crawler = crawler def get_reference_from_spider(self, spider): return 'spider:%x:%s' % (id(spider), spider.name) def get_spider_from_reference(self, ref): """Returns the Spider referenced by text, if text is a spider reference. Otherwise it returns the text itself. If the text references a non-running spider it raises a RuntimeError. """ m = self.spider_ref_re.search(ref) if m: spid, spname = m.groups() for spider in self.crawler.engine.open_spiders: if "%x" % id(spider) == spid or spider.name == spname: return spider raise RuntimeError("Spider not running: %s" % ref) return ref def encode_references(self, obj): """Look for Spider objects and replace them with spider references""" if isinstance(obj, BaseSpider): return self.get_reference_from_spider(obj) elif isinstance(obj, dict): d = {} for k, v in obj.items(): k = self.encode_references(k) v = self.encode_references(v) d[k] = v return d elif isinstance(obj, (list, tuple)): return [self.encode_references(x) for x in obj] else: return obj def decode_references(self, obj): """Look for spider references and replace them with Spider objects""" if isinstance(obj, basestring): return self.get_spider_from_reference(obj) elif isinstance(obj, dict): d = {} for k, v in obj.items(): k = self.decode_references(k) v = self.decode_references(v) d[k] = v return d elif isinstance(obj, (list, tuple)): return [self.decode_references(x) for x in obj] else: return obj class ScrapyJSONEncoder(json.JSONEncoder): DATE_FORMAT = "%Y-%m-%d" TIME_FORMAT = "%H:%M:%S" def __init__(self, *a, **kw): crawler = kw.pop('crawler', None) self.spref = kw.pop('spref', None) or SpiderReferencer(crawler) super(ScrapyJSONEncoder, self).__init__(*a, **kw) def encode(self, o): if self.spref: o = self.spref.encode_references(o) return super(ScrapyJSONEncoder, self).encode(o) def default(self, o): if isinstance(o, datetime.datetime): return o.strftime("%s %s" % (self.DATE_FORMAT, self.TIME_FORMAT)) elif isinstance(o, datetime.date): return o.strftime(self.DATE_FORMAT) elif isinstance(o, datetime.time): return o.strftime(self.TIME_FORMAT) elif isinstance(o, decimal.Decimal): return str(o) elif isinstance(o, defer.Deferred): return str(o) elif 
isinstance(o, Request): return "<%s %s %s>" % (type(o).__name__, o.method, o.url) elif isinstance(o, Response): return "<%s %s %s>" % (type(o).__name__, o.status, o.url) else: return super(ScrapyJSONEncoder, self).default(o) class ScrapyJSONDecoder(json.JSONDecoder): def __init__(self, *a, **kw): crawler = kw.pop('crawler', None) self.spref = kw.pop('spref', None) or SpiderReferencer(crawler) super(ScrapyJSONDecoder, self).__init__(*a, **kw) def decode(self, s): o = super(ScrapyJSONDecoder, self).decode(s) if self.spref: o = self.spref.decode_references(o) return o Scrapy-0.14.4/scrapy/utils/gz.py0000600000016101777760000000125011754531743016574 0ustar buildbotnogroupimport struct from cStringIO import StringIO from gzip import GzipFile def gunzip(data): """Gunzip the given data and return as much data as possible. This is resilient to CRC checksum errors. """ f = GzipFile(fileobj=StringIO(data)) output = '' chunk = '.' while chunk: try: chunk = f.read(8196) output += chunk except (IOError, struct.error): # complete only if there is some data, otherwise re-raise # see issue 87 about catching struct.error if output: output += f.extrabuf break else: raise return output Scrapy-0.14.4/scrapy/utils/iterators.py0000600000016101777760000000443611754531743020175 0ustar buildbotnogroupimport re, csv from cStringIO import StringIO from scrapy.http import TextResponse from scrapy.selector import XmlXPathSelector from scrapy import log from scrapy.utils.python import re_rsearch, str_to_unicode from scrapy.utils.response import body_or_str def xmliter(obj, nodename): """Return an iterator of XPathSelector objects over all nodes of an XML document, given the name of the node to iterate. Useful for parsing XML feeds. obj can be: - a Response object - a unicode string - a string encoded as utf-8 """ HEADER_START_RE = re.compile(r'^(.*?)<\s*%s(?:\s|>)' % nodename, re.S) HEADER_END_RE = re.compile(r'<\s*/%s\s*>' % nodename, re.S) text = body_or_str(obj) header_start = re.search(HEADER_START_RE, text) header_start = header_start.group(1).strip() if header_start else '' header_end = re_rsearch(HEADER_END_RE, text) header_end = text[header_end[1]:].strip() if header_end else '' r = re.compile(r"<%s[\s>].*?</%s>" % (nodename, nodename), re.DOTALL) for match in r.finditer(text): nodetext = header_start + match.group() + header_end yield XmlXPathSelector(text=nodetext).select('//' + nodename)[0] def csviter(obj, delimiter=None, headers=None, encoding=None): """ Returns an iterator of dictionaries from the given csv object obj can be: - a Response object - a unicode string - a string encoded as utf-8 delimiter is the character used to separate fields on the given obj. headers is an iterable that, when provided, offers the keys for the returned dictionaries; if not provided, the first row is used.
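For example, a short sketch (the sample data below is made up):

        body = "id,name\n1,foo\n2,bar\n"
        for row in csviter(body):
            print row['id'], row['name']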
""" encoding = obj.encoding if isinstance(obj, TextResponse) else encoding or 'utf-8' def _getrow(csv_r): return [str_to_unicode(field, encoding) for field in csv_r.next()] lines = StringIO(body_or_str(obj, unicode=False)) if delimiter: csv_r = csv.reader(lines, delimiter=delimiter) else: csv_r = csv.reader(lines) if not headers: headers = _getrow(csv_r) while True: row = _getrow(csv_r) if len(row) != len(headers): log.msg("ignoring row %d (length: %d, should be: %d)" % (csv_r.line_num, len(row), len(headers)), log.WARNING) continue else: yield dict(zip(headers, row)) Scrapy-0.14.4/scrapy/extension.py0000600000016101777760000000065311754531743017036 0ustar buildbotnogroup""" The Extension Manager See documentation in docs/topics/extensions.rst """ from scrapy.middleware import MiddlewareManager from scrapy.utils.conf import build_component_list class ExtensionManager(MiddlewareManager): component_name = 'extension' @classmethod def _get_mwlist_from_settings(cls, settings): return build_component_list(settings['EXTENSIONS_BASE'], \ settings['EXTENSIONS']) Scrapy-0.14.4/docs/0000700000016101777760000000000011754532077014072 5ustar buildbotnogroupScrapy-0.14.4/docs/topics/0000700000016101777760000000000011754532077015373 5ustar buildbotnogroupScrapy-0.14.4/docs/topics/spider-middleware.rst0000600000016101777760000002407711754531743021541 0ustar buildbotnogroup.. _topics-spider-middleware: ================= Spider Middleware ================= The spider middleware is a framework of hooks into Scrapy's spider processing mechanism where you can plug custom functionality to process the requests that are sent to :ref:`topics-spiders` for processing and to process the responses and items that are generated from spiders. .. _topics-spider-middleware-setting: Activating a spider middleware ============================== To activate a spider middleware component, add it to the :setting:`SPIDER_MIDDLEWARES` setting, which is a dict whose keys are the middleware class path and their values are the middleware orders. Here's an example:: SPIDER_MIDDLEWARES = { 'myproject.middlewares.CustomSpiderMiddleware': 543, } The :setting:`SPIDER_MIDDLEWARES` setting is merged with the :setting:`SPIDER_MIDDLEWARES_BASE` setting defined in Scrapy (and not meant to be overridden) and then sorted by order to get the final sorted list of enabled middlewares: the first middleware is the one closer to the engine and the last is the one closer to the spider. To decide which order to assign to your middleware see the :setting:`SPIDER_MIDDLEWARES_BASE` setting and pick a value according to where you want to insert the middleware. The order does matter because each middleware performs a different action and your middleware could depend on some previous (or subsequent) middleware being applied. If you want to disable a builtin middleware (the ones defined in :setting:`SPIDER_MIDDLEWARES_BASE`, and enabled by default) you must define it in your project :setting:`SPIDER_MIDDLEWARES` setting and assign `None` as its value. For example, if you want to disable the off-site middleware:: SPIDER_MIDDLEWARES = { 'myproject.middlewares.CustomSpiderMiddleware': 543, 'scrapy.contrib.spidermiddleware.offsite.OffsiteMiddleware': None, } Finally, keep in mind that some middlewares may need to be enabled through a particular setting. See each middleware documentation for more info. Writing your own spider middleware ================================== Writing your own spider middleware is easy. 
Each middleware component is a single Python class that defines one or more of the following methods: .. module:: scrapy.contrib.spidermiddleware .. class:: SpiderMiddleware .. method:: process_spider_input(response, spider) This method is called for each response that goes through the spider middleware and into the spider, for processing. :meth:`process_spider_input` should return ``None`` or raise and exception. If it returns ``None``, Scrapy will continue processing this response, executing all other middlewares until, finally, the response is handled to the spider for processing. If it raises an exception, Scrapy won't bother calling any other spider middleware :meth:`process_spider_input` and will call the request errback. The output of the errback is chained back in the other direction for :meth:`process_spider_output` to process it, or :meth:`process_spider_exception` if it raised an exception. :param reponse: the response being processed :type response: :class:`~scrapy.http.Response` object :param spider: the spider for which this response is intended :type spider: :class:`~scrapy.spider.BaseSpider` object .. method:: process_spider_output(response, result, spider) This method is called with the results returned from the Spider, after it has processed the response. :meth:`process_spider_output` must return an iterable of :class:`~scrapy.http.Request` or :class:`~scrapy.item.Item` objects. :param response: the response which generated this output from the spider :type response: class:`~scrapy.http.Response` object :param result: the result returned by the spider :type result: an iterable of :class:`~scrapy.http.Request` or :class:`~scrapy.item.Item` objects :param spider: the spider whose result is being processed :type spider: :class:`~scrapy.item.BaseSpider` object .. method:: process_spider_exception(response, exception, spider) This method is called when when a spider or :meth:`process_spider_input` method (from other spider middleware) raises an exception. :meth:`process_spider_exception` should return either ``None`` or an iterable of :class:`~scrapy.http.Response` or :class:`~scrapy.item.Item` objects. If it returns ``None``, Scrapy will continue processing this exception, executing any other :meth:`process_spider_exception` in the following middleware components, until no middleware components are left and the exception reaches the engine (where it's logged and discarded). If it returns an iterable the :meth:`process_spider_output` pipeline kicks in, and no other :meth:`process_spider_exception` will be called. :param response: the response being processed when the exception was raised :type response: :class:`~scrapy.http.Response` object :param exception: the exception raised :type exception: `Exception`_ object :param spider: the spider which raised the exception :type spider: :class:`scrapy.spider.BaseSpider` object .. _Exception: http://docs.python.org/library/exceptions.html#exceptions.Exception .. _topics-spider-middleware-ref: Built-in spider middleware reference ==================================== This page describes all spider middleware components that come with Scrapy. For information on how to use them and how to write your own spider middleware, see the :ref:`spider middleware usage guide `. For a list of the components enabled by default (and their orders) see the :setting:`SPIDER_MIDDLEWARES_BASE` setting. DepthMiddleware --------------- .. module:: scrapy.contrib.spidermiddleware.depth :synopsis: Depth Spider Middleware .. 
class:: DepthMiddleware DepthMiddleware is a scrape middleware used for tracking the depth of each Request inside the site being scraped. It can be used to limit the maximum depth to scrape or things like that. The :class:`DepthMiddleware` can be configured through the following settings (see the settings documentation for more info): * :setting:`DEPTH_LIMIT` - The maximum depth that will be allowed to crawl for any site. If zero, no limit will be imposed. * :setting:`DEPTH_STATS` - Whether to collect depth stats. * :setting:`DEPTH_PRIORITY` - Whether to prioritize the requests based on their depth. HttpErrorMiddleware ------------------- .. module:: scrapy.contrib.spidermiddleware.httperror :synopsis: HTTP Error Spider Middleware .. class:: HttpErrorMiddleware Filter out unsuccessful (erroneous) HTTP responses so that spiders don't have to deal with them, which (most of the time) imposes an overhead, consumes more resources, and makes the spider logic more complex. According to the `HTTP standard`_, successful responses are those whose status codes are in the 200-300 range. .. _HTTP standard: http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html If you still want to process response codes outside that range, you can specify which response codes the spider is able to handle using the ``handle_httpstatus_list`` spider attribute. For example, if you want your spider to handle 404 responses you can do this:: class MySpider(CrawlSpider): handle_httpstatus_list = [404] .. reqmeta:: handle_httpstatus_list The ``handle_httpstatus_list`` key of :attr:`Request.meta ` can also be used to specify which response codes to allow on a per-request basis. Keep in mind, however, that it's usually a bad idea to handle non-200 responses, unless you really know what you're doing. For more information see: `HTTP Status Code Definitions`_. .. _HTTP Status Code Definitions: http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html OffsiteMiddleware ----------------- .. module:: scrapy.contrib.spidermiddleware.offsite :synopsis: Offsite Spider Middleware .. class:: OffsiteMiddleware Filters out Requests for URLs outside the domains covered by the spider. This middleware filters out every request whose host names aren't in the spider's :attr:`~scrapy.spider.BaseSpider.allowed_domains` attribute. When your spider returns a request for a domain not belonging to those covered by the spider, this middleware will log a debug message similar to this one:: DEBUG: Filtered offsite request to 'www.othersite.com': To avoid filling the log with too much noise, it will only print one of these messages for each new domain filtered. So, for example, if another request for ``www.othersite.com`` is filtered, no log message will be printed. But if a request for ``someothersite.com`` is filtered, a message will be printed (but only for the first request filtred). If the spider doesn't define an :attr:`~scrapy.spider.BaseSpider.allowed_domains` attribute, or the attribute is empty, the offsite middleware will allow all requests. If the request has the :attr:`~scrapy.http.Request.dont_filter` attribute set, the offsite middleware will allow the request even if its domain is not listed in allowed domains. RefererMiddleware ----------------- .. module:: scrapy.contrib.spidermiddleware.referer :synopsis: Referer Spider Middleware .. class:: RefererMiddleware Populates Request referer field, based on the Response which originated it. UrlLengthMiddleware ------------------- .. 
module:: scrapy.contrib.spidermiddleware.urllength :synopsis: URL Length Spider Middleware .. class:: UrlLengthMiddleware Filters out requests with URLs longer than URLLENGTH_LIMIT The :class:`UrlLengthMiddleware` can be configured through the following settings (see the settings documentation for more info): * :setting:`URLLENGTH_LIMIT` - The maximum URL length to allow for crawled URLs. Scrapy-0.14.4/docs/topics/extensions.rst0000600000016101777760000003115511754531743020332 0ustar buildbotnogroup.. _topics-extensions: ========== Extensions ========== The extensions framework provides a mechanism for inserting your own custom functionality into Scrapy. Extensions are just regular classes that are instantiated at Scrapy startup, when extensions are initialized. Extension settings ================== Extensions use the :ref:`Scrapy settings ` to manage their settings, just like any other Scrapy code. It is customary for extensions to prefix their settings with their own name, to avoid collision with existing (and future) extensions. For example, an hypothetic extension to handle `Google Sitemaps`_ would use settings like `GOOGLESITEMAP_ENABLED`, `GOOGLESITEMAP_DEPTH`, and so on. .. _Google Sitemaps: http://en.wikipedia.org/wiki/Sitemaps Loading & activating extensions =============================== Extensions are loaded and activated at startup by instantiating a single instance of the extension class. Therefore, all the extension initialization code must be performed in the class constructor (``__init__`` method). To make an extension available, add it to the :setting:`EXTENSIONS` setting in your Scrapy settings. In :setting:`EXTENSIONS`, each extension is represented by a string: the full Python path to the extension's class name. For example:: EXTENSIONS = { 'scrapy.contrib.corestats.CoreStats': 500, 'scrapy.webservice.WebService': 500, 'scrapy.telnet.TelnetConsole': 500, } As you can see, the :setting:`EXTENSIONS` setting is a dict where the keys are the extension paths, and their values are the orders, which define the extension *loading* order. Extensions orders are not as important as middleware orders though, and they are typically irrelevant, ie. it doesn't matter in which order the extensions are loaded because they don't depend on each other [1]. However, this feature can be exploited if you need to add an extension which depends on other extensions already loaded. [1] This is is why the :setting:`EXTENSIONS_BASE` setting in Scrapy (which contains all built-in extensions enabled by default) defines all the extensions with the same order (``500``). Available, enabled and disabled extensions ========================================== Not all available extensions will be enabled. Some of them usually depend on a particular setting. For example, the HTTP Cache extension is available by default but disabled unless the :setting:`HTTPCACHE_ENABLED` setting is set. Accessing enabled extensions ============================ Even though it's not usually needed, you can access extension objects through the :ref:`topics-extensions-ref-manager` which is populated when extensions are loaded. For example, to access the ``WebService`` extension:: from scrapy.project import extensions webservice_extension = extensions.enabled['WebService'] .. see also:: :ref:`topics-extensions-ref-manager`, for the complete Extension Manager reference. Writing your own extension ========================== Writing your own extension is easy. 
Each extension is a single Python class which doesn't need to implement any particular method. All extension initialization code must be performed in the class constructor (``__init__`` method). If that method raises the :exc:`~scrapy.exceptions.NotConfigured` exception, the extension will be disabled. Otherwise, the extension will be enabled. Let's take a look at the following example extension which just logs a message every time a domain/spider is opened and closed:: from scrapy.xlib.pydispatch import dispatcher from scrapy import signals class SpiderOpenCloseLogging(object): def __init__(self): dispatcher.connect(self.spider_opened, signal=signals.spider_opened) dispatcher.connect(self.spider_closed, signal=signals.spider_closed) def spider_opened(self, spider): log.msg("opened spider %s" % spider.name) def spider_closed(self, spider): log.msg("closed spider %s" % spider.name) .. _topics-extensions-ref-manager: Extension Manager ================= .. module:: scrapy.extension :synopsis: The extension manager The Extension Manager is responsible for loading and keeping track of installed extensions and it's configured through the :setting:`EXTENSIONS` setting which contains a dictionary of all available extensions and their order similar to how you :ref:`configure the downloader middlewares `. .. class:: ExtensionManager The Extension Manager is a singleton object, which is instantiated at module loading time and can be accessed like this:: from scrapy.project import extensions .. attribute:: loaded A boolean which is True if extensions are already loaded or False if they're not. .. attribute:: enabled A dict with the enabled extensions. The keys are the extension class names, and the values are the extension objects. Example:: >>> from scrapy.project import extensions >>> extensions.load() >>> print extensions.enabled {'CoreStats': , 'WebService': , ... .. attribute:: disabled A dict with the disabled extensions. The keys are the extension class names, and the values are the extension class paths (because objects are never instantiated for disabled extensions). Example:: >>> from scrapy.project import extensions >>> extensions.load() >>> print extensions.disabled {'MemoryDebugger': 'scrapy.contrib.memdebug.MemoryDebugger', 'MyExtension': 'myproject.extensions.MyExtension', ... .. method:: load() Load the available extensions configured in the :setting:`EXTENSIONS` setting. On a standard run, this method is usually called by the Execution Manager, but you may need to call it explicitly if you're dealing with code outside Scrapy. .. method:: reload() Reload the available extensions. See :meth:`load`. .. _topics-extensions-ref: Built-in extensions reference ============================= General purpose extensions -------------------------- Log Stats extension ~~~~~~~~~~~~~~~~~~~ .. module:: scrapy.contrib.logstats :synopsis: Basic stats logging .. class:: LogStats Log basic stats like crawled pages and scraped items. Core Stats extension ~~~~~~~~~~~~~~~~~~~~ .. module:: scrapy.contrib.corestats :synopsis: Core stats collection .. class:: CoreStats Enable the collection of core statistics, provided the stats collection is enabled (see :ref:`topics-stats`). .. _topics-extensions-ref-webservice: Web service extension ~~~~~~~~~~~~~~~~~~~~~ .. module:: scrapy.webservice :synopsis: Web service .. class:: scrapy.webservice.WebService See `topics-webservice`. .. _topics-extensions-ref-telnetconsole: Telnet console extension ~~~~~~~~~~~~~~~~~~~~~~~~ .. 
module:: scrapy.telnet :synopsis: Telnet console .. class:: scrapy.telnet.TelnetConsole Provides a telnet console for getting into a Python interpreter inside the currently running Scrapy process, which can be very useful for debugging. The telnet console must be enabled by the :setting:`TELNETCONSOLE_ENABLED` setting, and the server will listen in the port specified in :setting:`TELNETCONSOLE_PORT`. .. _topics-extensions-ref-memusage: Memory usage extension ~~~~~~~~~~~~~~~~~~~~~~ .. module:: scrapy.contrib.memusage :synopsis: Memory usage extension .. class:: scrapy.contrib.memusage.MemoryUsage .. note:: This extension does not work in Windows. Allows monitoring the memory used by a Scrapy process and: 1, send a notification e-mail when it exceeds a certain value 2. terminate the Scrapy process when it exceeds a certain value The notification e-mails can be triggered when a certain warning value is reached (:setting:`MEMUSAGE_WARNING_MB`) and when the maximum value is reached (:setting:`MEMUSAGE_LIMIT_MB`) which will also cause the Scrapy process to be terminated. This extension is enabled by the :setting:`MEMUSAGE_ENABLED` setting and can be configured with the following settings: * :setting:`MEMUSAGE_LIMIT_MB` * :setting:`MEMUSAGE_WARNING_MB` * :setting:`MEMUSAGE_NOTIFY_MAIL` * :setting:`MEMUSAGE_REPORT` Memory debugger extension ~~~~~~~~~~~~~~~~~~~~~~~~~ .. module:: scrapy.contrib.memdebug :synopsis: Memory debugger extension .. class:: scrapy.contrib.memdebug.MemoryDebugger An extension for debugging memory usage. It collects information about: * objects uncollected by the Python garbage collector * libxml2 memory leaks * objects left alive that shouldn't. For more info, see :ref:`topics-leaks-trackrefs` To enable this extension, turn on the :setting:`MEMDEBUG_ENABLED` setting. The info will be stored in the stats. Close spider extension ~~~~~~~~~~~~~~~~~~~~~~ .. module:: scrapy.contrib.closespider :synopsis: Close spider extension .. class:: scrapy.contrib.closespider.CloseSpider Closes a spider automatically when some conditions are met, using a specific closing reason for each condition. The conditions for closing a spider can be configured through the following settings: * :setting:`CLOSESPIDER_TIMEOUT` * :setting:`CLOSESPIDER_ITEMCOUNT` * :setting:`CLOSESPIDER_PAGECOUNT` * :setting:`CLOSESPIDER_ERRORCOUNT` .. setting:: CLOSESPIDER_TIMEOUT CLOSESPIDER_TIMEOUT """"""""""""""""""" Default: ``0`` An integer which specifies a number of seconds. If the spider remains open for more than that number of second, it will be automatically closed with the reason ``closespider_timeout``. If zero (or non set), spiders won't be closed by timeout. .. setting:: CLOSESPIDER_ITEMCOUNT CLOSESPIDER_ITEMCOUNT """"""""""""""""""""" Default: ``0`` An integer which specifies a number of items. If the spider scrapes more than that amount if items and those items are passed by the item pipeline, the spider will be closed with the reason ``closespider_itemcount``. If zero (or non set), spiders won't be closed by number of passed items. .. setting:: CLOSESPIDER_PAGECOUNT CLOSESPIDER_PAGECOUNT """"""""""""""""""""" .. versionadded:: 0.11 Default: ``0`` An integer which specifies the maximum number of responses to crawl. If the spider crawls more than that, the spider will be closed with the reason ``closespider_pagecount``. If zero (or non set), spiders won't be closed by number of crawled responses. .. setting:: CLOSESPIDER_ERRORCOUNT CLOSESPIDER_ERRORCOUNT """""""""""""""""""""" .. 
versionadded:: 0.11 Default: ``0`` An integer which specifies the maximum number of errors to receive before closing the spider. If the spider generates more than that number of errors, it will be closed with the reason ``closespider_errorcount``. If zero (or non set), spiders won't be closed by number of errors. StatsMailer extension ~~~~~~~~~~~~~~~~~~~~~ .. module:: scrapy.contrib.statsmailer :synopsis: StatsMailer extension .. class:: scrapy.contrib.statsmailer.StatsMailer This simple extension can be used to send a notification e-mail every time a domain has finished scraping, including the Scrapy stats collected. The email will be sent to all recipients specified in the :setting:`STATSMAILER_RCPTS` setting. .. module:: scrapy.contrib.debug :synopsis: Extensions for debugging Scrapy Debugging extensions -------------------- Stack trace dump extension ~~~~~~~~~~~~~~~~~~~~~~~~~~ .. class:: scrapy.contrib.debug.StackTraceDump Dumps the stack trace and Scrapy engine status of a runnning process when a `SIGQUIT`_ or `SIGUSR2`_ signal is received. After the stack trace and engine status is dumped, the Scrapy process continues running normally. The dump is sent to standard output. This extension only works on POSIX-compliant platforms (ie. not Windows). There are at least two ways to send Scrapy the `SIGQUIT`_ signal: 1. By pressing Ctrl-\ while a Scrapy process is running (Linux only?) 2. By running this command (assuming ```` is the process id of the Scrapy process):: kill -QUIT .. _SIGUSR2: http://en.wikipedia.org/wiki/SIGUSR1_and_SIGUSR2 .. _SIGQUIT: http://en.wikipedia.org/wiki/SIGQUIT Debugger extension ~~~~~~~~~~~~~~~~~~ .. class:: scrapy.contrib.debug.Debugger Invokes a `Python debugger`_ inside a running Scrapy process when a `SIGUSR2`_ signal is received. After the debugger is exited, the Scrapy process continues running normally. For more info see `Debugging in Python`. This extension only works on POSIX-compliant platforms (ie. not Windows). .. _Python debugger: http://docs.python.org/library/pdb.html .. _Debugging in Python: http://www.ferg.org/papers/debugging_in_python.html Scrapy-0.14.4/docs/topics/commands.rst0000600000016101777760000002603111754531743017731 0ustar buildbotnogroup.. _topics-commands: ================= Command line tool ================= .. versionadded:: 0.10 Scrapy is controlled through the ``scrapy`` command-line tool, to be referred here as the "Scrapy tool" to differentiate it from their sub-commands which we just call "commands", or "Scrapy commands". The Scrapy tool provides several commands, for multiple purposes, and each one accepts a different set of arguments and options. .. _topics-project-structure: Default structure of Scrapy projects ==================================== Before delving into the command-line tool and its sub-commands, let's first understand the directory structure of a Scrapy project. Even thought it can be modified, all Scrapy projects have the same file structure by default, similar to this:: scrapy.cfg myproject/ __init__.py items.py pipelines.py settings.py spiders/ __init__.py spider1.py spider2.py ... The directory where the ``scrapy.cfg`` file resides is known as the *project root directory*. That file contains the name of the python module that defines the project settings. 
Here is an example:: [settings] default = myproject.settings Using the ``scrapy`` tool ========================= You can start by running the Scrapy tool with no arguments and it will print some usage help and the available commands:: Scrapy X.Y - no active project Usage: scrapy [options] [args] Available commands: crawl Start crawling a spider or URL fetch Fetch a URL using the Scrapy downloader [...] The first line will print the currently active project, if you're inside a Scrapy project. In this, it was run from outside a project. If run from inside a project it would have printed something like this:: Scrapy X.Y - project: myproject Usage: scrapy [options] [args] [...] Creating projects ----------------- The first thing you typically do with the ``scrapy`` tool is create your Scrapy project:: scrapy startproject myproject That will create a Scrapy project under the ``myproject`` directory. Next, you go inside the new project directory:: cd myproject And you're ready to use use the ``scrapy`` command to manage and control your project from there. Controlling projects -------------------- You use the ``scrapy`` tool from inside your projects to control and manage them. For example, to create a new spider:: scrapy genspider mydomain mydomain.com Some Scrapy commands (like :command:`crawl`) must be run from inside a Scrapy project. See the :ref:`commands reference ` below for more information on which commands must be run from inside projects, and which not. Also keep in mind that some commands may have slightly different behaviours when running them from inside projects. For example, the fetch command will use spider-overridden behaviours (such as custom :setting:`USER_AGENT` per-spider setting) if the url being fetched is associated with some specific spider. This is intentional, as the ``fetch`` command is meant to be used to check how spiders are downloading pages. .. _topics-commands-ref: Available tool commands ======================= This section contains a list of the available built-in commands with a description and some usage examples. Remember you can always get more info about each command by running:: scrapy -h And you can see all available commands with:: scrapy -h There are two kinds of commands, those that only work from inside a Scrapy project (Project-specific commands) and those that also work without an active Scrapy project (Global commands), though they may behave slightly different when running from inside a project (as they would use the project overriden settings). Global commands: * :command:`startproject` * :command:`settings` * :command:`runspider` * :command:`shell` * :command:`fetch` * :command:`view` * :command:`version` Project-only commands: * :command:`crawl` * :command:`list` * :command:`edit` * :command:`parse` * :command:`genspider` * :command:`server` * :command:`deploy` .. command:: startproject startproject ------------ * Syntax: ``scrapy startproject `` * Requires project: *no* Creates a new Scrapy project named ``project_name``, under the ``project_name`` directory. Usage example:: $ scrapy startproject myproject .. command:: genspider genspider --------- * Syntax: ``scrapy genspider [-t template] `` * Requires project: *yes* Create a new spider in the current project. This is just a convenient shortcut command for creating spiders based on pre-defined templates, but certainly not the only way to create spiders. You can just create the spider source code files yourself, instead of using this command. 
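If you go that route, a hand-written spider module can be as small as this (a rough sketch; the names are only illustrative)::

    from scrapy.spider import BaseSpider

    class ExampleSpider(BaseSpider):
        name = "example"
        allowed_domains = ["example.com"]
        start_urls = ["http://www.example.com/"]

        def parse(self, response):
            self.log("Visited %s" % response.url)
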
Usage example:: $ scrapy genspider -l Available templates: basic crawl csvfeed xmlfeed $ scrapy genspider -d basic from scrapy.spider import BaseSpider class $classname(BaseSpider): name = "$name" allowed_domains = ["$domain"] start_urls = ( 'http://www.$domain/', ) def parse(self, response): pass $ scrapy genspider -t basic example example.com Created spider 'example' using template 'basic' in module: mybot.spiders.example .. command:: crawl crawl ----- * Syntax: ``scrapy crawl `` * Requires project: *yes* Start crawling a spider. Usage examples:: $ scrapy crawl myspider [ ... myspider starts crawling ... ] .. command:: server server ------ * Syntax: ``scrapy server`` * Requires project: *yes* Start Scrapyd server for this project, which can be referred from the JSON API with the project name ``default``. For more info see: :ref:`topics-scrapyd`. Usage example:: $ scrapy server [ ... scrapyd starts and stays idle waiting for spiders to get scheduled ... ] To schedule spiders, use the Scrapyd JSON API. .. command:: list list ---- * Syntax: ``scrapy list`` * Requires project: *yes* List all available spiders in the current project. The output is one spider per line. Usage example:: $ scrapy list spider1 spider2 .. command:: edit edit ---- * Syntax: ``scrapy edit `` * Requires project: *yes* Edit the given spider using the editor defined in the :setting:`EDITOR` setting. This command is provided only as a convenient shortcut for the most common case, the developer is of course free to choose any tool or IDE to write and debug his spiders. Usage example:: $ scrapy edit spider1 .. command:: fetch fetch ----- * Syntax: ``scrapy fetch `` * Requires project: *no* Downloads the given URL using the Scrapy downloader and writes the contents to standard output. The interesting thing about this command is that it fetches the page how the the spider would download it. For example, if the spider has an ``USER_AGENT`` attribute which overrides the User Agent, it will use that one. So this command can be used to "see" how your spider would fetch certain page. If used outside a project, no particular per-spider behaviour would be applied and it will just use the default Scrapy downloder settings. Usage examples:: $ scrapy fetch --nolog http://www.example.com/some/page.html [ ... html content here ... ] $ scrapy fetch --nolog --headers http://www.example.com/ {'Accept-Ranges': ['bytes'], 'Age': ['1263 '], 'Connection': ['close '], 'Content-Length': ['596'], 'Content-Type': ['text/html; charset=UTF-8'], 'Date': ['Wed, 18 Aug 2010 23:59:46 GMT'], 'Etag': ['"573c1-254-48c9c87349680"'], 'Last-Modified': ['Fri, 30 Jul 2010 15:30:18 GMT'], 'Server': ['Apache/2.2.3 (CentOS)']} .. command:: view view ---- * Syntax: ``scrapy view `` * Requires project: *no* Opens the given URL in a browser, as your Scrapy spider would "see" it. Sometimes spiders see pages differently from regular users, so this can be used to check what the spider "sees" and confirm it's what you expect. Usage example:: $ scrapy view http://www.example.com/some/page.html [ ... browser starts ... ] .. command:: shell shell ----- * Syntax: ``scrapy shell [url]`` * Requires project: *no* Starts the Scrapy shell for the given URL (if given) or empty if not URL is given. See :ref:`topics-shell` for more info. Usage example:: $ scrapy shell http://www.example.com/some/page.html [ ... scrapy shell starts ... ] .. 
command:: parse parse ----- * Syntax: ``scrapy parse [options]`` * Requires project: *yes* Fetches the given URL and parses with the spider that handles it, using the method passed with the ``--callback`` option, or ``parse`` if not given. Supported options: * ``--callback`` or ``-c``: spider method to use as callback for parsing the response * ``--rules`` or ``-r``: use :class:`~scrapy.contrib.spiders.CrawlSpider` rules to discover the callback (ie. spider method) to use for parsing the response * ``--noitems``: don't show extracted links * ``--nolinks``: don't show scraped items Usage example:: $ scrapy parse http://www.example.com/ -c parse_item [ ... scrapy log lines crawling example.com spider ... ] # Scraped Items - callback: parse ------------------------------------------------------------ MyItem({'name': u"Example item", 'category': u'Furniture', 'length': u'12 cm'} ) .. command:: settings settings -------- * Syntax: ``scrapy settings [options]`` * Requires project: *no* Get the value of a Scrapy setting. If used inside a project it'll show the project setting value, otherwise it'll show the default Scrapy value for that setting. Example usage:: $ scrapy settings --get BOT_NAME scrapybot $ scrapy settings --get DOWNLOAD_DELAY 0 .. command:: runspider runspider --------- * Syntax: ``scrapy runspider `` * Requires project: *no* Run a spider self-contained in a Python file, without having to create a project. Example usage:: $ scrapy runspider myspider.py [ ... spider starts crawling ... ] .. command:: version version ------- * Syntax: ``scrapy version [-v]`` * Requires project: *no* Prints the Scrapy version. If used with ``-v`` it also prints Python, Twisted and Platform info, which is useful for bug reports. .. command:: deploy deploy ------ .. versionadded:: 0.11 * Syntax: ``scrapy deploy [ | -l | -L ]`` * Requires project: *yes* Deploy the project into a Scrapyd server. See :ref:`topics-deploying`. Custom project commands ======================= You can also add your custom project commands by using the :setting:`COMMANDS_MODULE` setting. See the Scrapy commands in `scrapy/commands`_ for examples on how to implement your commands. .. _scrapy/commands: https://github.com/scrapy/scrapy/blob/master/scrapy/commands .. setting:: COMMANDS_MODULE COMMANDS_MODULE --------------- Default: ``''`` (empty string) A module to use for looking custom Scrapy commands. This is used to add custom commands for your Scrapy project. Example:: COMMANDS_MODULE = 'mybot.commands' Scrapy-0.14.4/docs/topics/feed-exports.rst0000600000016101777760000001603311754531743020536 0ustar buildbotnogroup.. _topics-feed-exports: ============ Feed exports ============ .. versionadded:: 0.10 One of the most frequently required features when implementing scrapers is being able to store the scraped data properly and, quite often, that means generating a "export file" with the scraped data (commonly called "export feed") to be consumed by other systems. Scrapy provides this functionality out of the box with the Feed Exports, which allows you to generate a feed with the scraped items, using multiple serialization formats and storage backends. .. 
_topics-feed-format: Serialization formats ===================== For serializing the scraped data, the feed exports use the :ref:`Item exporters ` and these formats are supported out of the box: * :ref:`topics-feed-format-json` * :ref:`topics-feed-format-jsonlines` * :ref:`topics-feed-format-csv` * :ref:`topics-feed-format-xml` But you can also extend the supported format through the :setting:`FEED_EXPORTERS` setting. .. _topics-feed-format-json: JSON ---- * :setting:`FEED_FORMAT`: ``json`` * Exporter used: :class:`~scrapy.contrib.exporter.JsonItemExporter` * See :ref:`this warning ` if you're using JSON with large feeds .. _topics-feed-format-jsonlines: JSON lines ---------- * :setting:`FEED_FORMAT`: ``jsonlines`` * Exporter used: :class:`~scrapy.contrib.exporter.JsonLinesItemExporter` .. _topics-feed-format-csv: CSV --- * :setting:`FEED_FORMAT`: ``csv`` * Exporter used: :class:`~scrapy.contrib.exporter.CsvItemExporter` .. _topics-feed-format-xml: XML --- * :setting:`FEED_FORMAT`: ``xml`` * Exporter used: :class:`~scrapy.contrib.exporter.XmlItemExporter` .. _topics-feed-format-pickle: Pickle ------ * :setting:`FEED_FORMAT`: ``pickle`` * Exporter used: :class:`~scrapy.contrib.exporter.PickleItemExporter` .. _topics-feed-format-marshal: Marshal ------- * :setting:`FEED_FORMAT`: ``marshal`` * Exporter used: :class:`~scrapy.contrib.exporter.MarshalItemExporter` .. _topics-feed-storage: Storages ======== When using the feed exports you define where to store the feed using a URI_ (through the :setting:`FEED_URI` setting). The feed exports supports multiple storage backend types which are defined by the URI scheme. The storages backends supported out of the box are: * :ref:`topics-feed-storage-fs` * :ref:`topics-feed-storage-ftp` * :ref:`topics-feed-storage-s3` (requires boto_) * :ref:`topics-feed-storage-stdout` Some storage backends may be unavailable if the required external libraries are not available. For example, the S3 backend is only available if the boto_ library is installed. .. _topics-feed-uri-params: Storage URI parameters ====================== The storage URI can also contain parameters that get replaced when the feed is being created. These parameters are: * ``%(time)s`` - gets replaced by a timestamp when the feed is being created * ``%(name)s`` - gets replaced by the spider name Any other named parmeter gets replaced by the spider attribute of the same name. For example, ``%(site_id)s`` would get replaced by the ``spider.site_id`` attribute the moment the feed is being created. Here are some examples to illustrate: * Store in FTP using one directory per spider: * ``ftp://user:password@ftp.example.com/scraping/feeds/%(name)s/%(time)s.json`` * Store in S3 using one directory per spider: * ``s3://mybucket/scraping/feeds/%(name)s/%(time)s.json`` .. _topics-feed-storage-backends: Storage backends ================ .. _topics-feed-storage-fs: Local filesystem ---------------- The feeds are stored in the local filesystem. * URI scheme: ``file`` * Example URI: ``file:///tmp/export.csv`` * Required external libraries: none Note that for the local filesystem storage (only) you can omit the scheme if you specify an absolute path like ``/tmp/export.csv``. This only works on Unix systems though. .. _topics-feed-storage-ftp: FTP --- The feeds are stored in a FTP server. * URI scheme: ``ftp`` * Example URI: ``ftp://user:pass@ftp.example.com/path/to/export.csv`` * Required external libraries: none .. _topics-feed-storage-s3: S3 -- The feeds are stored on `Amazon S3`_. 
* URI scheme: ``s3`` * Example URIs: * ``s3://mybucket/path/to/export.csv`` * ``s3://aws_key:aws_secret@mybucket/path/to/export.csv`` * Required external libraries: `boto`_ The AWS credentials can be passed as user/password in the URI, or they can be passed through the following settings: * :setting:`AWS_ACCESS_KEY_ID` * :setting:`AWS_SECRET_ACCESS_KEY` .. _topics-feed-storage-stdout: Standard output --------------- The feeds are written to the standard output of the Scrapy process. * URI scheme: ``stdout`` * Example URI: ``stdout:`` * Required external libraries: none Settings ======== These are the settings used for configuring the feed exports: * :setting:`FEED_URI` (mandatory) * :setting:`FEED_FORMAT` * :setting:`FEED_STORAGES` * :setting:`FEED_EXPORTERS` * :setting:`FEED_STORE_EMPTY` .. currentmodule:: scrapy.contrib.feedexport .. setting:: FEED_URI FEED_URI -------- Default: ``None`` The URI of the export feed. See :ref:`topics-feed-storage-backends` for supported URI schemes. This setting is required for enabling the feed exports. .. setting:: FEED_FORMAT FEED_FORMAT ----------- The serialization format to be used for the feed. See :ref:`topics-feed-format` for possible values. .. setting:: FEED_STORE_EMPTY FEED_STORE_EMPTY ---------------- Default: ``False`` Whether to export empty feeds (ie. feeds with no items). .. setting:: FEED_STORAGES FEED_STORAGES ------------- Default:: ``{}`` A dict containing additional feed storage backends supported by your project. The keys are URI schemes and the values are paths to storage classes. .. setting:: FEED_STORAGES_BASE FEED_STORAGES_BASE ------------------ Default:: { '': 'scrapy.contrib.feedexport.FileFeedStorage', 'file': 'scrapy.contrib.feedexport.FileFeedStorage', 'stdout': 'scrapy.contrib.feedexport.StdoutFeedStorage', 's3': 'scrapy.contrib.feedexport.S3FeedStorage', 'ftp': 'scrapy.contrib.feedexport.FTPFeedStorage', } A dict containing the built-in feed storage backends supported by Scrapy. .. setting:: FEED_EXPORTERS FEED_EXPORTERS -------------- Default:: ``{}`` A dict containing additional exporters supported by your project. The keys are URI schemes and the values are paths to :ref:`Item exporter ` classes. .. setting:: FEED_EXPORTERS_BASE FEED_EXPORTERS_BASE ------------------- Default:: FEED_EXPORTERS_BASE = { 'json': 'scrapy.contrib.exporter.JsonItemExporter', 'jsonlines': 'scrapy.contrib.exporter.JsonLinesItemExporter', 'csv': 'scrapy.contrib.exporter.CsvItemExporter', 'xml': 'scrapy.contrib.exporter.XmlItemExporter', 'marshal': 'scrapy.contrib.exporter.MarshalItemExporter', } A dict containing the built-in feed exporters supported by Scrapy. .. _URI: http://en.wikipedia.org/wiki/Uniform_Resource_Identifier .. _Amazon S3: http://aws.amazon.com/s3/ .. _boto: http://code.google.com/p/boto/ Scrapy-0.14.4/docs/topics/jobs.rst0000600000016101777760000000627211754531743017072 0ustar buildbotnogroup.. _topics-jobs: ================================= Jobs: pausing and resuming crawls ================================= Sometimes, for big sites, it's desirable to pause crawls and be able to resume them later. Scrapy supports this functionality out of the box by providing the following facilities: * a scheduler that persists scheduled requests on disk * a duplicates filter that persists visited requests on disk * an extension that keeps some spider state (key/value pairs) persistent between batches Job directory ============= To enable persistence support you just need to define a *job directory* through the ``JOBDIR`` setting. 
This directory will be for storing all required data to keep the state of a single job (ie. a spider run). It's important to note that this directory must not be shared by different spiders, or even different jobs/runs of the same spider, as it's meant to be used for storing the state of a *single* job. How to use it ============= To start a spider with persistence supported enabled, run it like this:: scrapy crawl somespider -s JOBDIR=crawls/somespider-1 Then, you can stop the spider safely at any time (by pressing Ctrl-C or sending a signal), and resume it later by issuing the same command:: scrapy crawl somespider -s JOBDIR=crawls/somespider-1 Keeping persitent state between batches ======================================= Sometimes you'll want to keep some persistent spider state between pause/resume batches. You can use the ``spider.state`` attribute for that, which should be a dict. There's a built-in extension that takes care of serializing, storing and loading that attribute from the job directory, when the spider starts and stops. Here's an example of a callback that uses the spider state (other spider code is ommited for brevity):: def parse_item(self, response): # parse item here self.state['items_count'] = self.state.get('items_count', 0) + 1 Persistence gotchas =================== There are a few things to keep in mind if you want to be able to use the Scrapy persistence support: Cookies expiration ------------------ Cookies may expire. So, if you don't resume your spider quickly the requests scheduled may no longer work. This won't be an issue if you spider doesn't rely on cookies. Request serialization --------------------- Requests must be serializable by the `pickle` module, in order for persistence to work, so you should make sure that your requests are serializable. The most common issue here is to use ``lambda`` functions on request callbacks that can't be persisted. So, for example, this won't work:: def some_callback(self, response): somearg = 'test' return Request('http://www.example.com', callback=lambda r: self.other_callback(r, somearg)) def other_callback(self, response, somearg): print "the argument passed is:", somearg But this will:: def some_callback(self, response): somearg = 'test' return Request('http://www.example.com', meta={'somearg': somearg}) def other_callback(self, response): somearg = response.meta['somearg'] print "the argument passed is:", somearg .. _pickle: http://docs.python.org/library/pickle.html Scrapy-0.14.4/docs/topics/leaks.rst0000600000016101777760000003001611754531743017225 0ustar buildbotnogroup.. _topics-leaks: ====================== Debugging memory leaks ====================== In Scrapy, objects such as Requests, Responses and Items have a finite lifetime: they are created, used for a while, and finally destroyed. From all those objects, the Request is probably the one with the longest lifetime, as it stays waiting in the Scheduler queue until it's time to process it. For more info see :ref:`topics-architecture`. As these Scrapy objects have a (rather long) lifetime, there is always the risk of accumulating them in memory without releasing them properly and thus causing what is known as a "memory leak". To help debugging memory leaks, Scrapy provides a built-in mechanism for tracking objects references called :ref:`trackref `, and you can also use a third-party library called :ref:`Guppy ` for more advanced memory debugging (see below for more info). Both mechanisms must be used from the :ref:`Telnet Console `. 
Common causes of memory leaks ============================= It happens quite often (sometimes by accident, sometimes on purpose) that the Scrapy developer passes objects referenced in Requests (for example, using the :attr:`~scrapy.http.Request.meta` attribute or the request callback function) and that effectively bounds the lifetime of those referenced objects to the lifetime of the Request. This is, by far, the most common cause of memory leaks in Scrapy projects, and a quite difficult one to debug for newcomers. In big projects, the spiders are typically written by different people and some of those spiders could be "leaking" and thus affecting the rest of the other (well-written) spiders when they get to run concurrently, which, in turn, affects the whole crawling process. At the same time, it's hard to avoid the reasons that cause these leaks without restricting the power of the framework, so we have decided not to restrict the functionally but provide useful tools for debugging these leaks, which quite often consist in an answer to the question: *which spider is leaking?*. The leak could also come from a custom middleware, pipeline or extension that you have written, if you are not releasing the (previously allocated) resources properly. For example, if you're allocating resources on :signal:`spider_opened` but not releasing them on :signal:`spider_closed`. .. _topics-leaks-trackrefs: Debugging memory leaks with ``trackref`` ======================================== ``trackref`` is a module provided by Scrapy to debug the most common cases of memory leaks. It basically tracks the references to all live Requests, Responses, Item and Selector objects. To activate the ``trackref`` module, enable the :setting:`TRACK_REFS` setting. It only imposes a minor performance impact, so it should be OK to use it, even in production environments. Once you have ``trackref`` enabled, you can enter the telnet console and inspect how many objects (of the classes mentioned above) are currently alive using the ``prefs()`` function which is an alias to the :func:`~scrapy.utils.trackref.print_live_refs` function:: telnet localhost 6023 >>> prefs() Live References ExampleSpider 1 oldest: 15s ago HtmlResponse 10 oldest: 1s ago XPathSelector 2 oldest: 0s ago FormRequest 878 oldest: 7s ago As you can see, that report also shows the "age" of the oldest object in each class. If you do have leaks, chances are you can figure out which spider is leaking by looking at the oldest request or response. You can get the oldest object of each class using the :func:`~scrapy.utils.trackref.get_oldest` function like this (from the telnet console). Which objects are tracked? -------------------------- The objects tracked by ``trackrefs`` are all from these classes (and all its subclasses): * ``scrapy.http.Request`` * ``scrapy.http.Response`` * ``scrapy.item.Item`` * ``scrapy.selector.XPathSelector`` * ``scrapy.spider.BaseSpider`` * ``scrapy.selector.document.Libxml2Document`` A real example -------------- Let's see a concrete example of an hypothetical case of memory leaks. Suppose we have some spider with a line similar to this one:: return Request("http://www.somenastyspider.com/product.php?pid=%d" % product_id, callback=self.parse, meta={referer: response}") That line is passing a response reference inside a request which effectively ties the response lifetime to the requests' one, and that would definitely cause memory leaks. 
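A leak-free variant (just a sketch; the meta key name is made up) would pass only the piece of data that is actually needed, such as the referring URL, instead of the whole response object::

    return Request("http://www.somenastyspider.com/product.php?pid=%d" % product_id,
                   callback=self.parse, meta={'referer_url': response.url})
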
Let's see how we can discover which one is the nasty spider (without knowing it a-priori, of course) by using the ``trackref`` tool. After the crawler is running for a few minutes and we notice its memory usage has grown a lot, we can enter its telnet console and check the live references:: >>> prefs() Live References SomenastySpider 1 oldest: 15s ago HtmlResponse 3890 oldest: 265s ago XPathSelector 2 oldest: 0s ago Request 3878 oldest: 250s ago The fact that there are so many live responses (and that they're so old) is definitely suspicious, as responses should have a relatively short lifetime compared to Requests. So let's check the oldest response:: >>> from scrapy.utils.trackref import get_oldest >>> r = get_oldest('HtmlResponse') >>> r.url 'http://www.somenastyspider.com/product.php?pid=123' There it is. By looking at the URL of the oldest response we can see it belongs to the ``somenastyspider.com`` spider. We can now go and check the code of that spider to discover the nasty line that is generating the leaks (passing response references inside requests). If you want to iterate over all objects, instead of getting the oldest one, you can use the :func:`iter_all` function:: >>> from scrapy.utils.trackref import iter_all >>> [r.url for r in iter_all('HtmlResponse')] ['http://www.somenastyspider.com/product.php?pid=123', 'http://www.somenastyspider.com/product.php?pid=584', ... Too many spiders? ----------------- If your project has too many spiders, the output of ``prefs()`` can be difficult to read. For this reason, that function has a ``ignore`` argument which can be used to ignore a particular class (and all its subclases). For example, using:: >>> from scrapy.spider import BaseSpider >>> prefs(ignore=BaseSpider) Won't show any live references to spiders. .. module:: scrapy.utils.trackref :synopsis: Track references of live objects scrapy.utils.trackref module ---------------------------- Here are the functions available in the :mod:`~scrapy.utils.trackref` module. .. class:: object_ref Inherit from this class (instead of object) if you want to track live instances with the ``trackref`` module. .. function:: print_live_refs(class_name, ignore=NoneType) Print a report of live references, grouped by class name. :param ignore: if given, all objects from the specified class (or tuple of classes) will be ignored. :type ignore: class or classes tuple .. function:: get_oldest(class_name) Return the oldest object alive with the given class name, or ``None`` if none is found. Use :func:`print_live_refs` first to get a list of all tracked live objects per class name. .. function:: iter_all(class_name) Return an iterator over all objects alive with the given class name, or ``None`` if none is found. Use :func:`print_live_refs` first to get a list of all tracked live objects per class name. .. _topics-leaks-guppy: Debugging memory leaks with Guppy ================================= ``trackref`` provides a very convenient mechanism for tracking down memory leaks, but it only keeps track of the objects that are more likely to cause memory leaks (Requests, Responses, Items, and Selectors). However, there are other cases where the memory leaks could come from other (more or less obscure) objects. If this is your case, and you can't find your leaks using ``trackref``, you still have another resource: the `Guppy library`_. .. _Guppy library: http://pypi.python.org/pypi/guppy If you use ``setuptools``, you can install Guppy with the following command:: easy_install guppy .. 
_setuptools: http://pypi.python.org/pypi/setuptools The telnet console also comes with a built-in shortcut (``hpy``) for accessing Guppy heap objects. Here's an example to view all Python objects available in the heap using Guppy:: >>> x = hpy.heap() >>> x.bytype Partition of a set of 297033 objects. Total size = 52587824 bytes. Index Count % Size % Cumulative % Type 0 22307 8 16423880 31 16423880 31 dict 1 122285 41 12441544 24 28865424 55 str 2 68346 23 5966696 11 34832120 66 tuple 3 227 0 5836528 11 40668648 77 unicode 4 2461 1 2222272 4 42890920 82 type 5 16870 6 2024400 4 44915320 85 function 6 13949 5 1673880 3 46589200 89 types.CodeType 7 13422 5 1653104 3 48242304 92 list 8 3735 1 1173680 2 49415984 94 _sre.SRE_Pattern 9 1209 0 456936 1 49872920 95 scrapy.http.headers.Headers <1676 more rows. Type e.g. '_.more' to view.> You can see that most space is used by dicts. Then, if you want to see from which attribute those dicts are referenced, you could do:: >>> x.bytype[0].byvia Partition of a set of 22307 objects. Total size = 16423880 bytes. Index Count % Size % Cumulative % Referred Via: 0 10982 49 9416336 57 9416336 57 '.__dict__' 1 1820 8 2681504 16 12097840 74 '.__dict__', '.func_globals' 2 3097 14 1122904 7 13220744 80 3 990 4 277200 2 13497944 82 "['cookies']" 4 987 4 276360 2 13774304 84 "['cache']" 5 985 4 275800 2 14050104 86 "['meta']" 6 897 4 251160 2 14301264 87 '[2]' 7 1 0 196888 1 14498152 88 "['moduleDict']", "['modules']" 8 672 3 188160 1 14686312 89 "['cb_kwargs']" 9 27 0 155016 1 14841328 90 '[1]' <333 more rows. Type e.g. '_.more' to view.> As you can see, the Guppy module is very powerful but also requires some deep knowledge about Python internals. For more info about Guppy, refer to the `Guppy documentation`_. .. _Guppy documentation: http://guppy-pe.sourceforge.net/ .. _topics-leaks-without-leaks: Leaks without leaks =================== Sometimes, you may notice that the memory usage of your Scrapy process will only increase, but never decrease. Unfortunately, this could happen even though neither Scrapy nor your project are leaking memory. This is due to a (not so well) known problem of Python, which may not return released memory to the operating system in some cases. For more information on this issue see: * `Python Memory Management `_ * `Python Memory Management Part 2 `_ * `Python Memory Management Part 3 `_ The improvements proposed by Evan Jones, which are detailed in `this paper`_, got merged in Python 2.5, but this only reduces the problem, it doesn't fix it completely. To quote the paper: *Unfortunately, this patch can only free an arena if there are no more objects allocated in it anymore. This means that fragmentation is a large issue. An application could have many megabytes of free memory, scattered throughout all the arenas, but it will be unable to free any of it. This is a problem experienced by all memory allocators. The only way to solve it is to move to a compacting garbage collector, which is able to move objects in memory. This would require significant changes to the Python interpreter.* This problem will be fixed in future Scrapy releases, where we plan to adopt a new process model and run spiders in a pool of recyclable sub-processes. .. _this paper: http://evanjones.ca/memoryallocator/ Scrapy-0.14.4/docs/topics/loaders.rst0000600000016101777760000006030611754531743017564 0ustar buildbotnogroup.. _topics-loaders: ============ Item Loaders ============ .. 
module:: scrapy.contrib.loader :synopsis: Item Loader class Item Loaders provide a convenient mechanism for populating scraped :ref:`Items `. Even though Items can be populated using their own dictionary-like API, the Item Loaders provide a much more convenient API for populating them from a scraping process, by automating some common tasks like parsing the raw extracted data before assigning it. In other words, :ref:`Items ` provide the *container* of scraped data, while Item Loaders provide the mechanism for *populating* that container. Item Loaders are designed to provide a flexible, efficient and easy mechanism for extending and overriding different field parsing rules, either by spider, or by source format (HTML, XML, etc) without becoming a nightmare to maintain. Using Item Loaders to populate items ==================================== To use an Item Loader, you must first instantiate it. You can either instantiate it with an dict-like object (e.g. Item or dict) or without one, in which case an Item is automatically instantiated in the Item Loader constructor using the Item class specified in the :attr:`ItemLoader.default_item_class` attribute. Then, you start collecting values into the Item Loader, typically using :ref:`XPath Selectors `. You can add more than one value to the same item field; the Item Loader will know how to "join" those values later using a proper processing function. Here is a typical Item Loader usage in a :ref:`Spider `, using the :ref:`Product item ` declared in the :ref:`Items chapter `:: from scrapy.contrib.loader import XPathItemLoader from myproject.items import Product def parse(self, response): l = XPathItemLoader(item=Product(), response=response) l.add_xpath('name', '//div[@class="product_name"]') l.add_xpath('name', '//div[@class="product_title"]') l.add_xpath('price', '//p[@id="price"]') l.add_xpath('stock', '//p[@id="stock"]') l.add_value('last_updated', 'today') # you can also use literal values return l.load_item() By quickly looking at that code, we can see the ``name`` field is being extracted from two different XPath locations in the page: 1. ``//div[@class="product_name"]`` 2. ``//div[@class="product_title"]`` In other words, data is being collected by extracting it from two XPath locations, using the :meth:`~XPathItemLoader.add_xpath` method. This is the data that will be assigned to the ``name`` field later. Afterwards, similar calls are used for ``price`` and ``stock`` fields, and finally the ``last_update`` field is populated directly with a literal value (``today``) using a different method: :meth:`~ItemLoader.add_value`. Finally, when all data is collected, the :meth:`ItemLoader.load_item` method is called which actually populates and returns the item populated with the data previously extracted and collected with the :meth:`~XPathItemLoader.add_xpath` and :meth:`~ItemLoader.add_value` calls. .. _topics-loaders-processors: Input and Output processors =========================== An Item Loader contains one input processor and one output processor for each (item) field. The input processor processes the extracted data as soon as it's received (through the :meth:`~XPathItemLoader.add_xpath` or :meth:`~ItemLoader.add_value` methods) and the result of the input processor is collected and kept inside the ItemLoader. After collecting all data, the :meth:`ItemLoader.load_item` method is called to populate and get the populated :class:`~scrapy.item.Item` object. 
That's when the output processor is called with the data previously collected (and processed using the input processor). The result of the output processor is the final value that gets assigned to the item. Let's see an example to illustrate how the input and output processors are called for a particular field (the same applies for any other field):: l = XPathItemLoader(Product(), some_xpath_selector) l.add_xpath('name', xpath1) # (1) l.add_xpath('name', xpath2) # (2) l.add_value('name', 'test') # (3) return l.load_item() # (4) So what happens is: 1. Data from ``xpath1`` is extracted, and passed through the *input processor* of the ``name`` field. The result of the input processor is collected and kept in the Item Loader (but not yet assigned to the item). 2. Data from ``xpath2`` is extracted, and passed through the same *input processor* used in (1). The result of the input processor is appended to the data collected in (1) (if any). 3. This case is similar to the previous ones, except that the value to be collected is assigned directly, instead of being extracted from a XPath. However, the value is still passed through the input processors. In this case, since the value is not iterable it is converted to an iterable of a single element before passing it to the input processor, because input processor always receive iterables. 4. The data collected in (1) and (2) is passed through the *output processor* of the ``name`` field. The result of the output processor is the value assigned to the ``name`` field in the item. It's worth noticing that processors are just callable objects, which are called with the data to be parsed, and return a parsed value. So you can use any function as input or output processor. The only requirement is that they must accept one (and only one) positional argument, which will be an iterator. .. note:: Both input and output processors must receive an iterator as their first argument. The output of those functions can be anything. The result of input processors will be appended to an internal list (in the Loader) containing the collected values (for that field). The result of the output processors is the value that will be finally assigned to the item. The other thing you need to keep in mind is that the values returned by input processors are collected internally (in lists) and then passed to output processors to populate the fields. Last, but not least, Scrapy comes with some :ref:`commonly used processors ` built-in for convenience. Declaring Item Loaders ====================== Item Loaders are declared like Items, by using a class definition syntax. Here is an example:: from scrapy.contrib.loader import ItemLoader from scrapy.contrib.loader.processor import TakeFirst, MapCompose, Join class ProductLoader(ItemLoader): default_output_processor = TakeFirst() name_in = MapCompose(unicode.title) name_out = Join() price_in = MapCompose(unicode.strip) # ... As you can see, input processors are declared using the ``_in`` suffix while output processors are declared using the ``_out`` suffix. And you can also declare a default input/output processors using the :attr:`ItemLoader.default_input_processor` and :attr:`ItemLoader.default_output_processor` attributes. .. _topics-loaders-processors-declaring: Declaring Input and Output Processors ===================================== As seen in the previous section, input and output processors can be declared in the Item Loader definition, and it's very common to declare input processors this way. 
However, there is one more place where you can specify the input and output processors to use: in the :ref:`Item Field ` metadata. Here is an example:: from scrapy.item import Item, Field from scrapy.contrib.loader.processor import MapCompose, Join, TakeFirst from scrapy.utils.markup import remove_entities from myproject.utils import filter_prices class Product(Item): name = Field( input_processor=MapCompose(remove_entities), output_processor=Join(), ) price = Field( default=0, input_processor=MapCompose(remove_entities, filter_prices), output_processor=TakeFirst(), ) The precedence order, for both input and output processors, is as follows: 1. Item Loader field-specific attributes: ``field_in`` and ``field_out`` (most precedence) 2. Field metadata (``input_processor`` and ``output_processor`` key) 3. Item Loader defaults: :meth:`ItemLoader.default_input_processor` and :meth:`ItemLoader.default_output_processor` (least precedence) See also: :ref:`topics-loaders-extending`. .. _topics-loaders-context: Item Loader Context =================== The Item Loader Context is a dict of arbitrary key/values which is shared among all input and output processors in the Item Loader. It can be passed when declaring, instantiating or using Item Loader. They are used to modify the behaviour of the input/output processors. For example, suppose you have a function ``parse_length`` which receives a text value and extracts a length from it:: def parse_length(text, loader_context): unit = loader_context.get('unit', 'm') # ... length parsing code goes here ... return parsed_length By accepting a ``loader_context`` argument the function is explicitly telling the Item Loader that is able to receive an Item Loader context, so the Item Loader passes the currently active context when calling it, and the processor function (``parse_length`` in this case) can thus use them. There are several ways to modify Item Loader context values: 1. By modifying the currently active Item Loader context (:attr:`~ItemLoader.context` attribute):: loader = ItemLoader(product) loader.context['unit'] = 'cm' 2. On Item Loader instantiation (the keyword arguments of Item Loader constructor are stored in the Item Loader context):: loader = ItemLoader(product, unit='cm') 3. On Item Loader declaration, for those input/output processors that support instatiating them with a Item Loader context. :class:`~processor.MapCompose` is one of them:: class ProductLoader(ItemLoader): length_out = MapCompose(parse_length, unit='cm') ItemLoader objects ================== .. class:: ItemLoader([item], \**kwargs) Return a new Item Loader for populating the given Item. If no item is given, one is instantiated automatically using the class in :attr:`default_item_class`. The item and the remaining keyword arguments are assigned to the Loader context (accesible through the :attr:`context` attribute). .. method:: get_value(value, \*processors, \**kwargs) Process the given ``value`` by the given ``processors`` and keyword arguments. Available keyword arguments: :param re: a regular expression to use for extracting data from the given value using :meth:`~scrapy.utils.misc.extract_regex` method, applied before processors :type re: str or compiled regex Examples:: >>> from scrapy.contrib.loader.processor import TakeFirst >>> loader.get_value(u'name: foo', TakeFirst(), unicode.upper, re='name: (.+)') 'FOO` .. method:: add_value(field_name, value, \*processors, \**kwargs) Process and then add the given ``value`` for the given field. 
The value is first passed through :meth:`get_value` by giving the ``processors`` and ``kwargs``, and then passed through the :ref:`field input processor ` and its result appened to the data collected for that field. If the field already contains collected data, the new data is added. The given ``field_name`` can be ``None``, in which case values for multiple fields may be added. And the processed value should be a dict with field_name mapped to values. Examples:: loader.add_value('name', u'Color TV') loader.add_value('colours', [u'white', u'blue']) loader.add_value('length', u'100') loader.add_value('name', u'name: foo', TakeFirst(), re='name: (.+)') loader.add_value(None, {'name': u'foo', 'sex': u'male'}) .. method:: replace_value(field_name, value) Similar to :meth:`add_value` but replaces the collected data with the new value instead of adding it. .. method:: load_item() Populate the item with the data collected so far, and return it. The data collected is first passed through the :ref:`output processors ` to get the final value to assign to each item field. .. method:: get_collected_values(field_name) Return the collected values for the given field. .. method:: get_output_value(field_name) Return the collected values parsed using the output processor, for the given field. This method doesn't populate or modify the item at all. .. method:: get_input_processor(field_name) Return the input processor for the given field. .. method:: get_output_processor(field_name) Return the output processor for the given field. .. attribute:: item The :class:`~scrapy.item.Item` object being parsed by this Item Loader. .. attribute:: context The currently active :ref:`Context ` of this Item Loader. .. attribute:: default_item_class An Item class (or factory), used to instantiate items when not given in the constructor. .. attribute:: default_input_processor The default input processor to use for those fields which don't specify one. .. attribute:: default_output_processor The default output processor to use for those fields which don't specify one. .. class:: XPathItemLoader([item, selector, response], \**kwargs) The :class:`XPathItemLoader` class extends the :class:`ItemLoader` class providing more convenient mechanisms for extracting data from web pages using :ref:`XPath selectors `. :class:`XPathItemLoader` objects accept two more additional parameters in their constructors: :param selector: The selector to extract data from, when using the :meth:`add_xpath` or :meth:`replace_xpath` method. :type selector: :class:`~scrapy.selector.XPathSelector` object :param response: The response used to construct the selector using the :attr:`default_selector_class`, unless the selector argument is given, in which case this argument is ignored. :type response: :class:`~scrapy.http.Response` object .. method:: get_xpath(xpath, \*processors, \**kwargs) Similar to :meth:`ItemLoader.get_value` but receives an XPath instead of a value, which is used to extract a list of unicode strings from the selector associated with this :class:`XPathItemLoader`. :param xpath: the XPath to extract data from :type xpath: str :param re: a regular expression to use for extracting data from the selected XPath region :type re: str or compiled regex Examples:: # HTML snippet:

    <p class="product-name">Color TV</p>

    loader.get_xpath('//p[@class="product-name"]') # HTML snippet:

    <p id="price">the price is $1200</p>

    loader.get_xpath('//p[@id="price"]', TakeFirst(), re='the price is (.*)') .. method:: add_xpath(field_name, xpath, \*processors, \**kwargs) Similar to :meth:`ItemLoader.add_value` but receives an XPath instead of a value, which is used to extract a list of unicode strings from the selector associated with this :class:`XPathItemLoader`. See :meth:`get_xpath` for ``kwargs``. :param xpath: the XPath to extract data from :type xpath: str Examples:: # HTML snippet:

    <p class="product-name">Color TV</p>

    loader.add_xpath('name', '//p[@class="product-name"]') # HTML snippet:

    <p id="price">the price is $1200</p>

    loader.add_xpath('price', '//p[@id="price"]', re='the price is (.*)') .. method:: replace_xpath(field_name, xpath, \*processors, \**kwargs) Similar to :meth:`add_xpath` but replaces collected data instead of adding it. .. attribute:: default_selector_class The class used to construct the :attr:`selector` of this :class:`XPathItemLoader`, if only a response is given in the constructor. If a selector is given in the constructor this attribute is ignored. This attribute is sometimes overridden in subclasses. .. attribute:: selector The :class:`~scrapy.selector.XPathSelector` object to extract data from. It's either the selector given in the constructor or one created from the response given in the constructor using the :attr:`default_selector_class`. This attribute is meant to be read-only. .. _topics-loaders-extending: Reusing and extending Item Loaders ================================== As your project grows bigger and acquires more and more spiders, maintenance becomes a fundamental problem, specially when you have to deal with many different parsing rules for each spider, having a lot of exceptions, but also wanting to reuse the common processors. Item Loaders are designed to ease the maintenance burden of parsing rules, without losing flexibility and, at the same time, providing a convenient mechanism for extending and overriding them. For this reason Item Loaders support traditional Python class inheritance for dealing with differences of specific spiders (or groups of spiders). Suppose, for example, that some particular site encloses their product names in three dashes (ie. ``---Plasma TV---``) and you don't want to end up scraping those dashes in the final product names. Here's how you can remove those dashes by reusing and extending the default Product Item Loader (``ProductLoader``):: from scrapy.contrib.loader.processor import MapCompose from myproject.ItemLoaders import ProductLoader def strip_dashes(x): return x.strip('-') class SiteSpecificLoader(ProductLoader): name_in = MapCompose(strip_dashes, ProductLoader.name_in) Another case where extending Item Loaders can be very helpful is when you have multiple source formats, for example XML and HTML. In the XML version you may want to remove ``CDATA`` occurrences. Here's an example of how to do it:: from scrapy.contrib.loader.processor import MapCompose from myproject.ItemLoaders import ProductLoader from myproject.utils.xml import remove_cdata class XmlProductLoader(ProductLoader): name_in = MapCompose(remove_cdata, ProductLoader.name_in) And that's how you typically extend input processors. As for output processors, it is more common to declare them in the field metadata, as they usually depend only on the field and not on each specific site parsing rule (as input processors do). See also: :ref:`topics-loaders-processors-declaring`. There are many other possible ways to extend, inherit and override your Item Loaders, and different Item Loaders hierarchies may fit better for different projects. Scrapy only provides the mechanism; it doesn't impose any specific organization of your Loaders collection - that's up to you and your project's needs. .. _topics-loaders-available-processors: Available built-in processors ============================= .. module:: scrapy.contrib.loader.processor :synopsis: A collection of processors to use with Item Loaders Even though you can use any callable function as input and output processors, Scrapy provides some commonly used processors, which are described below. 
Some of them, like the :class:`MapCompose` (which is typically used as input processor) compose the output of several functions executed in order, to produce the final parsed value. Here is a list of all built-in processors: .. class:: Identity The simplest processor, which doesn't do anything. It returns the original values unchanged. It doesn't receive any constructor arguments nor accepts Loader contexts. Example:: >>> from scrapy.contrib.loader.processor import Identity >>> proc = Identity() >>> proc(['one', 'two', 'three']) ['one', 'two', 'three'] .. class:: TakeFirst Return the first non-null/non-empty value from the values received, so it's typically used as an output processor to single-valued fields. It doesn't receive any constructor arguments, nor accept Loader contexts. Example:: >>> from scrapy.contrib.loader.processor import TakeFirst >>> proc = TakeFirst() >>> proc(['', 'one', 'two', 'three']) 'one' .. class:: Join(separator=u' ') Returns the values joined with the separator given in the constructor, which defaults to ``u' '``. It doesn't accept Loader contexts. When using the default separator, this processor is equivalent to the function: ``u' '.join`` Examples:: >>> from scrapy.contrib.loader.processor import Join >>> proc = Join() >>> proc(['one', 'two', 'three']) u'one two three' >>> proc = Join('
    <br>') >>> proc(['one', 'two', 'three']) u'one<br>two<br>
    three' .. class:: Compose(\*functions, \**default_loader_context) A processor which is constructed from the composition of the given functions. This means that each input value of this processor is passed to the first function, and the result of that function is passed to the second function, and so on, until the last function returns the output value of this processor. By default, stop process on None value. This behaviour can be changed by passing keyword argument stop_on_none=False. Example:: >>> from scrapy.contrib.loader.processor import Compose >>> proc = Compose(lambda v: v[0], str.upper) >>> proc(['hello', 'world']) 'HELLO' Each function can optionally receive a ``loader_context`` parameter. For those which do, this processor will pass the currently active :ref:`Loader context ` through that parameter. The keyword arguments passed in the constructor are used as the default Loader context values passed to each function call. However, the final Loader context values passed to functions are overridden with the currently active Loader context accessible through the :meth:`ItemLoader.context` attribute. .. class:: MapCompose(\*functions, \**default_loader_context) A processor which is constructed from the composition of the given functions, similar to the :class:`Compose` processor. The difference with this processor is the way internal results are passed among functions, which is as follows: The input value of this processor is *iterated* and each element is passed to the first function, and the result of that function (for each element) is concatenated to construct a new iterable, which is then passed to the second function, and so on, until the last function is applied for each value of the list of values collected so far. The output values of the last function are concatenated together to produce the output of this processor. Each particular function can return a value or a list of values, which is flattened with the list of values returned by the same function applied to the other input values. The functions can also return ``None`` in which case the output of that function is ignored for further processing over the chain. This processor provides a convenient way to compose functions that only work with single values (instead of iterables). For this reason the :class:`MapCompose` processor is typically used as input processor, since data is often extracted using the :meth:`~scrapy.selector.XPathSelector.extract` method of :ref:`selectors `, which returns a list of unicode strings. The example below should clarify how it works:: >>> def filter_world(x): ... return None if x == 'world' else x ... >>> from scrapy.contrib.loader.processor import MapCompose >>> proc = MapCompose(filter_world, unicode.upper) >>> proc([u'hello', u'world', u'this', u'is', u'scrapy']) [u'HELLO, u'THIS', u'IS', u'SCRAPY'] As with the Compose processor, functions can receive Loader contexts, and constructor keyword arguments are used as default context values. See :class:`Compose` processor for more info. Scrapy-0.14.4/docs/topics/telnetconsole.rst0000600000016101777760000001461611754531743021014 0ustar buildbotnogroup.. _topics-telnetconsole: ============== Telnet Console ============== .. module:: scrapy.telnet :synopsis: The Telnet Console Scrapy comes with a built-in telnet console for inspecting and controlling a Scrapy running process. The telnet console is just a regular python shell running inside the Scrapy process, so you can do literally anything from it. 
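As a quick illustration of what you can do from the console, here is a brief sample session using some of the default shortcuts described in the sections below (``est``, ``prefs`` and ``engine``)::

    telnet localhost 6023
    >>> est()            # print a report of the current engine status
    >>> prefs()          # show live object references (see Debugging memory leaks)
    >>> engine.pause()   # pause the running crawl while you inspect things
    >>> engine.unpause() # and resume it again
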
The telnet console is a :ref:`built-in Scrapy extension ` which comes enabled by default, but you can also disable it if you want. For more information about the extension itself see :ref:`topics-extensions-ref-telnetconsole`. .. highlight:: none How to access the telnet console ================================ The telnet console listens in the TCP port defined in the :setting:`TELNETCONSOLE_PORT` setting, which defaults to ``6023``. To access the console you need to type:: telnet localhost 6023 >>> You need the telnet program which comes installed by default in Windows, and most Linux distros. Available variables in the telnet console ========================================= The telnet console is like a regular Python shell running inside the Scrapy process, so you can do anything from it including importing new modules, etc. However, the telnet console comes with some default variables defined for convenience: +----------------+-------------------------------------------------------------------+ | Shortcut | Description | +================+===================================================================+ | ``crawler`` | the Scrapy Crawler object (``scrapy.crawler``) | +----------------+-------------------------------------------------------------------+ | ``engine`` | the Scrapy Engine object (``scrapy.core.engine``) | +----------------+-------------------------------------------------------------------+ | ``spider`` | the spider object (only if there is a single spider opened) | +----------------+-------------------------------------------------------------------+ | ``slot`` | the engine slot (only if there is a single spider opened) | +----------------+-------------------------------------------------------------------+ | ``extensions`` | the Extension Manager (``scrapy.project.crawler.extensions``) | +----------------+-------------------------------------------------------------------+ | ``stats`` | the Stats Collector (``scrapy.stats.stats``) | +----------------+-------------------------------------------------------------------+ | ``settings`` | the Scrapy settings object (``scrapy.conf.settings``) | +----------------+-------------------------------------------------------------------+ | ``est`` | print a report of the current engine status | +----------------+-------------------------------------------------------------------+ | ``prefs`` | for memory debugging (see :ref:`topics-leaks`) | +----------------+-------------------------------------------------------------------+ | ``p`` | a shortcut to the `pprint.pprint`_ function | +----------------+-------------------------------------------------------------------+ | ``hpy`` | for memory debugging (see :ref:`topics-leaks`) | +----------------+-------------------------------------------------------------------+ .. 
_pprint.pprint: http://docs.python.org/library/pprint.html#pprint.pprint Telnet console usage examples ============================= Here are some example tasks you can do with the telnet console: View engine status ------------------ You can use the ``est()`` method of the Scrapy engine to quickly show its state using the telnet console:: telnet localhost 6023 >>> est() Execution engine status time()-engine.start_time : 21.3188259602 engine.is_idle() : False engine.has_capacity() : True engine.scheduler.is_idle() : False len(engine.scheduler.pending_requests) : 1 engine.downloader.is_idle() : False len(engine.downloader.slots) : 1 engine.scraper.is_idle() : False len(engine.scraper.slots) : 1 Spider: engine.spider_is_idle(spider) : False engine.slots[spider].closing : False len(engine.scheduler.pending_requests[spider]) : 11504 len(engine.downloader.slots[spider].queue) : 9 len(engine.downloader.slots[spider].active) : 17 len(engine.downloader.slots[spider].transferring) : 8 engine.downloader.slots[spider].lastseen : 1311311093.61 len(engine.scraper.slots[spider].queue) : 0 len(engine.scraper.slots[spider].active) : 0 engine.scraper.slots[spider].active_size : 0 engine.scraper.slots[spider].itemproc_size : 0 engine.scraper.slots[spider].needs_backout() : False Pause, resume and stop the Scrapy engine ---------------------------------------- To pause:: telnet localhost 6023 >>> engine.pause() >>> To resume:: telnet localhost 6023 >>> engine.unpause() >>> To stop:: telnet localhost 6023 >>> engine.stop() Connection closed by foreign host. Telnet Console signals ====================== .. signal:: update_telnet_vars .. function:: update_telnet_vars(telnet_vars) Sent just before the telnet console is opened. You can hook up to this signal to add, remove or update the variables that will be available in the telnet local namespace. In order to do that, you need to update the ``telnet_vars`` dict in your handler. :param telnet_vars: the dict of telnet variables :type telnet_vars: dict Telnet settings =============== These are the settings that control the telnet console's behaviour: .. setting:: TELNETCONSOLE_PORT TELNETCONSOLE_PORT ------------------ Default: ``[6023, 6073]`` The port range to use for the etlnet console. If set to ``None`` or ``0``, a dynamically assigned port is used. .. setting:: TELNETCONSOLE_HOST TELNETCONSOLE_HOST ------------------ Default: ``'0.0.0.0'`` The interface the telnet console should listen on Scrapy-0.14.4/docs/topics/firebug.rst0000600000016101777760000001436211754531743017557 0ustar buildbotnogroup.. _topics-firebug: ========================== Using Firebug for scraping ========================== .. note:: Google Directory, the example website used in this guide is no longer available as it `has been shut down by Google`_. The concepts in this guide are still valid though. If you want to update this guide to use a new (working) site, your contribution will be more than welcome!. See :ref:`topics-contributing` for information on how to do so. Introduction ============ This document explains how to use `Firebug`_ (a Firefox add-on) to make the scraping process easier and more fun. For other useful Firefox add-ons see :ref:`topics-firefox-addons`. There are some caveats with using Firefox add-ons to inspect pages, see :ref:`topics-firefox-livedom`. In this example, we'll show how to use `Firebug`_ to scrape data from the `Google Directory`_, which contains the same data as the `Open Directory Project`_ used in the :ref:`tutorial ` but with a different face. .. 
_Firebug: http://getfirebug.com .. _Google Directory: http://directory.google.com/ .. _Open Directory Project: http://www.dmoz.org Firebug comes with a very useful feature called `Inspect Element`_ which allows you to inspect the HTML code of the different page elements just by hovering your mouse over them. Otherwise you would have to search for the tags manually through the HTML body which can be a very tedious task. .. _Inspect Element: http://www.youtube.com/watch?v=-pT_pDe54aA In the following screenshot you can see the `Inspect Element`_ tool in action. .. image:: _images/firebug1.png :width: 913 :height: 600 :alt: Inspecting elements with Firebug At first sight, we can see that the directory is divided in categories, which are also divided in subcategories. However, it seems that there are more subcategories than the ones being shown in this page, so we'll keep looking: .. image:: _images/firebug2.png :width: 819 :height: 629 :alt: Inspecting elements with Firebug As expected, the subcategories contain links to other subcategories, and also links to actual websites, which is the purpose of the directory. Getting links to follow ======================= By looking at the category URLs we can see they share a pattern: http://directory.google.com/Category/Subcategory/Another_Subcategory Once we know that, we are able to construct a regular expression to follow those links. For example, the following one:: directory\.google\.com/[A-Z][a-zA-Z_/]+$ So, based on that regular expression we can create the first crawling rule:: Rule(SgmlLinkExtractor(allow='directory.google.com/[A-Z][a-zA-Z_/]+$', ), 'parse_category', follow=True, ), The :class:`~scrapy.contrib.spiders.Rule` object instructs :class:`~scrapy.contrib.spiders.CrawlSpider` based spiders how to follow the category links. ``parse_category`` will be a method of the spider which will process and extract data from those pages. This is how the spider would look so far:: from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor from scrapy.contrib.spiders import CrawlSpider, Rule class GoogleDirectorySpider(CrawlSpider): name = 'directory.google.com' allowed_domains = ['directory.google.com'] start_urls = ['http://directory.google.com/'] rules = ( Rule(SgmlLinkExtractor(allow='directory\.google\.com/[A-Z][a-zA-Z_/]+$'), 'parse_category', follow=True, ), ) def parse_category(self, response): # write the category page data extraction code here pass Extracting the data =================== Now we're going to write the code to extract data from those pages. With the help of Firebug, we'll take a look at some page containing links to websites (say http://directory.google.com/Top/Arts/Awards/) and find out how we can extract those links using :ref:`XPath selectors `. We'll also use the :ref:`Scrapy shell ` to test those XPath's and make sure they work as we expect. .. image:: _images/firebug3.png :width: 965 :height: 751 :alt: Inspecting elements with Firebug As you can see, the page markup is not very descriptive: the elements don't contain ``id``, ``class`` or any attribute that clearly identifies them, so we''ll use the ranking bars as a reference point to select the data to extract when we construct our XPaths. After using FireBug, we can see that each link is inside a ``td`` tag, which is itself inside a ``tr`` tag that also contains the link's ranking bar (in another ``td``). So we can select the ranking bar, then find its parent (the ``tr``), and then finally, the link's ``td`` (which contains the data we want to scrape). 
This results in the following XPath:: //td[descendant::a[contains(@href, "#pagerank")]]/following-sibling::td//a It's important to use the :ref:`Scrapy shell <topics-shell>` to test these complex XPath expressions and make sure they work as expected. Basically, that expression looks for the ranking bar's ``td`` element (that is, a ``td`` that has a descendant ``a`` element whose ``href`` attribute contains the string ``#pagerank``), and then selects the ``a`` elements inside its following sibling ``td``, which contain the data we want to scrape. Of course, this is not the only possible XPath, and maybe not the simplest one to select that data. Another approach could be, for example, to find any ``font`` tags that have the grey colour of the links. Finally, we can write our ``parse_category()`` method:: def parse_category(self, response): hxs = HtmlXPathSelector(response) # The path to website links in directory page links = hxs.select('//td[descendant::a[contains(@href, "#pagerank")]]/following-sibling::td/font') for link in links: item = DirectoryItem() item['name'] = link.select('a/text()').extract() item['url'] = link.select('a/@href').extract() item['description'] = link.select('font[2]/text()').extract() yield item Be aware that you may find some elements which appear in Firebug but not in the original HTML, such as the typical case of ``<tbody>
    `` elements. or tags which Therefer in page HTML sources may on Firebug inspects the live DOM .. _has been shut down by Google: http://searchenginewatch.com/article/2096661/Google-Directory-Has-Been-Shut-Down Scrapy-0.14.4/docs/topics/exporters.rst0000600000016101777760000003222611754531743020166 0ustar buildbotnogroup.. _topics-exporters: ============== Item Exporters ============== .. module:: scrapy.contrib.exporter :synopsis: Item Exporters Once you have scraped your Items, you often want to persist or export those items, to use the data in some other application. That is, after all, the whole purpose of the scraping process. For this purpose Scrapy provides a collection of Item Exporters for different output formats, such as XML, CSV or JSON. Using Item Exporters ==================== If you are in a hurry, and just want to use an Item Exporter to output scraped data see the :ref:`topics-feed-exports`. Otherwise, if you want to know how Item Exporters work or need more custom functionality (not covered by the default exports), continue reading below. In order to use an Item Exporter, you must instantiate it with its required args. Each Item Exporter requires different arguments, so check each exporter documentation to be sure, in :ref:`topics-exporters-reference`. After you have instantiated you exporter, you have to: 1. call the method :meth:`~BaseItemExporter.start_exporting` in order to signal the beginning of the exporting process 2. call the :meth:`~BaseItemExporter.export_item` method for each item you want to export 3. and finally call the :meth:`~BaseItemExporter.finish_exporting` to signal the end of the exporting process Here you can see an :doc:`Item Pipeline ` which uses an Item Exporter to export scraped items to different files, one per spider:: from scrapy.xlib.pydispatch import dispatcher from scrapy import signals from scrapy.contrib.exporter import XmlItemExporter class XmlExportPipeline(object): def __init__(self): dispatcher.connect(self.spider_opened, signals.spider_opened) dispatcher.connect(self.spider_closed, signals.spider_closed) self.files = {} def spider_opened(self, spider): file = open('%s_products.xml' % spider.name, 'w+b') self.files[spider] = file self.exporter = XmlItemExporter(file) self.exporter.start_exporting() def spider_closed(self, spider): self.exporter.finish_exporting() file = self.files.pop(spider) file.close() def process_item(self, item, spider): self.exporter.export_item(item) return item .. _topics-exporters-field-serialization: Serialization of item fields ============================ By default, the field values are passed unmodified to the underlying serialization library, and the decision of how to serialize them is delegated to each particular serialization library. However, you can customize how each field value is serialized *before it is passed to the serialization library*. There are two ways to customize how a field will be serialized, which are described next. .. _topics-exporters-serializers: 1. Declaring a serializer in the field -------------------------------------- You can declare a serializer in the :ref:`field metadata `. The serializer must be a callable which receives a value and returns its serialized form. Example:: from scrapy.item import Item, Field def serialize_price(value): return '$ %s' % str(value) class Product(Item): name = Field() price = Field(serializer=serialize_price) 2. 
Overriding the serialize_field() method ------------------------------------------ You can also override the :meth:`~BaseItemExporter.serialize` method to customize how your field value will be exported. Make sure you call the base class :meth:`~BaseItemExporter.serialize` method after your custom code. Example:: from scrapy.contrib.exporter import XmlItemExporter class ProductXmlExporter(XmlItemExporter): def serialize_field(self, field, name, value): if field == 'price': return '$ %s' % str(value) return super(Product, self).serialize_field(field, name, value) .. _topics-exporters-reference: Built-in Item Exporters reference ================================= Here is a list of the Item Exporters bundled with Scrapy. Some of them contain output examples, which assume you're exporting these two items:: Item(name='Color TV', price='1200') Item(name='DVD player', price='200') BaseItemExporter ---------------- .. class:: BaseItemExporter(fields_to_export=None, export_empty_fields=False, encoding='utf-8') This is the (abstract) base class for all Item Exporters. It provides support for common features used by all (concrete) Item Exporters, such as defining what fields to export, whether to export empty fields, or which encoding to use. These features can be configured through the constructor arguments which populate their respective instance attributes: :attr:`fields_to_export`, :attr:`export_empty_fields`, :attr:`encoding`. .. method:: export_item(item) Exports the given item. This method must be implemented in subclasses. .. method:: serialize_field(field, name, value) Return the serialized value for the given field. You can override this method (in your custom Item Exporters) if you want to control how a particular field or value will be serialized/exported. By default, this method looks for a serializer :ref:`declared in the item field ` and returns the result of applying that serializer to the value. If no serializer is found, it returns the value unchanged except for ``unicode`` values which are encoded to ``str`` using the encoding declared in the :attr:`encoding` attribute. :param field: the field being serialized :type field: :class:`~scrapy.item.Field` object :param name: the name of the field being serialized :type name: str :param value: the value being serialized .. method:: start_exporting() Signal the beginning of the exporting process. Some exporters may use this to generate some required header (for example, the :class:`XmlItemExporter`). You must call this method before exporting any items. .. method:: finish_exporting() Signal the end of the exporting process. Some exporters may use this to generate some required footer (for example, the :class:`XmlItemExporter`). You must always call this method after you have no more items to export. .. attribute:: fields_to_export A list with the name of the fields that will be exported, or None if you want to export all fields. Defaults to None. Some exporters (like :class:`CsvItemExporter`) respect the order of the fields defined in this attribute. .. attribute:: export_empty_fields Whether to include empty/unpopulated item fields in the exported data. Defaults to ``False``. Some exporters (like :class:`CsvItemExporter`) ignore this attribute and always export all empty fields. .. attribute:: encoding The encoding that will be used to encode unicode values. This only affects unicode values (which are always serialized to str using this encoding). Other value types are passed unchanged to the specific serialization library. .. 
highlight:: none XmlItemExporter --------------- .. class:: XmlItemExporter(file, item_element='item', root_element='items', \**kwargs) Exports Items in XML format to the specified file object. :param file: the file-like object to use for exporting the data. :param root_element: The name of root element in the exported XML. :type root_element: str :param item_element: The name of each item element in the exported XML. :type item_element: str The additional keyword arguments of this constructor are passed to the :class:`BaseItemExporter` constructor. A typical output of this exporter would be:: Color TV 1200 DVD player 200 Unless overriden in the :meth:`serialize_field` method, multi-valued fields are exported by serializing each value inside a ```` element. This is for convenience, as multi-valued fields are very common. For example, the item:: Item(name=['John', 'Doe'], age='23') Would be serialized as:: John Doe 23 CsvItemExporter --------------- .. class:: CsvItemExporter(file, include_headers_line=True, join_multivalued=',', \**kwargs) Exports Items in CSV format to the given file-like object. If the :attr:`fields_to_export` attribute is set, it will be used to define the CSV columns and their order. The :attr:`export_empty_fields` attribute has no effect on this exporter. :param file: the file-like object to use for exporting the data. :param include_headers_line: If enabled, makes the exporter output a header line with the field names taken from :attr:`BaseItemExporter.fields_to_export` or the first exported item fields. :type include_headers_line: boolean :param join_multivalued: The char (or chars) that will be used for joining multi-valued fields, if found. :type include_headers_line: str The additional keyword arguments of this constructor are passed to the :class:`BaseItemExporter` constructor, and the leftover arguments to the `csv.writer`_ constructor, so you can use any `csv.writer` constructor argument to customize this exporter. A typical output of this exporter would be:: product,price Color TV,1200 DVD player,200 .. _csv.writer: http://docs.python.org/library/csv.html#csv.writer PickleItemExporter ------------------ .. class:: PickleItemExporter(file, protocol=0, \**kwargs) Exports Items in pickle format to the given file-like object. :param file: the file-like object to use for exporting the data. :param protocol: The pickle protocol to use. :type protocol: int For more information, refer to the `pickle module documentation`_. The additional keyword arguments of this constructor are passed to the :class:`BaseItemExporter` constructor. Pickle isn't a human readable format, so no output examples are provided. .. _pickle module documentation: http://docs.python.org/library/pickle.html PprintItemExporter ------------------ .. class:: PprintItemExporter(file, \**kwargs) Exports Items in pretty print format to the specified file object. :param file: the file-like object to use for exporting the data. The additional keyword arguments of this constructor are passed to the :class:`BaseItemExporter` constructor. A typical output of this exporter would be:: {'name': 'Color TV', 'price': '1200'} {'name': 'DVD player', 'price': '200'} Longer lines (when present) are pretty-formatted. JsonItemExporter ---------------- .. class:: JsonItemExporter(file, \**kwargs) Exports Items in JSON format to the specified file-like object, writing all objects as a list of objects. 
The additional constructor arguments are passed to the :class:`BaseItemExporter` constructor, and the leftover arguments to the `JSONEncoder`_ constructor, so you can use any `JSONEncoder`_ constructor argument to customize this exporter. :param file: the file-like object to use for exporting the data. A typical output of this exporter would be:: [{"name": "Color TV", "price": "1200"}, {"name": "DVD player", "price": "200"}] .. _json-with-large-data: .. warning:: JSON is very simple and flexible serialization format, but it doesn't scale well for large amounts of data since incremental (aka. stream-mode) parsing is not well supported (if at all) among JSON parsers (on any language), and most of them just parse the entire object in memory. If you want the power and simplicity of JSON with a more stream-friendly format, consider using :class:`JsonLinesItemExporter` instead, or splitting the output in multiple chunks. .. _JSONEncoder: http://docs.python.org/library/json.html#json.JSONEncoder JsonLinesItemExporter --------------------- .. class:: JsonLinesItemExporter(file, \**kwargs) Exports Items in JSON format to the specified file-like object, writing one JSON-encoded item per line. The additional constructor arguments are passed to the :class:`BaseItemExporter` constructor, and the leftover arguments to the `JSONEncoder`_ constructor, so you can use any `JSONEncoder`_ constructor argument to customize this exporter. :param file: the file-like object to use for exporting the data. A typical output of this exporter would be:: {"name": "Color TV", "price": "1200"} {"name": "DVD player", "price": "200"} Unlike the one produced by :class:`JsonItemExporter`, the format produced by this exporter is well suited for serializing large amounts of data. .. _JSONEncoder: http://docs.python.org/library/json.html#json.JSONEncoder Scrapy-0.14.4/docs/topics/link-extractors.rst0000600000016101777760000001432311754531743021262 0ustar buildbotnogroup.. _topics-link-extractors: =============== Link Extractors =============== LinkExtractors are objects whose only purpose is to extract links from web pages (:class:`scrapy.http.Response` objects) which will be eventually followed. There are two Link Extractors available in Scrapy by default, but you create your own custom Link Extractors to suit your needs by implementing a simple interface. The only public method that every LinkExtractor has is ``extract_links``, which receives a :class:`~scrapy.http.Response` object and returns a list of links. Link Extractors are meant to be instantiated once and their ``extract_links`` method called several times with different responses, to extract links to follow. Link extractors are used in the :class:`~scrapy.contrib.spiders.CrawlSpider` class (available in Scrapy), through a set of rules, but you can also use it in your spiders, even if you don't subclass from :class:`~scrapy.contrib.spiders.CrawlSpider`, as its purpose is very simple: to extract links. .. _topics-link-extractors-ref: Built-in link extractors reference ================================== .. module:: scrapy.contrib.linkextractors :synopsis: Link extractors classes All available link extractors classes bundled with Scrapy are provided in the :mod:`scrapy.contrib.linkextractors` module. .. module:: scrapy.contrib.linkextractors.sgml :synopsis: SGMLParser-based link extractors SgmlLinkExtractor ----------------- .. 
class:: SgmlLinkExtractor(allow=(), deny=(), allow_domains=(), deny_domains=(), deny_extensions=None, restrict_xpaths=(), tags=('a', 'area'), attrs=('href'), canonicalize=True, unique=True, process_value=None) The SgmlLinkExtractor extends the base :class:`BaseSgmlLinkExtractor` by providing additional filters that you can specify to extract links, including regular expressions patterns that the links must match to be extracted. All those filters are configured through these constructor parameters: :param allow: a single regular expression (or list of regular expressions) that the (absolute) urls must match in order to be extracted. If not given (or empty), it will match all links. :type allow: a regular expression (or list of) :param deny: a single regular expression (or list of regular expressions) that the (absolute) urls must match in order to be excluded (ie. not extracted). It has precedence over the ``allow`` parameter. If not given (or empty) it won't exclude any links. :type deny: a regular expression (or list of) :param allow_domains: a single value or a list of string containing domains which will be considered for extracting the links :type allow_domains: str or list :param deny_domains: a single value or a list of strings containing domains which won't be considered for extracting the links :type deny_domains: str or list :param deny_extensions: a list of extensions that should be ignored when extracting links. If not given, it will default to the ``IGNORED_EXTENSIONS`` list defined in the `scrapy.linkextractor`_ module. :type deny_extensions: list :param restrict_xpaths: is a XPath (or list of XPath's) which defines regions inside the response where links should be extracted from. If given, only the text selected by those XPath will be scanned for links. See examples below. :type restrict_xpaths: str or list :param tags: a tag or a list of tags to consider when extracting links. Defaults to ``('a', 'area')``. :type tags: str or list :param attrs: list of attrbitues which should be considered when looking for links to extract (only for those tags specified in the ``tags`` parameter). Defaults to ``('href',)`` :type attrs: boolean :param canonicalize: canonicalize each extracted url (using scrapy.utils.url.canonicalize_url). Defaults to ``True``. :type canonicalize: boolean :param unique: whether duplicate filtering should be applied to extracted links. :type unique: boolean :param process_value: see ``process_value`` argument of :class:`BaseSgmlLinkExtractor` class constructor :type process_value: callable BaseSgmlLinkExtractor --------------------- .. class:: BaseSgmlLinkExtractor(tag="a", attr="href", unique=False, process_value=None) The purpose of this Link Extractor is only to serve as a base class for the :class:`SgmlLinkExtractor`. You should use that one instead. The constructor arguments are: :param tag: either a string (with the name of a tag) or a function that receives a tag name and returns ``True`` if links should be extracted from that tag, or ``False`` if they shouldn't. Defaults to ``'a'``. request (once it's downloaded) as its first parameter. For more information, see :ref:`topics-request-response-ref-request-callback-arguments`. :type tag: str or callable :param attr: either string (with the name of a tag attribute), or a function that receives an attribute name and returns ``True`` if links should be extracted from it, or ``False`` if they shouldn't. Defaults to ``href``. 
:type attr: str or callable :param unique: is a boolean that specifies if a duplicate filtering should be applied to links extracted. :type unique: boolean :param process_value: a function which receives each value extracted from the tag and attributes scanned and can modify the value and return a new one, or return ``None`` to ignore the link altogether. If not given, ``process_value`` defaults to ``lambda x: x``. .. highlight:: html For example, to extract links from this code:: Link text .. highlight:: python You can use the following function in ``process_value``:: def process_value(value): m = re.search("javascript:goToPage\('(.*?)'", value) if m: return m.group(1) :type process_value: callable .. _scrapy.linkextractor: https://github.com/scrapy/scrapy/blob/master/scrapy/linkextractor.py Scrapy-0.14.4/docs/topics/spiders.rst0000600000016101777760000005425311754531743017610 0ustar buildbotnogroup.. _topics-spiders: ======= Spiders ======= Spiders are classes which define how a certain site (or domain) will be scraped, including how to crawl the site and how to extract scraped items from their pages. In other words, Spiders are the place where you define the custom behaviour for crawling and parsing pages for a particular site. For spiders, the scraping cycle goes through something like this: 1. You start by generating the initial Requests to crawl the first URLs, and specify a callback function to be called with the response downloaded from those requests. The first requests to perform are obtained by calling the :meth:`~scrapy.spider.BaseSpider.start_requests` method which (by default) generates :class:`~scrapy.http.Request` for the URLs specified in the :attr:`~scrapy.spider.BaseSpider.start_urls` and the :attr:`~scrapy.spider.BaseSpider.parse` method as callback function for the Requests. 2. In the callback function, you parse the response (web page) and return either :class:`~scrapy.item.Item` objects, :class:`~scrapy.http.Request` objects, or an iterable of both. Those Requests will also contain a callback (maybe the same) and will then be downloaded by Scrapy and then their response handled by the specified callback. 3. In callback functions, you parse the page contents, typically using :ref:`topics-selectors` (but you can also use BeautifuSoup, lxml or whatever mechanism you prefer) and generate items with the parsed data. 4. Finally, the items returned from the spider will be typically persisted in some Item pipeline. Even though this cycle applies (more or less) to any kind of spider, there are different kinds of default spiders bundled into Scrapy for different purposes. We will talk about those types here. .. _topics-spiders-ref: Built-in spiders reference ========================== Scrapy comes with some useful generic spiders that you can use, to subclass your spiders from. Their aim is to provide convenient functionality for a few common scraping cases, like following all links on a site based on certain rules, crawling from `Sitemaps`_, or parsing a XML/CSV feed. For the examples used in the following spiders, we'll assume you have a project with a ``TestItem`` declared in a ``myproject.items`` module:: from scrapy.item import Item class TestItem(Item): id = Field() name = Field() description = Field() .. module:: scrapy.spider :synopsis: Spiders base class, spider manager and spider middleware BaseSpider ---------- .. 
class:: BaseSpider() This is the simplest spider, and the one from which every other spider must inherit (either the ones that come bundled with Scrapy, or the ones that you write yourself). It doesn't provide any special functionality. It just requests the given ``start_urls``/``start_requests``, and calls the spider's method ``parse`` for each of the resulting responses. .. attribute:: name A string which defines the name for this spider. The spider name is how the spider is located (and instantiated) by Scrapy, so it must be unique. However, nothing prevents you from instantiating more than one instance of the same spider. This is the most important spider attribute and it's required. It is recommended to name your spiders after the domain they crawl. .. attribute:: allowed_domains An optional list of strings containing domains that this spider is allowed to crawl. Requests for URLs not belonging to the domain names specified in this list won't be followed if :class:`~scrapy.contrib.spidermiddleware.offsite.OffsiteMiddleware` is enabled. .. attribute:: start_urls A list of URLs where the spider will begin to crawl from, when no particular URLs are specified. So, the first pages downloaded will be those listed here. The subsequent URLs will be generated successively from data contained in the start URLs. .. method:: start_requests() This method must return an iterable with the first Requests to crawl for this spider. This is the method called by Scrapy when the spider is opened for scraping when no particular URLs are specified. If particular URLs are specified, the :meth:`make_requests_from_url` method is used instead to create the Requests. This method is also called only once from Scrapy, so it's safe to implement it as a generator. The default implementation uses :meth:`make_requests_from_url` to generate Requests for each url in :attr:`start_urls`. If you want to change the Requests used to start scraping a domain, this is the method to override. For example, if you need to start by logging in using a POST request, you could do:: def start_requests(self): return [FormRequest("http://www.example.com/login", formdata={'user': 'john', 'pass': 'secret'}, callback=self.logged_in)] def logged_in(self, response): # here you would extract links to follow and return Requests for # each of them, with another callback pass .. method:: make_requests_from_url(url) A method that receives a URL and returns a :class:`~scrapy.http.Request` object (or a list of :class:`~scrapy.http.Request` objects) to scrape. This method is used to construct the initial requests in the :meth:`start_requests` method, and is typically used to convert urls to requests. Unless overridden, this method returns Requests with the :meth:`parse` method as their callback function, and with the ``dont_filter`` parameter enabled (see the :class:`~scrapy.http.Request` class for more info). .. method:: parse(response) This is the default callback used by Scrapy to process downloaded responses, when their requests don't specify a callback. The ``parse`` method is in charge of processing the response and returning scraped data and/or more URLs to follow. Other Request callbacks have the same requirements as the :class:`BaseSpider` class. This method, as well as any other Request callback, must return an iterable of :class:`~scrapy.http.Request` and/or :class:`~scrapy.item.Item` objects. :param response: the response to parse :type response: :class:`~scrapy.http.Response` ..
method:: log(message, [level, component]) Log a message using the :func:`scrapy.log.msg` function, automatically populating the spider argument with the :attr:`name` of this spider. For more information see :ref:`topics-logging`. BaseSpider example ~~~~~~~~~~~~~~~~~~ Let's see an example:: from scrapy import log # This module is useful for printing out debug information from scrapy.spider import BaseSpider class MySpider(BaseSpider): name = 'example.com' allowed_domains = ['example.com'] start_urls = [ 'http://www.example.com/1.html', 'http://www.example.com/2.html', 'http://www.example.com/3.html', ] def parse(self, response): self.log('A response from %s just arrived!' % response.url) Another example returning multiples Requests and Items from a single callback:: from scrapy.selector import HtmlXPathSelector from scrapy.spider import BaseSpider from scrapy.http import Request from myproject.items import MyItem class MySpider(BaseSpider): name = 'example.com' allowed_domains = ['example.com'] start_urls = [ 'http://www.example.com/1.html', 'http://www.example.com/2.html', 'http://www.example.com/3.html', ] def parse(self, response): hxs = HtmlXPathSelector(response) for h3 in hxs.select('//h3').extract(): yield MyItem(title=h3) for url in hxs.select('//a/@href').extract(): yield Request(url, callback=self.parse) .. module:: scrapy.contrib.spiders :synopsis: Collection of generic spiders CrawlSpider ----------- .. class:: CrawlSpider This is the most commonly used spider for crawling regular websites, as it provides a convenient mechanism for following links by defining a set of rules. It may not be the best suited for your particular web sites or project, but it's generic enough for several cases, so you can start from it and override it as needed for more custom functionality, or just implement your own spider. Apart from the attributes inherited from BaseSpider (that you must specify), this class supports a new attribute: .. attribute:: rules Which is a list of one (or more) :class:`Rule` objects. Each :class:`Rule` defines a certain behaviour for crawling the site. Rules objects are described below. If multiple rules match the same link, the first one will be used, according to the order they're defined in this attribute. Crawling rules ~~~~~~~~~~~~~~ .. class:: Rule(link_extractor, callback=None, cb_kwargs=None, follow=None, process_links=None, process_request=None) ``link_extractor`` is a :ref:`Link Extractor ` object which defines how links will be extracted from each crawled page. ``callback`` is a callable or a string (in which case a method from the spider object with that name will be used) to be called for each link extracted with the specified link_extractor. This callback receives a response as its first argument and must return a list containing :class:`~scrapy.item.Item` and/or :class:`~scrapy.http.Request` objects (or any subclass of them). .. warning:: When writing crawl spider rules, avoid using ``parse`` as callback, since the :class:`CrawlSpider` uses the ``parse`` method itself to implement its logic. So if you override the ``parse`` method, the crawl spider will no longer work. ``cb_kwargs`` is a dict containing the keyword arguments to be passed to the callback function ``follow`` is a boolean which specifies if links should be followed from each response extracted with this rule. If ``callback`` is None ``follow`` defaults to ``True``, otherwise it default to ``False``. 
``process_links`` is a callable, or a string (in which case a method from the spider object with that name will be used) which will be called for each list of links extracted from each response using the specified ``link_extractor``. This is mainly used for filtering purposes. ``process_request`` is a callable, or a string (in which case a method from the spider object with that name will be used) which will be called with every request extracted by this rule, and must return a request or None (to filter out the request). CrawlSpider example ~~~~~~~~~~~~~~~~~~~ Let's now take a look at an example CrawlSpider with rules:: from scrapy.contrib.spiders import CrawlSpider, Rule from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor from scrapy.selector import HtmlXPathSelector from scrapy.item import Item class MySpider(CrawlSpider): name = 'example.com' allowed_domains = ['example.com'] start_urls = ['http://www.example.com'] rules = ( # Extract links matching 'category.php' (but not matching 'subsection.php') # and follow links from them (since no callback means follow=True by default). Rule(SgmlLinkExtractor(allow=('category\.php', ), deny=('subsection\.php', ))), # Extract links matching 'item.php' and parse them with the spider's method parse_item Rule(SgmlLinkExtractor(allow=('item\.php', )), callback='parse_item'), ) def parse_item(self, response): self.log('Hi, this is an item page! %s' % response.url) hxs = HtmlXPathSelector(response) item = Item() item['id'] = hxs.select('//td[@id="item_id"]/text()').re(r'ID: (\d+)') item['name'] = hxs.select('//td[@id="item_name"]/text()').extract() item['description'] = hxs.select('//td[@id="item_description"]/text()').extract() return item This spider would start crawling example.com's home page, collecting category links, and item links, parsing the latter with the ``parse_item`` method. For each item response, some data will be extracted from the HTML using XPath, and a :class:`~scrapy.item.Item` will be filled with it. XMLFeedSpider ------------- .. class:: XMLFeedSpider XMLFeedSpider is designed for parsing XML feeds by iterating through them by a certain node name. The iterator can be chosen from: ``iternodes``, ``xml``, and ``html``. It's recommended to use the ``iternodes`` iterator for performance reasons, since the ``xml`` and ``html`` iterators generate the whole DOM at once in order to parse it. However, using ``html`` as the iterator may be useful when parsing XML with bad markup. To set the iterator and the tag name, you must define the following class attributes: .. attribute:: iterator A string which defines the iterator to use. It can be either: - ``'iternodes'`` - a fast iterator based on regular expressions - ``'html'`` - an iterator which uses HtmlXPathSelector. Keep in mind this uses DOM parsing and must load all DOM in memory which could be a problem for big feeds - ``'xml'`` - an iterator which uses XmlXPathSelector. Keep in mind this uses DOM parsing and must load all DOM in memory which could be a problem for big feeds It defaults to: ``'iternodes'``. .. attribute:: itertag A string with the name of the node (or element) to iterate in. Example:: itertag = 'product' .. attribute:: namespaces A list of ``(prefix, uri)`` tuples which define the namespaces available in that document that will be processed with this spider. The ``prefix`` and ``uri`` will be used to automatically register namespaces using the :meth:`~scrapy.selector.XPathSelector.register_namespace` method. 
You can then specify nodes with namespaces in the :attr:`itertag` attribute. Example:: class YourSpider(XMLFeedSpider): namespaces = [('n', 'http://www.sitemaps.org/schemas/sitemap/0.9')] itertag = 'n:url' # ... Apart from these new attributes, this spider has the following overrideable methods too: .. method:: adapt_response(response) A method that receives the response as soon as it arrives from the spider middleware, before the spider starts parsing it. It can be used to modify the response body before parsing it. This method receives a response and also returns a response (it could be the same or another one). .. method:: parse_node(response, selector) This method is called for the nodes matching the provided tag name (``itertag``). Receives the response and an XPathSelector for each node. Overriding this method is mandatory. Otherwise, your spider won't work. This method must return either a :class:`~scrapy.item.Item` object, a :class:`~scrapy.http.Request` object, or an iterable containing any of them. .. method:: process_results(response, results) This method is called for each result (item or request) returned by the spider, and it's intended to perform any final processing required before returning the results to the framework core, for example setting the item IDs. It receives a list of results and the response which originated those results. It must return a list of results (Items or Requests). XMLFeedSpider example ~~~~~~~~~~~~~~~~~~~~~ These spiders are pretty easy to use; let's have a look at one example:: from scrapy import log from scrapy.contrib.spiders import XMLFeedSpider from myproject.items import TestItem class MySpider(XMLFeedSpider): name = 'example.com' allowed_domains = ['example.com'] start_urls = ['http://www.example.com/feed.xml'] iterator = 'iternodes' # This is actually unnecessary, since it's the default value itertag = 'item' def parse_node(self, response, node): log.msg('Hi, this is a <%s> node!: %s' % (self.itertag, ''.join(node.extract()))) item = TestItem() item['id'] = node.select('@id').extract() item['name'] = node.select('name').extract() item['description'] = node.select('description').extract() return item Basically what we did up there was to create a spider that downloads a feed from the given ``start_urls``, and then iterates through each of its ``item`` tags, prints them out, and stores some random data in an :class:`~scrapy.item.Item`. CSVFeedSpider ------------- .. class:: CSVFeedSpider This spider is very similar to the XMLFeedSpider, except that it iterates over rows, instead of nodes. The method that gets called in each iteration is :meth:`parse_row`. .. attribute:: delimiter A string with the separator character for each field in the CSV file. Defaults to ``','`` (comma). .. attribute:: headers A list of the column names in the CSV feed file, which will be used to extract fields from it. .. method:: parse_row(response, row) Receives a response and a dict (representing each row) with a key for each provided (or detected) header of the CSV file. This spider also gives the opportunity to override the ``adapt_response`` and ``process_results`` methods for pre- and post-processing purposes, as shown in the sketch below.
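For instance, ``adapt_response`` can help when a feed arrives with a quirk that would otherwise confuse row parsing. The following is a minimal sketch (the spider name and feed URL are made up for illustration) that strips a leading UTF-8 byte order mark from the response body before the rows are parsed::

    from scrapy.contrib.spiders import CSVFeedSpider

    class BOMSafeSpider(CSVFeedSpider):
        name = 'bomsafe.example'
        start_urls = ['http://www.example.com/feed.csv']
        delimiter = ','
        headers = ['id', 'name']

        def adapt_response(self, response):
            # Some servers prepend a UTF-8 BOM, which would otherwise end up
            # glued to the first header name. Return a copy of the response
            # without it, or the original response if there is nothing to fix.
            if response.body.startswith('\xef\xbb\xbf'):
                return response.replace(body=response.body[3:])
            return response

        def parse_row(self, response, row):
            self.log('Got row: %r' % row)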
CSVFeedSpider example ~~~~~~~~~~~~~~~~~~~~~ Let's see an example similar to the previous one, but using a :class:`CSVFeedSpider`:: from scrapy import log from scrapy.contrib.spiders import CSVFeedSpider from myproject.items import TestItem class MySpider(CSVFeedSpider): name = 'example.com' allowed_domains = ['example.com'] start_urls = ['http://www.example.com/feed.csv'] delimiter = ';' headers = ['id', 'name', 'description'] def parse_row(self, response, row): log.msg('Hi, this is a row!: %r' % row) item = TestItem() item['id'] = row['id'] item['name'] = row['name'] item['description'] = row['description'] return item SitemapSpider ------------- .. class:: SitemapSpider SitemapSpider allows you to crawl a site by discovering the URLs using `Sitemaps`_. It supports nested sitemaps and discovering sitemap urls from `robots.txt`_. .. attribute:: sitemap_urls A list of urls pointing to the sitemaps whose urls you want to crawl. You can also point to a `robots.txt`_ and it will be parsed to extract sitemap urls from it. .. attribute:: sitemap_rules A list of tuples ``(regex, callback)`` where: * ``regex`` is a regular expression to match urls extracted from sitemaps. ``regex`` can be either a str or a compiled regex object. * callback is the callback to use for processing the urls that match the regular expression. ``callback`` can be a string (indicating the name of a spider method) or a callable. For example:: sitemap_rules = [('/product/', 'parse_product')] Rules are applied in order, and only the first one that matches will be used. If you omit this attribute, all urls found in sitemaps will be processed with the ``parse`` callback. .. attribute:: sitemap_follow A list of regexes of sitemap that should be followed. This is is only for sites that use `Sitemap index files`_ that point to other sitemap files. By default, all sitemaps are followed. SitemapSpider examples ~~~~~~~~~~~~~~~~~~~~~~ Simplest example: process all urls discovered through sitemaps using the ``parse`` callback:: from scrapy.contrib.spiders import SitemapSpider class MySpider(SitemapSpider): sitemap_urls = ['http://www.example.com/sitemap.xml'] def parse(self, response): pass # ... scrape item here ... Process some urls with certain callback and other urls with a different callback:: from scrapy.contrib.spiders import SitemapSpider class MySpider(SitemapSpider): sitemap_urls = ['http://www.example.com/sitemap.xml'] sitemap_rules = [ ('/product/', 'parse_product'), ('/category/', 'parse_category'), ] def parse_product(self, response): pass # ... scrape product ... def parse_category(self, response): pass # ... scrape category ... Follow sitemaps defined in the `robots.txt`_ file and only follow sitemaps whose url contains ``/sitemap_shop``:: from scrapy.contrib.spiders import SitemapSpider class MySpider(SitemapSpider): sitemap_urls = ['http://www.example.com/robots.txt'] sitemap_rules = [ ('/shop/', 'parse_shop'), ] sitemap_follow = ['/sitemap_shops'] def parse_shop(self, response): pass # ... scrape shop here ... Combine SitemapSpider with other sources of urls:: from scrapy.contrib.spiders import SitemapSpider class MySpider(SitemapSpider): sitemap_urls = ['http://www.example.com/robots.txt'] sitemap_rules = [ ('/shop/', 'parse_shop'), ] other_urls = ['http://www.example.com/about'] def start_requests(self): requests = list(super(MySpider, self).start_requests()) requests += [Request(x, callback=self.parse_other) for x in self.other_urls] return requests def parse_shop(self, response): pass # ... scrape shop here ... 
def parse_other(self, response): pass # ... scrape other here ... .. _Sitemaps: http://www.sitemaps.org .. _Sitemap index files: http://www.sitemaps.org/protocol.php#index .. _robots.txt: http://www.robotstxt.org/ Scrapy-0.14.4/docs/topics/request-response.rst0000600000016101777760000005200411754531743021453 0ustar buildbotnogroup.. _topics-request-response: ====================== Requests and Responses ====================== .. module:: scrapy.http :synopsis: Request and Response classes Scrapy uses :class:`Request` and :class:`Response` objects for crawling web sites. Typically, :class:`Request` objects are generated in the spiders and pass across the system until they reach the Downloader, which executes the request and returns a :class:`Response` object which travels back to the spider that issued the request. Both :class:`Request` and :class:`Response` classes have subclasses which add functionality not required in the base classes. These are described below in :ref:`topics-request-response-ref-request-subclasses` and :ref:`topics-request-response-ref-response-subclasses`. Request objects =============== .. class:: Request(url[, method='GET', body, headers, cookies, meta, encoding='utf-8', priority=0, dont_filter=False, callback, errback]) A :class:`Request` object represents an HTTP request, which is usually generated in the Spider and executed by the Downloader, and thus generating a :class:`Response`. :param url: the URL of this request :type url: string :param method: the HTTP method of this request. Defaults to ``'GET'``. :type method: string :param meta: the initial values for the :attr:`Request.meta` attribute. If given, the dict passed in this parameter will be shallow copied. :type meta: dict :param body: the request body. If a ``unicode`` is passed, then it's encoded to ``str`` using the `encoding` passed (which defaults to ``utf-8``). If ``body`` is not given,, an empty string is stored. Regardless of the type of this argument, the final value stored will be a ``str``` (never ``unicode`` or ``None``). :type body: str or unicode :param headers: the headers of this request. The dict values can be strings (for single valued headers) or lists (for multi-valued headers). :type headers: dict :param cookies: the request cookies. Example:: request_with_cookies = Request(url="http://www.example.com", cookies={'currency': 'USD', 'country': 'UY'}) When some site returns cookies (in a response) those are stored in the cookies for that domain and will be sent again in future requests. That's the typical behaviour of any regular web browser. However, if, for some reason, you want to avoid merging with existing cookies you can instruct Scrapy to do so by setting the ``dont_merge_cookies`` key in the :attr:`Request.meta`. Example of request without merging cookies:: request_with_cookies = Request(url="http://www.example.com", cookies={'currency': 'USD', 'country': 'UY'}, meta={'dont_merge_cookies': True}) For more info see :ref:`cookies-mw`. :type cookies: dict :param encoding: the encoding of this request (defaults to ``'utf-8'``). This encoding will be used to percent-encode the URL and to convert the body to ``str`` (if given as ``unicode``). :type encoding: string :param priority: the priority of this request (defaults to ``0``). The priority is used by the scheduler to define the order used to process requests. :type priority: int :param dont_filter: indicates that this request should not be filtered by the scheduler. 
This is used when you want to perform an identical request multiple times, to ignore the duplicates filter. Use it with care, or you will get into crawling loops. Default to ``False``. :type dont_filter: boolean :param callback: the function that will be called with the response of this request (once its downloaded) as its first parameter. For more information see :ref:`topics-request-response-ref-request-callback-arguments` below. If a Request doesn't specify a callback, the spider's :meth:`~scrapy.spider.BaseSpider.parse` method will be used. :type callback: callable :param errback: a function that will be called if any exception was raised while processing the request. This includes pages that failed with 404 HTTP errors and such. It receives a `Twisted Failure`_ instance as first parameter. :type errback: callable .. _Twisted Failure: http://twistedmatrix.com/documents/8.2.0/api/twisted.python.failure.Failure.html .. attribute:: Request.url A string containing the URL of this request. Keep in mind that this attribute contains the escaped URL, so it can differ from the URL passed in the constructor. This attribute is read-only. To change the URL of a Request use :meth:`replace`. .. attribute:: Request.method A string representing the HTTP method in the request. This is guaranteed to be uppercase. Example: ``"GET"``, ``"POST"``, ``"PUT"``, etc .. attribute:: Request.headers A dictionary-like object which contains the request headers. .. attribute:: Request.body A str that contains the request body. This attribute is read-only. To change the body of a Request use :meth:`replace`. .. attribute:: Request.meta A dict that contains arbitrary metadata for this request. This dict is empty for new Requests, and is usually populated by different Scrapy components (extensions, middlewares, etc). So the data contained in this dict depends on the extensions you have enabled. See :ref:`topics-request-meta` for a list of special meta keys recognized by Scrapy. This dict is `shallow copied`_ when the request is cloned using the ``copy()`` or ``replace()`` methods, and can also be accesed, in your spider, from the ``response.meta`` attribute. .. _shallow copied: http://docs.python.org/library/copy.html .. method:: Request.copy() Return a new Request which is a copy of this Request. See also: :ref:`topics-request-response-ref-request-callback-arguments`. .. method:: Request.replace([url, method, headers, body, cookies, meta, encoding, dont_filter, callback, errback]) Return a Request object with the same members, except for those members given new values by whichever keyword arguments are specified. The attribute :attr:`Request.meta` is copied by default (unless a new value is given in the ``meta`` argument). See also :ref:`topics-request-response-ref-request-callback-arguments`. .. _topics-request-response-ref-request-callback-arguments: Passing additional data to callback functions --------------------------------------------- The callback of a request is a function that will be called when the response of that request is downloaded. The callback function will be called with the downloaded :class:`Response` object as its first argument. 
Example:: def parse_page1(self, response): return Request("http://www.example.com/some_page.html", callback=self.parse_page2) def parse_page2(self, response): # this would log http://www.example.com/some_page.html self.log("Visited %s" % response.url) In some cases you may be interested in passing arguments to those callback functions so you can receive the arguments later, in the second callback. You can use the :attr:`Request.meta` attribute for that. Here's an example of how to pass an item using this mechanism, to populate different fields from different pages:: def parse_page1(self, response): item = MyItem() item['main_url'] = response.url request = Request("http://www.example.com/some_page.html", callback=self.parse_page2) request.meta['item'] = item return request def parse_page2(self, response): item = response.meta['item'] item['other_url'] = response.url return item .. _topics-request-meta: Request.meta special keys ========================= The :attr:`Request.meta` attribute can contain any arbitrary data, but there are some special keys recognized by Scrapy and its built-in extensions. Those are: * :reqmeta:`dont_redirect` * :reqmeta:`dont_retry` * :reqmeta:`handle_httpstatus_list` * ``dont_merge_cookies`` (see ``cookies`` parameter of :class:`Request` constructor) * :reqmeta:`redirect_urls` .. _topics-request-response-ref-request-subclasses: Request subclasses ================== Here is the list of built-in :class:`Request` subclasses. You can also subclass it to implement your own custom functionality. FormRequest objects ------------------- The FormRequest class extends the base :class:`Request` with functionality for dealing with HTML forms. It uses the `ClientForm`_ library (bundled with Scrapy) to pre-populate form fields with form data from :class:`Response` objects. .. _ClientForm: http://wwwsearch.sourceforge.net/ClientForm/ .. class:: FormRequest(url, [formdata, ...]) The :class:`FormRequest` class adds a new argument to the constructor. The remaining arguments are the same as for the :class:`Request` class and are not documented here. :param formdata: is a dictionary (or iterable of (key, value) tuples) containing HTML Form data which will be url-encoded and assigned to the body of the request. :type formdata: dict or iterable of tuples The :class:`FormRequest` objects support the following class method in addition to the standard :class:`Request` methods: .. classmethod:: FormRequest.from_response(response, [formname=None, formnumber=0, formdata=None, clickdata=None, dont_click=False, ...]) Returns a new :class:`FormRequest` object with its form field values pre-populated with those found in the HTML ``
<form>`` element contained in the given response. For an example see :ref:`topics-request-response-ref-request-userlogin`. Keep in mind that this method is implemented using `ClientForm`_ whose policy is to automatically simulate a click, by default, on any form control that looks clickable, like a ``<input type="submit">``. Even though this is quite convenient, and often the desired behaviour, sometimes it can cause problems which could be hard to debug. For example, when working with forms that are filled and/or submitted using javascript, the default :meth:`from_response` (and `ClientForm`_) behaviour may not be the most appropriate. To disable this behaviour you can set the ``dont_click`` argument to ``True``. Also, if you want to change the control clicked (instead of disabling it) you can also use the ``clickdata`` argument. :param response: the response containing an HTML form which will be used to pre-populate the form fields :type response: :class:`Response` object :param formname: if given, the form with name attribute set to this value will be used. Otherwise, ``formnumber`` will be used for selecting the form. :type formname: string :param formnumber: the number of the form to use, when the response contains multiple forms. The first one (and also the default) is ``0``. :type formnumber: integer :param formdata: fields to override in the form data. If a field was already present in the response ``<form>`` element, its value is overridden by the one passed in this parameter. :type formdata: dict :param clickdata: Arguments to be passed directly to the ClientForm ``click_request_data()`` method. See the `ClientForm`_ homepage for more info. :type clickdata: dict :param dont_click: If True, the form data will be submitted without clicking on any element. :type dont_click: boolean The other parameters of this class method are passed directly to the :class:`FormRequest` constructor. .. versionadded:: 0.10.3 The ``formname`` parameter. Request usage examples ---------------------- Using FormRequest to send data via HTTP POST ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ If you want to simulate an HTML Form POST in your spider and send a couple of key-value fields, you can return a :class:`FormRequest` object (from your spider) like this:: return [FormRequest(url="http://www.example.com/post/action", formdata={'name': 'John Doe', 'age': '27'}, callback=self.after_post)] .. _topics-request-response-ref-request-userlogin: Using FormRequest.from_response() to simulate a user login ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ It is usual for web sites to provide pre-populated form fields through ``<input type="hidden">`` elements, such as session related data or authentication tokens (for login pages). When scraping, you'll want these fields to be automatically pre-populated and only override a couple of them, such as the user name and password. You can use the :meth:`FormRequest.from_response` method for this job. Here's an example spider which uses it:: class LoginSpider(BaseSpider): name = 'example.com' start_urls = ['http://www.example.com/users/login.php'] def parse(self, response): return [FormRequest.from_response(response, formdata={'username': 'john', 'password': 'secret'}, callback=self.after_login)] def after_login(self, response): # check login succeeded before going on if "authentication failed" in response.body: self.log("Login failed", level=log.ERROR) return # continue scraping with authenticated session... Response objects ================ ..
class:: Response(url, [status=200, headers, body, flags]) A :class:`Response` object represents an HTTP response, which is usually downloaded (by the Downloader) and fed to the Spiders for processing. :param url: the URL of this response :type url: string :param headers: the headers of this response. The dict values can be strings (for single valued headers) or lists (for multi-valued headers). :type headers: dict :param status: the HTTP status of the response. Defaults to ``200``. :type status: integer :param body: the response body. It must be str, not unicode, unless you're using a encoding-aware :ref:`Response subclass `, such as :class:`TextResponse`. :type body: str :param meta: the initial values for the :attr:`Response.meta` attribute. If given, the dict will be shallow copied. :type meta: dict :param flags: is a list containing the initial values for the :attr:`Response.flags` attribute. If given, the list will be shallow copied. :type flags: list .. attribute:: Response.url A string containing the URL of the response. This attribute is read-only. To change the URL of a Response use :meth:`replace`. .. attribute:: Response.status An integer representing the HTTP status of the response. Example: ``200``, ``404``. .. attribute:: Response.headers A dictionary-like object which contains the response headers. .. attribute:: Response.body A str containing the body of this Response. Keep in mind that Reponse.body is always a str. If you want the unicode version use :meth:`TextResponse.body_as_unicode` (only available in :class:`TextResponse` and subclasses). This attribute is read-only. To change the body of a Response use :meth:`replace`. .. attribute:: Response.request The :class:`Request` object that generated this response. This attribute is assigned in the Scrapy engine, after the response and the request have passed through all :ref:`Downloader Middlewares `. In particular, this means that: - HTTP redirections will cause the original request (to the URL before redirection) to be assigned to the redirected response (with the final URL after redirection). - Response.request.url doesn't always equal Response.url - This attribute is only available in the spider code, and in the :ref:`Spider Middlewares `, but not in Downloader Middlewares (although you have the Request available there by other means) and handlers of the :signal:`response_downloaded` signal. .. attribute:: Response.meta A shortcut to the :attr:`Request.meta` attribute of the :attr:`Response.request` object (ie. ``self.request.meta``). Unlike the :attr:`Response.request` attribute, the :attr:`Response.meta` attribute is propagated along redirects and retries, so you will get the original :attr:`Request.meta` sent from your spider. .. seealso:: :attr:`Request.meta` attribute .. attribute:: Response.flags A list that contains flags for this response. Flags are labels used for tagging Responses. For example: `'cached'`, `'redirected`', etc. And they're shown on the string representation of the Response (`__str__` method) which is used by the engine for logging. .. method:: Response.copy() Returns a new Response which is a copy of this Response. .. method:: Response.replace([url, status, headers, body, meta, flags, cls]) Returns a Response object with the same members, except for those members given new values by whichever keyword arguments are specified. The attribute :attr:`Response.meta` is copied by default (unless a new value is given in the ``meta`` argument). .. 
_topics-request-response-ref-response-subclasses: Response subclasses =================== Here is the list of available built-in Response subclasses. You can also subclass the Response class to implement your own functionality. TextResponse objects -------------------- .. class:: TextResponse(url, [encoding[, ...]]) :class:`TextResponse` objects add encoding capabilities to the base :class:`Response` class, which is meant to be used only for binary data, such as images, sounds or any media file. :class:`TextResponse` objects support a new constructor argument, in addition to the base :class:`Response` arguments. The remaining functionality is the same as for the :class:`Response` class and is not documented here. :param encoding: is a string which contains the encoding to use for this response. If you create a :class:`TextResponse` object with a unicode body, it will be encoded using this encoding (remember the body attribute is always a string). If ``encoding`` is ``None`` (default value), the encoding will be looked up in the response headers and body instead. :type encoding: string :class:`TextResponse` objects support the following attributes in addition to the standard :class:`Response` ones: .. attribute:: TextResponse.encoding A string with the encoding of this response. The encoding is resolved by trying the following mechanisms, in order: 1. the encoding passed in the constructor `encoding` argument 2. the encoding declared in the Content-Type HTTP header. If this encoding is not valid (ie. unknown), it is ignored and the next resolution mechanism is tried. 3. the encoding declared in the response body. The TextResponse class doesn't provide any special functionality for this. However, the :class:`HtmlResponse` and :class:`XmlResponse` classes do. 4. the encoding inferred by looking at the response body. This is the most fragile method but also the last one tried. :class:`TextResponse` objects support the following methods in addition to the standard :class:`Response` ones: .. method:: TextResponse.body_as_unicode() Returns the body of the response as unicode. This is equivalent to:: response.body.decode(response.encoding) But **not** equivalent to:: unicode(response.body) Since, in the latter case, you would be using your system default encoding (typically `ascii`) to convert the body to unicode, instead of the response encoding. HtmlResponse objects -------------------- .. class:: HtmlResponse(url[, ...]) The :class:`HtmlResponse` class is a subclass of :class:`TextResponse` which adds encoding auto-discovery support by looking into the HTML `meta http-equiv`_ attribute. See :attr:`TextResponse.encoding`. .. _meta http-equiv: http://www.w3schools.com/TAGS/att_meta_http_equiv.asp XmlResponse objects ------------------- .. class:: XmlResponse(url[, ...]) The :class:`XmlResponse` class is a subclass of :class:`TextResponse` which adds encoding auto-discovery support by looking into the XML declaration line. See :attr:`TextResponse.encoding`. Scrapy-0.14.4/docs/topics/ubuntu.rst0000600000016101777760000000332511754531743017453 0ustar buildbotnogroup.. _topics-ubuntu: =============== Ubuntu packages =============== .. versionadded:: 0.10 `Insophia`_ publishes apt-gettable packages which are generally fresher than those in Ubuntu, and more stable too since they're continuously built from the `Github repo`_ (master & stable branches) and so they contain the latest bug fixes.
To use the packages, just add the following line to your ``/etc/apt/sources.list``, and then run ``aptitude update`` and ``aptitude install scrapy-0.13``:: deb http://archive.scrapy.org/ubuntu DISTRO main Replacing ``DISTRO`` with the name of your Ubuntu release, which you can get with command:: lsb_release -cs Supported Ubuntu releases are: ``karmic``, ``lucid``, ``maverick``, ``natty``, ``oneiric``, ``precise``. For Ubuntu Precise (12.04):: deb http://archive.scrapy.org/ubuntu precise main For Ubuntu Oneiric (11.10):: deb http://archive.scrapy.org/ubuntu oneiric main For Ubuntu Natty (11.04):: deb http://archive.scrapy.org/ubuntu natty main For Ubuntu Maverick (10.10):: deb http://archive.scrapy.org/ubuntu maverick main For Ubuntu Lucid (10.04):: deb http://archive.scrapy.org/ubuntu lucid main For Ubuntu Karmic (9.10):: deb http://archive.scrapy.org/ubuntu karmic main .. warning:: Please note that these packages are updated frequently, and so if you find you can't download the packages, try updating your apt package lists first, e.g., with ``apt-get update`` or ``aptitude update``. The public GPG key used to sign these packages can be imported into you APT keyring as follows:: curl -s http://archive.scrapy.org/ubuntu/archive.key | sudo apt-key add - .. _Insophia: http://insophia.com/ .. _Github repo: https://github.com/scrapy/scrapy Scrapy-0.14.4/docs/topics/exceptions.rst0000600000016101777760000000273511754531743020316 0ustar buildbotnogroup.. _topics-exceptions: ========== Exceptions ========== .. module:: scrapy.exceptions :synopsis: Scrapy exceptions .. _topics-exceptions-ref: Built-in Exceptions reference ============================= Here's a list of all exceptions included in Scrapy and their usage. DropItem -------- .. exception:: DropItem The exception that must be raised by item pipeline stages to stop processing an Item. For more information see :ref:`topics-item-pipeline`. CloseSpider ----------- .. exception:: CloseSpider(reason='cancelled') This exception can be raised from a spider callback to request the spider to be closed/stopped. Supported arguments: :param reason: the reason for closing :type reason: str For example:: def parse_page(self, response): if 'Bandwidth exceeded' in response.body: raise CloseSpider('bandwidth_exceeded') IgnoreRequest ------------- .. exception:: IgnoreRequest This exception can be raised by the Scheduler or any downloader middleware to indicate that the request should be ignored. NotConfigured ------------- .. exception:: NotConfigured This exception can be raised by some components to indicate that they will remain disabled. Those components include: * Extensions * Item pipelines * Downloader middlwares * Spider middlewares The exception must be raised in the component constructor. NotSupported ------------ .. exception:: NotSupported This exception is raised to indicate an unsupported feature. Scrapy-0.14.4/docs/topics/scrapyd.rst0000600000016101777760000003767011754531743017610 0ustar buildbotnogroup.. _topics-scrapyd: ======================== Scrapy Service (scrapyd) ======================== .. versionadded:: 0.10 Scrapy comes with a built-in service, called "Scrapyd", which allows you to deploy (aka. upload) your projects and control their spiders using a JSON web service. Projects and versions ===================== Scrapyd can manage multiple projects and each project can have multiple versions uploaded, but only the latest one will be used for launching new spiders. 
A common (and useful) convention to use for the version name is the revision number of the version control tool you're using to track your Scrapy project code. For example: ``r23``. The versions are not compared alphabetically but using a smarter algorithm (the same `distutils`_ uses) so ``r10`` compares greater to ``r9``, for example. How Scrapyd works ================= Scrapyd is an application (typically run as a daemon) that continually polls for spiders that need to run. When a spider needs to run, a process is started to crawl the spider:: scrapy crawl myspider Scrapyd also runs multiple processes in parallel, allocating them in a fixed number of slots given by the `max_proc`_ and `max_proc_per_cpu`_ options, starting as many processes as possible to handle the load. In addition to dispatching and managing processes, Scrapyd provides a :ref:`JSON web service ` to upload new project versions (as eggs) and schedule spiders. This feature is optional and can be disabled if you want to implement your own custom Scrapyd. The components are pluggable and can be changed, if you're familiar with the `Twisted Application Framework`_ which Scrapyd is implemented in. Starting from 0.11, Scrapyd also provides a minimal :ref:`web interface `. Starting Scrapyd ================ Scrapyd is implemented using the standard `Twisted Application Framework`_. To start the service, use the ``extras/scrapyd.tac`` file provided in the Scrapy distribution, like this:: twistd -ny extras/scrapyd.tac That should get your Scrapyd started. Or, if you want to start Scrapyd from inside a Scrapy project you can use the :command:`server` command, like this:: scrapy server Installing Scrapyd ================== How to deploy Scrapyd on your servers depends on the platform your're using. Scrapy comes with Ubuntu packages for Scrapyd ready for deploying it as a system service, to ease the installation and administration, but you can create packages for other distribution or operating systems (including Windows). If you do so, and want to contribute them, send a message to scrapy-developers@googlegroups.com and say hi. The community will appreciate it. .. _topics-scrapyd-ubuntu: Installing Scrapyd in Ubuntu ---------------------------- When deploying Scrapyd, it's very useful to have a version already packaged for your system. For this reason, Scrapyd comes with Ubuntu packages ready to use in your Ubuntu servers. So, if you plan to deploy Scrapyd on a Ubuntu server, just add the Ubuntu repositories as described in :ref:`topics-ubuntu` and then run:: aptitude install scrapyd-X.YY Where ``X.YY`` is the Scrapy version, for example: ``0.14``. This will install Scrapyd in your Ubuntu server creating a ``scrapy`` user which Scrapyd will run as. It will also create some directories and files that are listed below: /etc/scrapyd ~~~~~~~~~~~~ Scrapyd configuration files. See :ref:`topics-scrapyd-config`. /var/log/scrapyd/scrapyd.log ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Scrapyd main log file. /var/log/scrapyd/scrapyd.out ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The standard output captured from Scrapyd process and any sub-process spawned from it. /var/log/scrapyd/scrapyd.err ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The standard error captured from Scrapyd and any sub-process spawned from it. Remember to check this file if you're having problems, as the errors may not get logged to the ``scrapyd.log`` file. 
/var/log/scrapyd/project ~~~~~~~~~~~~~~~~~~~~~~~~ Besides the main service log file, Scrapyd stores one log file per crawling process in:: /var/log/scrapyd/PROJECT/SPIDER/ID.log Where ``ID`` is a unique id for the run. /var/lib/scrapyd/ ~~~~~~~~~~~~~~~~~ Directory used to store data files (uploaded eggs and spider queues). .. _topics-scrapyd-config: Scrapyd Configuration file ========================== Scrapyd searches for configuration files in the following locations, and parses them in order with the latest ones taking more priority: * ``/etc/scrapyd/scrapyd.conf`` (Unix) * ``c:\scrapyd\scrapyd.conf`` (Windows) * ``/etc/scrapyd/conf.d/*`` (in alphabetical order, Unix) * ``scrapyd.conf`` The configuration file supports the following options (see default values in the :ref:`example <topics-scrapyd-config-example>`). http_port --------- The TCP port where the HTTP JSON API will listen. Defaults to ``6800``. max_proc -------- The maximum number of concurrent Scrapy processes that will be started. If unset or ``0`` it will use the number of cpus available in the system multiplied by the value of the ``max_proc_per_cpu`` option. Defaults to ``0``. max_proc_per_cpu ---------------- The maximum number of concurrent Scrapy processes that will be started per cpu. Defaults to ``4``. debug ----- Whether debug mode is enabled. Defaults to ``off``. When debug mode is enabled the full Python traceback will be returned (as plain text responses) when there is an error processing a JSON API call. eggs_dir -------- The directory where the project eggs will be stored. dbs_dir ------- The directory where the project databases will be stored (this includes the spider queues). logs_dir -------- The directory where the Scrapy process logs will be stored. logs_to_keep ------------ The number of logs to keep per spider. Defaults to ``5``. runner ------ The module that will be used for launching sub-processes. You can customize the Scrapy processes launched from Scrapyd by using your own module. application ----------- A function that returns the (Twisted) Application object to use. This can be used if you want to extend Scrapyd by adding and removing your own components and services. For more info see the `Twisted Application Framework`_. .. _topics-scrapyd-config-example: Example configuration file -------------------------- Here is an example configuration file with all the defaults: .. literalinclude:: ../../scrapyd/default_scrapyd.conf .. _topics-deploying: Deploying your project ====================== Deploying your project into a Scrapyd server typically involves two steps: 1. building a `Python egg`_ of your project. This is called "eggifying" your project. You'll need to install `setuptools`_ for this. See :ref:`topics-egg-caveats` below. 2. uploading the egg to the Scrapyd server The simplest way to deploy your project is by using the :command:`deploy` command, which automates the process of building the egg and uploading it using the Scrapyd HTTP JSON API. The :command:`deploy` command supports multiple targets (Scrapyd servers that can host your project) and each target supports multiple projects. Each time you deploy a new version of a project, you can name it for later reference. Show and define targets ----------------------- To see all available targets type:: scrapy deploy -l This will return a list of available targets and their URLs.
For example:: scrapyd http://localhost:6800/ You can define targets by adding them to your project's ``scrapy.cfg`` file, or any other supported location like ``~/.scrapy.cfg``, ``/etc/scrapy.cfg``, or ``c:\scrapy\scrapy.cfg`` (in Windows). Here's an example of defining a new target ``scrapyd2`` with restricted access through HTTP basic authentication:: [deploy:scrapyd2] url = http://scrapyd.mydomain.com/api/scrapyd/ username = john password = secret .. note:: The :command:`deploy` command also supports netrc for getting the credentials. Now, if you type ``scrapy deploy -l`` you'll see:: scrapyd http://localhost:6800/ scrapyd2 http://scrapyd.mydomain.com/api/scrapyd/ See available projects ---------------------- To see all available projets in a specific target use:: scrapy deploy -L scrapyd It would return something like this:: project1 project2 Deploying a project ------------------- Finally, to deploy your project use:: scrapy deploy scrapyd -p project1 This will eggify your project and upload it to the target, printing the JSON response returned from the Scrapyd server. If you have a ``setup.py`` file in your project, that one will be used. Otherwise a ``setup.py`` file will be created automatically (based on a simple template) that you can edit later. After running that command you will see something like this, meaning your project was uploaded successfully:: Deploying myproject-1287453519 to http://localhost:6800/addversion.json Server response (200): {"status": "ok", "spiders": ["spider1", "spider2"]} By default ``scrapy deploy`` uses the current timestamp for generating the project version, as you can see in the output above. However, you can pass a custom version with the ``--version`` option:: scrapy deploy scrapyd -p project1 --version 54 Also, if you use Mercurial for tracking your project source code, you can use ``HG`` for the version which will be replaced by the current Mercurial revision, for example ``r382``:: scrapy deploy scrapyd -p project1 --version HG And, if you use Git for tracking your project source code, you can use ``GIT`` for the version which will be replaced by the SHA1 of current Git revision, for example ``b0582849179d1de7bd86eaa7201ea3cda4b5651f``:: scrapy deploy scrapyd -p project1 --version GIT Support for other version discovery sources may be added in the future. Finally, if you don't want to specify the target, project and version every time you run ``scrapy deploy`` you can define the defaults in the ``scrapy.cfg`` file. For example:: [deploy] url = http://scrapyd.mydomain.com/api/scrapyd/ username = john password = secret project = project1 version = HG This way, you can deploy your project just by using:: scrapy deploy Local settings -------------- Sometimes, while your working on your projects, you may want to override your certain settings with certain local settings that shouldn't be deployed to Scrapyd, but only used locally to develop and debug your spiders. One way to deal with this is to have a ``local_settings.py`` at the root of your project (where the ``scrapy.cfg`` file resides) and add these lines to the end of your project settings:: try: from local_settings import * except ImportError: pass ``scrapy deploy`` won't deploy anything outside the project module so the ``local_settings.py`` file won't be deployed. Here's the directory structure, to illustrate:: scrapy.cfg local_settings.py myproject/ __init__.py settings.py spiders/ ... .. 
_topics-egg-caveats: Egg caveats ----------- There are some things to keep in mind when building eggs of your Scrapy project: * make sure no local development settings are included in the egg when you build it. The ``find_packages`` function may be picking up your custom settings. In most cases you want to upload the egg with the default project settings. * you shouldn't use ``__file__`` in your project code as it doesn't play well with eggs. Consider using `pkgutil.get_data()`_ instead. * be careful when writing to disk in your project (in any spider, extension or middleware) as Scrapyd will probably run with a different user which may not have write access to certain directories. If you can, avoid writing to disk and always use `tempfile`_ for temporary files. Scheduling a spider run ======================= To schedule a spider run:: $ curl http://localhost:6800/schedule.json -d project=myproject -d spider=spider2 {"status": "ok", "jobid": "26d1b1a6d6f111e0be5c001e648c57f8"} For more resources see: :ref:`topics-scrapyd-jsonapi` for more available resources. .. _topics-scrapyd-webui: Web Interface ============= .. versionadded:: 0.11 Scrapyd comes with a minimal web interface (for monitoring running processes and accessing logs) which can be accessed at http://localhost:6800/ .. _topics-scrapyd-jsonapi: JSON API reference ================== The following section describes the available resources in Scrapyd JSON API. addversion.json --------------- Add a version to a project, creating the project if it doesn't exist. * Supported Request Methods: ``POST`` * Parameters: * ``project`` (string, required) - the project name * ``version`` (string, required) - the project version * ``egg`` (file, required) - a Python egg containing the project's code Example request:: $ curl http://localhost:6800/addversion.json -F project=myproject -F version=r23 -F egg=@myproject.egg Example reponse:: {"status": "ok", "spiders": 3} schedule.json ------------- Schedule a spider run. * Supported Request Methods: ``POST`` * Parameters: * ``project`` (string, required) - the project name * ``spider`` (string, required) - the spider name * ``setting`` (string, optional) - a scrapy setting to use when running the spider * any other parameter is passed as spider argument Example request:: $ curl http://localhost:6800/schedule.json -d project=myproject -d spider=somespider Example response:: {"status": "ok"} Example request passing a spider argument (``arg1``) and a setting (:setting:`DOWNLOAD_DELAY`):: $ curl http://localhost:6800/schedule.json -d project=myproject -d spider=somespider -d setting=DOWNLOAD_DELAY=2 -d arg1=val1 listprojects.json ----------------- Get the list of projects uploaded to this Scrapy server. * Supported Request Methods: ``GET`` * Parameters: none Example request:: $ curl http://localhost:6800/listprojects.json Example response:: {"status": "ok", "projects": ["myproject", "otherproject"]} listversions.json ----------------- Get the list of versions available for some project. The versions are returned in order, the last one is the currently used version. * Supported Request Methods: ``GET`` * Parameters: * ``project`` (string, required) - the project name Example request:: $ curl http://localhost:6800/listversions.json?project=myproject Example response:: {"status": "ok", "versions": ["r99", "r156"]} listspiders.json ---------------- Get the list of spiders available in the last version of some project. 
* Supported Request Methods: ``GET`` * Parameters: * ``project`` (string, required) - the project name Example request:: $ curl http://localhost:6800/listspiders.json?project=myproject Example response:: {"status": "ok", "spiders": ["spider1", "spider2", "spider3"]} delversion.json --------------- Delete a project version. If there are no more versions available for a given project, that project will be deleted too. * Supported Request Methods: ``POST`` * Parameters: * ``project`` (string, required) - the project name * ``version`` (string, required) - the project version Example request:: $ curl http://localhost:6800/delversion.json -d project=myproject -d version=r99 Example response:: {"status": "ok"} delproject.json --------------- Delete a project and all its uploaded versions. * Supported Request Methods: ``POST`` * Parameters: * ``project`` (string, required) - the project name Example request:: $ curl http://localhost:6800/delproject.json -d project=myproject Example response:: {"status": "ok"} .. _Python egg: http://peak.telecommunity.com/DevCenter/PythonEggs .. _setup.py: http://docs.python.org/distutils/setupscript.html .. _curl: http://en.wikipedia.org/wiki/CURL .. _setuptools: http://pypi.python.org/pypi/setuptools .. _pkgutil.get_data(): http://docs.python.org/library/pkgutil.html#pkgutil.get_data .. _tempfile: http://docs.python.org/library/tempfile.html .. _Twisted Application Framework: http://twistedmatrix.com/documents/current/core/howto/application.html .. _distutils: http://docs.python.org/library/distutils.html Scrapy-0.14.4/docs/topics/items.rst .. _topics-items: ===== Items ===== .. module:: scrapy.item :synopsis: Item and Field classes The main goal in scraping is to extract structured data from unstructured sources, typically web pages. Scrapy provides the :class:`Item` class for this purpose. :class:`Item` objects are simple containers used to collect the scraped data. They provide a `dictionary-like`_ API with a convenient syntax for declaring their available fields. .. _dictionary-like: http://docs.python.org/library/stdtypes.html#dict .. _topics-items-declaring: Declaring Items =============== Items are declared using a simple class definition syntax and :class:`Field` objects. Here is an example:: from scrapy.item import Item, Field class Product(Item): name = Field() price = Field() stock = Field() last_updated = Field(serializer=str) .. note:: Those familiar with `Django`_ will notice that Scrapy Items are declared similarly to `Django Models`_, except that Scrapy Items are much simpler as there is no concept of different field types. .. _Django: http://www.djangoproject.com/ .. _Django Models: http://docs.djangoproject.com/en/dev/topics/db/models/ .. _topics-items-fields: Item Fields =========== :class:`Field` objects are used to specify metadata for each field. One example is the serializer function specified for the ``last_updated`` field in the example above. You can specify any kind of metadata for each field. There is no restriction on the values accepted by :class:`Field` objects. For this same reason, there isn't a reference list of all available metadata keys. Each key defined in :class:`Field` objects could be used by different components, and only those components know about it. You can also define and use any other :class:`Field` key in your project, for your own needs. The main goal of :class:`Field` objects is to provide a way to define all field metadata in one place.
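For illustration, here is a minimal sketch of how such a component could read the ``serializer`` key declared for the ``Product`` item above (the ``serialize_field`` helper is hypothetical, not part of Scrapy)::

    def serialize_field(item, field_name, value):
        # item.fields maps field names to their Field objects (plain dicts),
        # so any declared metadata key can be read with ordinary dict access.
        serializer = item.fields[field_name].get('serializer')
        return serializer(value) if serializer is not None else value

With the ``Product`` declaration above, ``serialize_field(product, 'last_updated', product['last_updated'])`` passes the value through ``str``, while fields declared without a ``serializer`` key are returned unchanged.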
Typically, those components whose behaviour depends on each field use certain field keys to configure that behaviour. You must refer to their documentation to see which metadata keys are used by each component. It's important to note that the :class:`Field` objects used to declare the item do not stay assigned as class attributes. Instead, they can be accessed through the :attr:`Item.fields` attribute. And that's all you need to know about declaring items. Working with Items ================== Here are some examples of common tasks performed with items, using the ``Product`` item :ref:`declared above <topics-items-declaring>`. You will notice the API is very similar to the `dict API`_. Creating items -------------- :: >>> product = Product(name='Desktop PC', price=1000) >>> print product Product(name='Desktop PC', price=1000) Getting field values -------------------- :: >>> product['name'] Desktop PC >>> product.get('name') Desktop PC >>> product['price'] 1000 >>> product['last_updated'] Traceback (most recent call last): ... KeyError: 'last_updated' >>> product.get('last_updated', 'not set') not set >>> product['lala'] # getting unknown field Traceback (most recent call last): ... KeyError: 'lala' >>> product.get('lala', 'unknown field') 'unknown field' >>> 'name' in product # is name field populated? True >>> 'last_updated' in product # is last_updated populated? False >>> 'last_updated' in product.fields # is last_updated a declared field? True >>> 'lala' in product.fields # is lala a declared field? False Setting field values -------------------- :: >>> product['last_updated'] = 'today' >>> product['last_updated'] today >>> product['lala'] = 'test' # setting unknown field Traceback (most recent call last): ... KeyError: 'Product does not support field: lala' Accessing all populated values ------------------------------ To access all populated values, just use the typical `dict API`_:: >>> product.keys() ['price', 'name'] >>> product.items() [('price', 1000), ('name', 'Desktop PC')] Other common tasks ------------------ Copying items:: >>> product2 = Product(product) >>> print product2 Product(name='Desktop PC', price=1000) Creating dicts from items:: >>> dict(product) # create a dict from all populated values {'price': 1000, 'name': 'Desktop PC'} Creating items from dicts:: >>> Product({'name': 'Laptop PC', 'price': 1500}) Product(price=1500, name='Laptop PC') >>> Product({'name': 'Laptop PC', 'lala': 1500}) # warning: unknown field in dict Traceback (most recent call last): ... KeyError: 'Product does not support field: lala' Extending Items =============== You can extend Items (to add more fields or to change some metadata for some fields) by declaring a subclass of your original Item. For example:: class DiscountedProduct(Product): discount_percent = Field(serializer=str) discount_expiration_date = Field() You can also extend field metadata by using the previous field metadata and appending more values, or changing existing values, like this:: class SpecificProduct(Product): name = Field(Product.fields['name'], serializer=my_serializer) That adds (or replaces) the ``serializer`` metadata key for the ``name`` field, keeping all the previously existing metadata values. Item objects ============ .. class:: Item([arg]) Return a new Item optionally initialized from the given argument. Items replicate the standard `dict API`_, including its constructor. The only additional attribute provided by Items is: .. attribute:: fields A dictionary containing *all declared fields* for this Item, not only those populated.
The keys are the field names and the values are the :class:`Field` objects used in the :ref:`Item declaration `. .. _dict API: http://docs.python.org/library/stdtypes.html#dict Field objects ============= .. class:: Field([arg]) The :class:`Field` class is just an alias to the built-in `dict`_ class and doesn't provide any extra functionality or attributes. In other words, :class:`Field` objects are plain-old Python dicts. A separate class is used to support the :ref:`item declaration syntax ` based on class attributes. .. _dict: http://docs.python.org/library/stdtypes.html#dict Scrapy-0.14.4/docs/topics/architecture.rst0000600000016101777760000001070211754531743020610 0ustar buildbotnogroup.. _topics-architecture: ===================== Architecture overview ===================== This document describes the architecture of Scrapy and how its components interact. Overview ======== The following diagram shows an overview of the Scrapy architecture with its components and an outline of the data flow that takes place inside the system (shown by the green arrows). A brief description of the components is included below with links for more detailed information about them. The data flow is also described below. .. image:: _images/scrapy_architecture.png :width: 700 :height: 494 :alt: Scrapy architecture Components ========== Scrapy Engine ------------- The engine is responsible for controlling the data flow between all components of the system, and triggering events when certain actions occur. See the Data Flow section below for more details. Scheduler --------- The Scheduler receives requests from the engine and enqueues them for feeding them later (also to the engine) when the engine requests them. Downloader ---------- The Downloader is responsible for fetching web pages and feeding them to the engine which, in turn, feeds them to the spiders. Spiders ------- Spiders are custom classes written by Scrapy users to parse responses and extract items (aka scraped items) from them or additional URLs (requests) to follow. Each spider is able to handle a specific domain (or group of domains). For more information see :ref:`topics-spiders`. Item Pipeline ------------- The Item Pipeline is responsible for processing the items once they have been extracted (or scraped) by the spiders. Typical tasks include cleansing, validation and persistence (like storing the item in a database). For more information see :ref:`topics-item-pipeline`. Downloader middlewares ---------------------- Downloader middlewares are specific hooks that sit between the Engine and the Downloader and process requests when they pass from the Engine to the Downloader, and responses that pass from Downloader to the Engine. They provide a convenient mechanism for extending Scrapy functionality by plugging custom code. For more information see :ref:`topics-downloader-middleware`. Spider middlewares ------------------ Spider middlewares are specific hooks that sit between the Engine and the Spiders and are able to process spider input (responses) and output (items and requests). They provide a convenient mechanism for extending Scrapy functionality by plugging custom code. For more information see :ref:`topics-spider-middleware`. Data flow ========= The data flow in Scrapy is controlled by the Engine, and goes like this: 1. The Engine opens a domain, locates the Spider that handles that domain, and asks the spider for the first URLs to crawl. 2. 
The Engine gets the first URLs to crawl from the Spider and schedules them in the Scheduler, as Requests. 3. The Engine asks the Scheduler for the next URLs to crawl. 4. The Scheduler returns the next URLs to crawl to the Engine and the Engine sends them to the Downloader, passing through the Downloader Middleware (request direction). 5. Once the page finishes downloading the Downloader generates a Response (with that page) and sends it to the Engine, passing through the Downloader Middleware (response direction). 6. The Engine receives the Response from the Downloader and sends it to the Spider for processing, passing through the Spider Middleware (input direction). 7. The Spider processes the Response and returns scraped Items and new Requests (to follow) to the Engine. 8. The Engine sends scraped Items (returned by the Spider) to the Item Pipeline and Requests (returned by spider) to the Scheduler 9. The process repeats (from step 2) until there are no more requests from the Scheduler, and the Engine closes the domain. Event-driven networking ======================= Scrapy is written with `Twisted`_, a popular event-driven networking framework for Python. Thus, it's implemented using a non-blocking (aka asynchronous) code for concurrency. For more information about asynchronous programming and Twisted see these links: * `Asynchronous Programming with Twisted`_ * `Twisted - hello, asynchronous programming`_ .. _Twisted: http://twistedmatrix.com/trac/ .. _Asynchronous Programming with Twisted: http://twistedmatrix.com/projects/core/documentation/howto/async.html .. _Twisted - hello, asynchronous programming: http://jessenoller.com/2009/02/11/twisted-hello-asynchronous-programming/ Scrapy-0.14.4/docs/topics/settings.rst0000600000016101777760000006125311754531743017775 0ustar buildbotnogroup.. _topics-settings: ======== Settings ======== .. module:: scrapy.conf :synopsis: Settings manager The Scrapy settings allows you to customize the behaviour of all Scrapy components, including the core, extensions, pipelines and spiders themselves. The infrastructure of the settings provides a global namespace of key-value mappings that the code can use to pull configuration values from. The settings can be populated through different mechanisms, which are described below. The settings are also the mechanism for selecting the currently active Scrapy project (in case you have many). For a list of available built-in settings see: :ref:`topics-settings-ref`. Designating the settings ======================== When you use Scrapy, you have to tell it which settings you're using. You can do this by using an environment variable, ``SCRAPY_SETTINGS_MODULE``. The value of ``SCRAPY_SETTINGS_MODULE`` should be in Python path syntax, e.g. ``myproject.settings``. Note that the settings module should be on the Python `import search path`_. .. _import search path: http://diveintopython.org/getting_to_know_python/everything_is_an_object.html Populating the settings ======================= Settings can be populated using different mechanisms, each of which having a different precedence. Here is the list of them in decreasing order of precedence: 1. Global overrides (most precedence) 2. Project settings module 3. Default settings per-command 4. Default global settings (less precedence) These mechanisms are described in more detail below. 1. Global overrides ------------------- Global overrides are the ones that take most precedence, and are usually populated by command-line options. 
Example:: >>> from scrapy.conf import settings >>> settings.overrides['LOG_ENABLED'] = True You can also override one (or more) settings from the command line using the ``-s`` (or ``--set``) command line option. .. highlight:: sh Example:: scrapy crawl domain.com -s LOG_FILE=scrapy.log 2. Project settings module -------------------------- The project settings module is the standard configuration file for your Scrapy project. It's where most of your custom settings will be populated. For example: ``myproject.settings``. 3. Default settings per-command ------------------------------- Each :doc:`Scrapy tool ` command can have its own default settings, which override the global default settings. Those custom command settings are specified in the ``default_settings`` attribute of the command class. 4. Default global settings -------------------------- The global defaults are located in the ``scrapy.settings.default_settings`` module and documented in the :ref:`topics-settings-ref` section. How to access settings ====================== .. highlight:: python Here's an example of the simplest way to access settings from Python code:: >>> from scrapy.conf import settings >>> print settings['LOG_ENABLED'] True In other words, settings can be accessed like a dict, but it's usually preferred to extract the setting in the format you need, to avoid type errors. In order to do that you'll have to use one of the following methods: .. class:: Settings() There is a (singleton) Settings object automatically instantiated when the :mod:`scrapy.conf` module is loaded, and it's usually accessed like this:: >>> from scrapy.conf import settings .. method:: get(name, default=None) Get a setting value without affecting its original type. :param name: the setting name :type name: string :param default: the value to return if no setting is found :type default: any .. method:: getbool(name, default=False) Get a setting value as a boolean. For example, ``1``, ``'1'`` and ``True`` return ``True``, while ``0``, ``'0'``, ``False`` and ``None`` return ``False``. For example, settings populated through environment variables set to ``'0'`` will return ``False`` when using this method. :param name: the setting name :type name: string :param default: the value to return if no setting is found :type default: any .. method:: getint(name, default=0) Get a setting value as an int. :param name: the setting name :type name: string :param default: the value to return if no setting is found :type default: any .. method:: getfloat(name, default=0.0) Get a setting value as a float. :param name: the setting name :type name: string :param default: the value to return if no setting is found :type default: any .. method:: getlist(name, default=None) Get a setting value as a list. If the setting's original type is a list it will be returned verbatim. If it's a string it will be split by ",". For example, settings populated through environment variables set to ``'one,two'`` will return the list ``['one', 'two']`` when using this method. :param name: the setting name :type name: string :param default: the value to return if no setting is found :type default: any Rationale for setting names =========================== Setting names are usually prefixed with the component that they configure. For example, proper setting names for a fictional robots.txt extension would be ``ROBOTSTXT_ENABLED``, ``ROBOTSTXT_OBEY``, ``ROBOTSTXT_CACHEDIR``, etc.
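Continuing that fictional example, such an extension would typically read its settings with the typed accessors described above. A minimal sketch (the ``ROBOTSTXT_*`` names are the fictional ones from the paragraph above, not real Scrapy settings)::

    from scrapy.conf import settings

    # Read the fictional extension's settings with explicit defaults and types.
    enabled = settings.getbool('ROBOTSTXT_ENABLED', False)
    cachedir = settings.get('ROBOTSTXT_CACHEDIR')

..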
_topics-settings-ref: Built-in settings reference =========================== Here's a list of all available Scrapy settings, in alphabetical order, along with their default values and the scope where they apply. The scope, where available, shows where the setting is being used, if it's tied to any particular component. In that case the module of that component will be shown, typically an extension, middleware or pipeline. It also means that the component must be enabled in order for the setting to have any effect. .. setting:: AWS_ACCESS_KEY_ID AWS_ACCESS_KEY_ID ----------------- Default: ``None`` The AWS access key used by code that requires access to `Amazon Web services`_, such as the :ref:`S3 feed storage backend `. .. setting:: AWS_SECRET_ACCESS_KEY AWS_SECRET_ACCESS_KEY --------------------- Default: ``None`` The AWS secret key used by code that requires access to `Amazon Web services`_, such as the :ref:`S3 feed storage backend `. .. setting:: BOT_NAME BOT_NAME -------- Default: ``'scrapybot'`` The name of the bot implemented by this Scrapy project (also known as the project name). This will be used to construct the User-Agent by default, and also for logging. It's automatically populated with your project name when you create your project with the :command:`startproject` command. .. setting:: BOT_VERSION BOT_VERSION ----------- Default: ``1.0`` The version of the bot implemented by this Scrapy project. This will be used to construct the User-Agent by default. .. setting:: CONCURRENT_ITEMS CONCURRENT_ITEMS ---------------- Default: ``100`` Maximum number of concurrent items (per response) to process in parallel in the Item Processor (also known as the :ref:`Item Pipeline `). .. setting:: CONCURRENT_REQUESTS CONCURRENT_REQUESTS ------------------- Default: ``16`` The maximum number of concurrent (ie. simultaneous) requests that will be performed by the Scrapy downloader. .. setting:: CONCURRENT_REQUESTS_PER_DOMAIN CONCURRENT_REQUESTS_PER_DOMAIN ------------------------------ Default: ``8`` The maximum number of concurrent (ie. simultaneous) requests that will be performed to any single domain. .. setting:: CONCURRENT_REQUESTS_PER_IP CONCURRENT_REQUESTS_PER_IP -------------------------- Default: ``0`` The maximum number of concurrent (ie. simultaneous) requests that will be performed to any single IP. If non-zero, the :setting:`CONCURRENT_REQUESTS_PER_DOMAIN` setting is ignored, and this one is used instead. In other words, concurrency limits will be applied per IP, not per domain. .. setting:: DEFAULT_ITEM_CLASS DEFAULT_ITEM_CLASS ------------------ Default: ``'scrapy.item.Item'`` The default class that will be used for instantiating items in the :ref:`the Scrapy shell `. .. setting:: DEFAULT_REQUEST_HEADERS DEFAULT_REQUEST_HEADERS ----------------------- Default:: { 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', 'Accept-Language': 'en', } The default headers used for Scrapy HTTP Requests. They're populated in the :class:`~scrapy.contrib.downloadermiddleware.defaultheaders.DefaultHeadersMiddleware`. .. setting:: DEFAULT_RESPONSE_ENCODING DEFAULT_RESPONSE_ENCODING ------------------------- Default: ``'ascii'`` The default encoding to use for :class:`~scrapy.http.TextResponse` objects (and subclasses) when no encoding is declared and no encoding could be inferred from the body. .. setting:: DEPTH_LIMIT DEPTH_LIMIT ----------- Default: ``0`` The maximum depth that will be allowed to crawl for any site. If zero, no limit will be imposed. .. 
setting:: DEPTH_PRIORITY DEPTH_PRIORITY -------------- Default: ``0`` An integer that is used to adjust the request priority based on its depth. If zero, no priority adjustment is made from depth. .. setting:: DEPTH_STATS DEPTH_STATS ----------- Default: ``True`` Whether to collect maximum depth stats. .. setting:: DEPTH_STATS_VERBOSE DEPTH_STATS_VERBOSE ------------------- Default: ``False`` Whether to collect verbose depth stats. If this is enabled, the number of requests for each depth is collected in the stats. .. setting:: DNSCACHE_ENABLED DNSCACHE_ENABLED ---------------- Default: ``True`` Whether to enable DNS in-memory cache. .. setting:: DOWNLOADER_DEBUG DOWNLOADER_DEBUG ---------------- Default: ``False`` Whether to enable the Downloader debugging mode. .. setting:: DOWNLOADER_MIDDLEWARES DOWNLOADER_MIDDLEWARES ---------------------- Default:: ``{}`` A dict containing the downloader middlewares enabled in your project, and their orders. For more info see :ref:`topics-downloader-middleware-setting`. .. setting:: DOWNLOADER_MIDDLEWARES_BASE DOWNLOADER_MIDDLEWARES_BASE --------------------------- Default:: { 'scrapy.contrib.downloadermiddleware.robotstxt.RobotsTxtMiddleware': 100, 'scrapy.contrib.downloadermiddleware.httpauth.HttpAuthMiddleware': 300, 'scrapy.contrib.downloadermiddleware.downloadtimeout.DownloadTimeoutMiddleware': 350, 'scrapy.contrib.downloadermiddleware.useragent.UserAgentMiddleware': 400, 'scrapy.contrib.downloadermiddleware.retry.RetryMiddleware': 500, 'scrapy.contrib.downloadermiddleware.defaultheaders.DefaultHeadersMiddleware': 550, 'scrapy.contrib.downloadermiddleware.redirect.RedirectMiddleware': 600, 'scrapy.contrib.downloadermiddleware.cookies.CookiesMiddleware': 700, 'scrapy.contrib.downloadermiddleware.httpproxy.HttpProxyMiddleware': 750, 'scrapy.contrib.downloadermiddleware.httpcompression.HttpCompressionMiddleware': 800, 'scrapy.contrib.downloadermiddleware.chunked.ChunkedTransferMiddleware': 830, 'scrapy.contrib.downloadermiddleware.stats.DownloaderStats': 850, 'scrapy.contrib.downloadermiddleware.httpcache.HttpCacheMiddleware': 900, } A dict containing the downloader middlewares enabled by default in Scrapy. You should never modify this setting in your project, modify :setting:`DOWNLOADER_MIDDLEWARES` instead. For more info see :ref:`topics-downloader-middleware-setting`. .. setting:: DOWNLOADER_STATS DOWNLOADER_STATS ---------------- Default: ``True`` Whether to enable downloader stats collection. .. setting:: DOWNLOAD_DELAY DOWNLOAD_DELAY -------------- Default: ``0`` The amount of time (in secs) that the downloader should wait before downloading consecutive pages from the same spider. This can be used to throttle the crawling speed to avoid hitting servers too hard. Decimal numbers are supported. Example:: DOWNLOAD_DELAY = 0.25 # 250 ms of delay This setting is also affected by the :setting:`RANDOMIZE_DOWNLOAD_DELAY` setting (which is enabled by default). By default, Scrapy doesn't wait a fixed amount of time between requests, but uses a random interval between 0.5 and 1.5 * :setting:`DOWNLOAD_DELAY`. You can also change this setting per spider. .. setting:: DOWNLOAD_HANDLERS DOWNLOAD_HANDLERS ----------------- Default: ``{}`` A dict containing the request downloader handlers enabled in your project. See `DOWNLOAD_HANDLERS_BASE` for example format. .. 
setting:: DOWNLOAD_HANDLERS_BASE DOWNLOAD_HANDLERS_BASE ---------------------- Default:: { 'file': 'scrapy.core.downloader.handlers.file.FileDownloadHandler', 'http': 'scrapy.core.downloader.handlers.http.HttpDownloadHandler', 'https': 'scrapy.core.downloader.handlers.http.HttpDownloadHandler', 's3': 'scrapy.core.downloader.handlers.s3.S3DownloadHandler', } A dict containing the request download handlers enabled by default in Scrapy. You should never modify this setting in your project, modify :setting:`DOWNLOAD_HANDLERS` instead. .. setting:: DOWNLOAD_TIMEOUT DOWNLOAD_TIMEOUT ---------------- Default: ``180`` The amount of time (in secs) that the downloader will wait before timing out. .. setting:: DUPEFILTER_CLASS DUPEFILTER_CLASS ---------------- Default: ``'scrapy.dupefilter.RFPDupeFilter'`` The class used to detect and filter duplicate requests. The default (``RFPDupeFilter``) filters based on request fingerprint using the ``scrapy.utils.request.request_fingerprint`` function. .. setting:: EDITOR EDITOR ------ The editor to use for editing spiders with the :command:`edit` command. It defaults to the ``EDITOR`` environment variable, if set. Otherwise, it defaults to ``vi`` (on Unix systems) or the IDLE editor (on Windows). .. setting:: ENCODING_ALIASES ENCODING_ALIASES ---------------- Default: ``{}`` A mapping of custom encoding aliases for your project, where the keys are the aliases (and must be lower case) and the values are the encodings they map to. This setting extends the :setting:`ENCODING_ALIASES_BASE` setting which contains some default mappings. .. setting:: ENCODING_ALIASES_BASE ENCODING_ALIASES_BASE --------------------- Default:: { # gb2312 is superseded by gb18030 'gb2312': 'gb18030', 'chinese': 'gb18030', 'csiso58gb231280': 'gb18030', 'euc- cn': 'gb18030', 'euccn': 'gb18030', 'eucgb2312-cn': 'gb18030', 'gb2312-1980': 'gb18030', 'gb2312-80': 'gb18030', 'iso- ir-58': 'gb18030', # gbk is superseded by gb18030 'gbk': 'gb18030', '936': 'gb18030', 'cp936': 'gb18030', 'ms936': 'gb18030', # latin_1 is a subset of cp1252 'latin_1': 'cp1252', 'iso-8859-1': 'cp1252', 'iso8859-1': 'cp1252', '8859': 'cp1252', 'cp819': 'cp1252', 'latin': 'cp1252', 'latin1': 'cp1252', 'l1': 'cp1252', # others 'zh-cn': 'gb18030', 'win-1251': 'cp1251', 'macintosh' : 'mac_roman', 'x-sjis': 'shift_jis', } The default encoding aliases defined in Scrapy. Don't override this setting in your project, override :setting:`ENCODING_ALIASES` instead. The reason why `ISO-8859-1`_ (and all its aliases) are mapped to `CP1252`_ is due to a well known browser hack. For more information see: `Character encodings in HTML`_. .. _ISO-8859-1: http://en.wikipedia.org/wiki/ISO/IEC_8859-1 .. _CP1252: http://en.wikipedia.org/wiki/Windows-1252 .. _Character encodings in HTML: http://en.wikipedia.org/wiki/Character_encodings_in_HTML .. setting:: EXTENSIONS EXTENSIONS ---------- Default:: ``{}`` A dict containing the extensions enabled in your project, and their orders. .. setting:: EXTENSIONS_BASE EXTENSIONS_BASE --------------- Default:: { 'scrapy.contrib.corestats.CoreStats': 0, 'scrapy.webservice.WebService': 0, 'scrapy.telnet.TelnetConsole': 0, 'scrapy.contrib.memusage.MemoryUsage': 0, 'scrapy.contrib.memdebug.MemoryDebugger': 0, 'scrapy.contrib.closespider.CloseSpider': 0, 'scrapy.contrib.feedexport.FeedExporter': 0, 'scrapy.contrib.spidercontext.SpiderContext': 0, 'scrapy.contrib.logstats.LogStats': 0, 'scrapy.contrib.spiderstate.SpiderState': 0, } The list of available extensions. 
Keep in mind that some of them need to be enabled through a setting. By default, this setting contains all stable built-in extensions. For more information See the :ref:`extensions user guide ` and the :ref:`list of available extensions `. .. setting:: ITEM_PIPELINES ITEM_PIPELINES -------------- Default: ``[]`` The item pipelines to use (a list of classes). Example:: ITEM_PIPELINES = [ 'mybot.pipeline.validate.ValidateMyItem', 'mybot.pipeline.validate.StoreMyItem' ] .. setting:: LOG_ENABLED LOG_ENABLED ----------- Default: ``True`` Whether to enable logging. .. setting:: LOG_ENCODING LOG_ENCODING ------------ Default: ``'utf-8'`` The encoding to use for logging. .. setting:: LOG_FILE LOG_FILE -------- Default: ``None`` File name to use for logging output. If None, standard error will be used. .. setting:: LOG_LEVEL LOG_LEVEL --------- Default: ``'DEBUG'`` Minimum level to log. Available levels are: CRITICAL, ERROR, WARNING, INFO, DEBUG. For more info see :ref:`topics-logging`. .. setting:: LOG_STDOUT LOG_STDOUT ---------- Default: ``False`` If ``True``, all standard output (and error) of your process will be redirected to the log. For example if you ``print 'hello'`` it will appear in the Scrapy log. .. setting:: MEMDEBUG_ENABLED MEMDEBUG_ENABLED ---------------- Default: ``False`` Whether to enable memory debugging. .. setting:: MEMDEBUG_NOTIFY MEMDEBUG_NOTIFY --------------- Default: ``[]`` When memory debugging is enabled a memory report will be sent to the specified addresses if this setting is not empty, otherwise the report will be written to the log. Example:: MEMDEBUG_NOTIFY = ['user@example.com'] .. setting:: MEMUSAGE_ENABLED MEMUSAGE_ENABLED ---------------- Default: ``False`` Scope: ``scrapy.contrib.memusage`` Whether to enable the memory usage extension that will shutdown the Scrapy process when it exceeds a memory limit, and also notify by email when that happened. See :ref:`topics-extensions-ref-memusage`. .. setting:: MEMUSAGE_LIMIT_MB MEMUSAGE_LIMIT_MB ----------------- Default: ``0`` Scope: ``scrapy.contrib.memusage`` The maximum amount of memory to allow (in megabytes) before shutting down Scrapy (if MEMUSAGE_ENABLED is True). If zero, no check will be performed. See :ref:`topics-extensions-ref-memusage`. .. setting:: MEMUSAGE_NOTIFY_MAIL MEMUSAGE_NOTIFY_MAIL -------------------- Default: ``False`` Scope: ``scrapy.contrib.memusage`` A list of emails to notify if the memory limit has been reached. Example:: MEMUSAGE_NOTIFY_MAIL = ['user@example.com'] See :ref:`topics-extensions-ref-memusage`. .. setting:: MEMUSAGE_REPORT MEMUSAGE_REPORT --------------- Default: ``False`` Scope: ``scrapy.contrib.memusage`` Whether to send a memory usage report after each spider has been closed. See :ref:`topics-extensions-ref-memusage`. .. setting:: MEMUSAGE_WARNING_MB MEMUSAGE_WARNING_MB ------------------- Default: ``0`` Scope: ``scrapy.contrib.memusage`` The maximum amount of memory to allow (in megabytes) before sending a warning email notifying about it. If zero, no warning will be produced. .. setting:: NEWSPIDER_MODULE NEWSPIDER_MODULE ---------------- Default: ``''`` Module where to create new spiders using the :command:`genspider` command. Example:: NEWSPIDER_MODULE = 'mybot.spiders_dev' .. setting:: RANDOMIZE_DOWNLOAD_DELAY RANDOMIZE_DOWNLOAD_DELAY ------------------------ Default: ``True`` If enabled, Scrapy will wait a random amount of time (between 0.5 and 1.5 * :setting:`DOWNLOAD_DELAY`) while fetching requests from the same spider. 
This randomization decreases the chance of the crawler being detected (and subsequently blocked) by sites which analyze requests looking for statistically significant similarities in the time between their requests. The randomization policy is the same one used by the `wget`_ ``--random-wait`` option. If :setting:`DOWNLOAD_DELAY` is zero (default) this option has no effect. .. _wget: http://www.gnu.org/software/wget/manual/wget.html .. setting:: REDIRECT_MAX_TIMES REDIRECT_MAX_TIMES ------------------ Default: ``20`` Defines the maximum number of times a request can be redirected. After this maximum, the request's response is returned as is. This is the same default value that Firefox uses for the same task. .. setting:: REDIRECT_MAX_METAREFRESH_DELAY REDIRECT_MAX_METAREFRESH_DELAY ------------------------------ Default: ``100`` Some sites use meta-refresh for redirecting to a session-expired page, so we restrict automatic redirection to a maximum delay (in seconds). .. setting:: REDIRECT_PRIORITY_ADJUST REDIRECT_PRIORITY_ADJUST ------------------------ Default: ``+2`` Adjust redirect request priority relative to the original request. A negative priority adjust means more priority. .. setting:: ROBOTSTXT_OBEY ROBOTSTXT_OBEY -------------- Default: ``False`` Scope: ``scrapy.contrib.downloadermiddleware.robotstxt`` If enabled, Scrapy will respect robots.txt policies. For more information see :ref:`topics-dlmw-robots`. .. setting:: SCHEDULER SCHEDULER --------- Default: ``'scrapy.core.scheduler.Scheduler'`` The scheduler to use for crawling. .. setting:: SPIDER_MIDDLEWARES SPIDER_MIDDLEWARES ------------------ Default: ``{}`` A dict containing the spider middlewares enabled in your project, and their orders. For more info see :ref:`topics-spider-middleware-setting`. .. setting:: SPIDER_MIDDLEWARES_BASE SPIDER_MIDDLEWARES_BASE ----------------------- Default:: { 'scrapy.contrib.spidermiddleware.httperror.HttpErrorMiddleware': 50, 'scrapy.contrib.spidermiddleware.offsite.OffsiteMiddleware': 500, 'scrapy.contrib.spidermiddleware.referer.RefererMiddleware': 700, 'scrapy.contrib.spidermiddleware.urllength.UrlLengthMiddleware': 800, 'scrapy.contrib.spidermiddleware.depth.DepthMiddleware': 900, } A dict containing the spider middlewares enabled by default in Scrapy. You should never modify this setting in your project, modify :setting:`SPIDER_MIDDLEWARES` instead. For more info see :ref:`topics-spider-middleware-setting`. .. setting:: SPIDER_MODULES SPIDER_MODULES -------------- Default: ``[]`` A list of modules where Scrapy will look for spiders. Example:: SPIDER_MODULES = ['mybot.spiders_prod', 'mybot.spiders_dev'] .. setting:: STATS_CLASS STATS_CLASS ----------- Default: ``'scrapy.statscol.MemoryStatsCollector'`` The class to use for collecting stats (must implement the Stats Collector API, or subclass the StatsCollector class). .. setting:: STATS_DUMP STATS_DUMP ---------- Default: ``True`` Dump (to the Scrapy log) the :ref:`Scrapy stats <topics-stats>` collected during the crawl. The spider-specific stats are logged when the spider is closed, while the global stats are dumped when the Scrapy process finishes. For more info see: :ref:`topics-stats`. .. setting:: STATS_ENABLED STATS_ENABLED ------------- Default: ``True`` Enable stats collection. .. setting:: STATSMAILER_RCPTS STATSMAILER_RCPTS ----------------- Default: ``[]`` (empty list) Send Scrapy stats after spiders finish scraping. See :class:`~scrapy.contrib.statsmailer.StatsMailer` for more info. ..
setting:: TELNETCONSOLE_ENABLED TELNETCONSOLE_ENABLED --------------------- Default: ``True`` A boolean which specifies if the :ref:`telnet console ` will be enabled (provided its extension is also enabled). .. setting:: TELNETCONSOLE_PORT TELNETCONSOLE_PORT ------------------ Default: ``[6023, 6073]`` The port range to use for the telnet console. If set to ``None`` or ``0``, a dynamically assigned port is used. For more info see :ref:`topics-telnetconsole`. .. setting:: TEMPLATES_DIR TEMPLATES_DIR ------------- Default: ``templates`` dir inside scrapy module The directory where to look for templates when creating new projects with :command:`startproject` command. .. setting:: URLLENGTH_LIMIT URLLENGTH_LIMIT --------------- Default: ``2083`` Scope: ``contrib.spidermiddleware.urllength`` The maximum URL length to allow for crawled URLs. For more information about the default value for this setting see: http://www.boutell.com/newfaq/misc/urllength.html .. setting:: USER_AGENT USER_AGENT ---------- Default: ``"%s/%s" % (BOT_NAME, BOT_VERSION)`` The default User-Agent to use when crawling, unless overridden. .. _Amazon web services: http://aws.amazon.com/ .. _breadth-first order: http://en.wikipedia.org/wiki/Breadth-first_search .. _depth-first order: http://en.wikipedia.org/wiki/Depth-first_search Scrapy-0.14.4/docs/topics/logging.rst0000600000016101777760000000701211754531743017554 0ustar buildbotnogroup.. _topics-logging: ======= Logging ======= Scrapy provides a logging facility which can be used through the :mod:`scrapy.log` module. The current underlying implementation uses `Twisted logging`_ but this may change in the future. .. _Twisted logging: http://twistedmatrix.com/projects/core/documentation/howto/logging.html The logging service must be explicitly started through the :func:`scrapy.log.start` function. .. _topics-logging-levels: Log levels ========== Scrapy provides 5 logging levels: 1. :data:`~scrapy.log.CRITICAL` - for critical errors 2. :data:`~scrapy.log.ERROR` - for regular errors 3. :data:`~scrapy.log.WARNING` - for warning messages 4. :data:`~scrapy.log.INFO` - for informational messages 5. :data:`~scrapy.log.DEBUG` - for debugging messages How to set the log level ======================== You can set the log level using the `--loglevel/-L` command line option, or using the :setting:`LOG_LEVEL` setting. How to log messages =================== Here's a quick example of how to log a message using the ``WARNING`` level:: from scrapy import log log.msg("This is a warning", level=log.WARNING) Logging from Spiders ==================== The recommended way to log from spiders is by using the Spider :meth:`~scrapy.spider.BaseSpider.log` method, which already populates the ``spider`` argument of the :func:`scrapy.log.msg` function. The other arguments are passed directly to the :func:`~scrapy.log.msg` function. scrapy.log module ================= .. module:: scrapy.log :synopsis: Logging facility .. attribute:: started A boolean which is ``True`` if logging has been started or ``False`` otherwise. .. function:: start(logfile=None, loglevel=None, logstdout=None) Start the logging facility. This must be called before actually logging any messages. Otherwise, messages logged before this call will get lost. :param logfile: the file path to use for logging output. If omitted, the :setting:`LOG_FILE` setting will be used. If both are ``None``, the log will be sent to standard error. :type logfile: str :param loglevel: the minimum logging level to log. 
Available values are: :data:`CRITICAL`, :data:`ERROR`, :data:`WARNING`, :data:`INFO` and :data:`DEBUG`. :param logstdout: if ``True``, all standard output (and error) of your application will be logged instead. For example if you "print 'hello'" it will appear in the Scrapy log. If omitted, the :setting:`LOG_STDOUT` setting will be used. :type logstdout: boolean .. function:: msg(message, level=INFO, spider=None) Log a message :param message: the message to log :type message: str :param level: the log level for this message. See :ref:`topics-logging-levels`. :param spider: the spider to use for logging this message. This parameter should always be used when logging things related to a particular spider. :type spider: :class:`~scrapy.spider.BaseSpider` object .. data:: CRITICAL Log level for critical errors .. data:: ERROR Log level for errors .. data:: WARNING Log level for warnings .. data:: INFO Log level for informational messages (recommended level for production deployments) .. data:: DEBUG Log level for debugging messages (recommended level for development) Logging settings ================ These settings can be used to configure the logging: * :setting:`LOG_ENABLED` * :setting:`LOG_ENCODING` * :setting:`LOG_FILE` * :setting:`LOG_LEVEL` * :setting:`LOG_STDOUT` Scrapy-0.14.4/docs/topics/shell.rst .. _topics-shell: ============ Scrapy shell ============ The Scrapy shell is an interactive shell where you can try and debug your scraping code very quickly, without having to run the spider. It's meant to be used for testing data extraction code, but you can actually use it for testing any kind of code as it is also a regular Python shell. The shell is used for testing XPath expressions, to see how they work and what data they extract from the web pages you're trying to scrape. It allows you to interactively test your XPaths while you're writing your spider, without having to run the spider to test every change. Once you get familiar with the Scrapy shell, you'll see that it's an invaluable tool for developing and debugging your spiders. If you have `IPython`_ installed, the Scrapy shell will use it (instead of the standard Python console). The `IPython`_ console is much more powerful and provides smart auto-completion and colorized output, among other things. We highly recommend you install `IPython`_, especially if you're working on Unix systems (where `IPython`_ excels). See the `IPython installation guide`_ for more info. .. _IPython: http://ipython.scipy.org/ .. _IPython installation guide: http://ipython.scipy.org/doc/rel-0.9.1/html/install/index.html Launch the shell ================ To launch the Scrapy shell you can use the :command:`shell` command like this:: scrapy shell <url> Where ``<url>`` is the URL you want to scrape. Using the shell =============== The Scrapy shell is just a regular Python console (or `IPython` console if you have it available) which provides some additional shortcut functions for convenience. Available Shortcuts ------------------- * ``shelp()`` - print a help with the list of available objects and shortcuts * ``fetch(request_or_url)`` - fetch a new response from the given request or URL and update all related objects accordingly. * ``view(response)`` - open the given response in your local web browser, for inspection. This will add a `<base> tag`_ to the response body in order for external links (such as images and style sheets) to display properly.
Note, however, that this will create a temporary file on your computer, which won't be removed automatically. .. _<base> tag: http://www.w3schools.com/TAGS/tag_base.asp Available Scrapy objects ------------------------- The Scrapy shell automatically creates some convenient objects from the downloaded page, like the :class:`~scrapy.http.Response` object and the :class:`~scrapy.selector.XPathSelector` objects (for both HTML and XML content). Those objects are: * ``spider`` - the Spider which is known to handle the URL, or a :class:`~scrapy.spider.BaseSpider` object if there is no spider found for the current URL * ``request`` - a :class:`~scrapy.http.Request` object of the last fetched page. You can modify this request using :meth:`~scrapy.http.Request.replace` or fetch a new request (without leaving the shell) using the ``fetch`` shortcut. * ``response`` - a :class:`~scrapy.http.Response` object containing the last fetched page * ``hxs`` - a :class:`~scrapy.selector.HtmlXPathSelector` object constructed with the last response fetched * ``xxs`` - a :class:`~scrapy.selector.XmlXPathSelector` object constructed with the last response fetched * ``settings`` - the current :ref:`Scrapy settings <topics-settings>` Example of shell session ======================== Here's an example of a typical shell session where we start by scraping the http://scrapy.org page, and then proceed to scrape the http://slashdot.org page. Finally, we modify the (Slashdot) request method to POST and re-fetch it, getting an HTTP 405 (method not allowed) error. We end the session by typing Ctrl-D (in Unix systems) or Ctrl-Z in Windows. Keep in mind that the data extracted here may not be the same when you try it, as those pages are not static and could have changed by the time you test this. The only purpose of this example is to get you familiarized with how the Scrapy shell works. First, we launch the shell:: scrapy shell http://scrapy.org --nolog Then, the shell fetches the URL (using the Scrapy downloader) and prints the list of available objects and useful shortcuts (you'll notice that these lines all start with the ``[s]`` prefix):: [s] Available objects [s] hxs [s] item Item() [s] request [s] response [s] settings [s] spider [s] xxs [s] Useful shortcuts: [s] shelp() Prints this help. [s] fetch(req_or_url) Fetch a new request or URL and update objects [s] view(response) View response in a browser >>> After that, we can start playing with the objects:: >>> hxs.select("//h2/text()").extract()[0] u'Welcome to Scrapy' >>> fetch("http://slashdot.org") [s] Available Scrapy objects: [s] hxs [s] item JobItem() [s] request [s] response <200 http://slashdot.org> [s] settings [s] spider [s] xxs [s] Useful shortcuts: [s] shelp() Shell help (print this help) [s] fetch(req_or_url) Fetch request (or URL) and update local objects [s] view(response) View response in a browser >>> hxs.select("//h2/text()").extract() [u'News for nerds, stuff that matters'] >>> request = request.replace(method="POST") >>> fetch(request) 2009-04-03 00:57:39-0300 [default] ERROR: Downloading from : 405 Method Not Allowed >>> .. _topics-shell-inspect-response: Invoking the shell from spiders to inspect responses ==================================================== Sometimes you want to inspect the responses that are being processed at a certain point in your spider, if only to check that the response you expect is getting there. This can be achieved by using the ``scrapy.shell.inspect_response`` function.
Here's an example of how you would call it from your spider:: class MySpider(BaseSpider): ... def parse(self, response): if response.url == 'http://www.example.com/products.php': from scrapy.shell import inspect_response inspect_response(response) # ... your parsing code .. When you run the spider, you will get something similar to this:: 2009-08-27 19:15:25-0300 [example.com] DEBUG: Crawled (referer: ) 2009-08-27 19:15:26-0300 [example.com] DEBUG: Crawled (referer: ) [s] Available objects [s] hxs ... >>> response.url 'http://www.example.com/products.php' Then, you can check if the extraction code is working:: >>> hxs.select('//h1') [] Nope, it doesn't. So you can open the response in your web browser and see if it's the response you were expecting:: >>> view(response) >>> Finally you hit Ctrl-D (or Ctrl-Z in Windows) to exit the shell and resume the crawling:: >>> ^D 2009-08-27 19:15:25-0300 [example.com] DEBUG: Crawled (referer: ) 2009-08-27 19:15:25-0300 [example.com] DEBUG: Crawled (referer: ) # ... Note that you can't use the ``fetch`` shortcut here since the Scrapy engine is blocked by the shell. However, after you leave the shell, the spider will continue crawling where it stopped, as shown above. Scrapy-0.14.4/docs/topics/stats.rst0000600000016101777760000002270311754531743017270 0ustar buildbotnogroup.. _topics-stats: ================ Stats Collection ================ Overview ======== Scrapy provides a convenient service for collecting stats in the form of key/values, both globally and per spider. It's called the Stats Collector, and it's a singleton which can be imported and used quickly, as illustrated by the examples in the :ref:`topics-stats-usecases` section below. The stats collection is enabled by default but can be disabled through the :setting:`STATS_ENABLED` setting. However, the Stats Collector is always available, so you can always import it in your module and use its API (to increment or set new stat keys), regardless of whether the stats collection is enabled or not. If it's disabled, the API will still work but it won't collect anything. This is aimed at simplifying the stats collector usage: you should spend no more than one line of code for collecting stats in your spider, Scrapy extension, or whatever code you're using the Stats Collector from. Another feature of the Stats Collector is that it's very efficient (when enabled) and extremely efficient (almost unnoticeable) when disabled. The Stats Collector keeps one stats table per open spider and one global stats table. You can't set or get stats from a closed spider, but the spider-specific stats table is automatically opened when the spider is opened, and closed when the spider is closed. .. _topics-stats-usecases: Common Stats Collector uses =========================== Import the stats collector:: from scrapy.stats import stats Set global stat value:: stats.set_value('hostname', socket.gethostname()) Increment global stat value:: stats.inc_value('spiders_crawled') Set global stat value only if greater than previous:: stats.max_value('max_items_scraped', value) Set global stat value only if lower than previous:: stats.min_value('min_free_memory_percent', value) Get global stat value:: >>> stats.get_value('spiders_crawled') 8 Get all global stats (ie. 
not particular to any spider):: >>> stats.get_stats() {'hostname': 'localhost', 'spiders_crawled': 8} Set spider specific stat value (spider stats must be opened first, but this task is handled automatically by the Scrapy engine):: stats.set_value('start_time', datetime.now(), spider=some_spider) Where ``some_spider`` is a :class:`~scrapy.spider.BaseSpider` object. Increment spider-specific stat value:: stats.inc_value('pages_crawled', spider=some_spider) Set spider-specific stat value only if greater than previous:: stats.max_value('max_items_scraped', value, spider=some_spider) Set spider-specific stat value only if lower than previous:: stats.min_value('min_free_memory_percent', value, spider=some_spider) Get spider-specific stat value:: >>> stats.get_value('pages_crawled', spider=some_spider) 1238 Get all stats from a given spider:: >>> stats.get_stats('pages_crawled', spider=some_spider) {'pages_crawled': 1238, 'start_time': datetime.datetime(2009, 7, 14, 21, 47, 28, 977139)} .. _topics-stats-ref: Stats Collector API =================== There are several Stats Collectors available under the :mod:`scrapy.statscol` module and they all implement the Stats Collector API defined by the :class:`~scrapy.statscol.StatsCollector` class (which they all inherit from). .. module:: scrapy.statscol :synopsis: Basic Stats Collectors .. class:: StatsCollector .. method:: get_value(key, default=None, spider=None) Return the value for the given stats key or default if it doesn't exist. If spider is ``None`` the global stats table is consulted, otherwise the spider specific one is. If the spider is not yet opened a ``KeyError`` exception is raised. .. method:: get_stats(spider=None) Get all stats from the given spider (if spider is given) or all global stats otherwise, as a dict. If spider is not opened ``KeyError`` is raised. .. method:: set_value(key, value, spider=None) Set the given value for the given stats key on the global stats (if spider is not given) or the spider-specific stats (if spider is given), which must be opened or a ``KeyError`` will be raised. .. method:: set_stats(stats, spider=None) Set the given stats (as a dict) for the given spider. If the spider is not opened a ``KeyError`` will be raised. .. method:: inc_value(key, count=1, start=0, spider=None) Increment the value of the given stats key, by the given count, assuming the start value given (when it's not set). If spider is not given the global stats table is used, otherwise the spider-specific stats table is used, which must be opened or a ``KeyError`` will be raised. .. method:: max_value(key, value, spider=None) Set the given value for the given key only if current value for the same key is lower than value. If there is no current value for the given key, the value is always set. If spider is not given, the global stats table is used, otherwise the spider-specific stats table is used, which must be opened or a KeyError will be raised. .. method:: min_value(key, value, spider=None) Set the given value for the given key only if current value for the same key is greater than value. If there is no current value for the given key, the value is always set. If spider is not given, the global stats table is used, otherwise the spider-specific stats table is used, which must be opened or a KeyError will be raised. .. method:: clear_stats(spider=None) Clear all global stats (if spider is not given) or all spider-specific stats if spider is given, in which case it must be opened or a ``KeyError`` will be raised. .. 
method:: iter_spider_stats() Return a iterator over ``(spider, spider_stats)`` for each open spider currently tracked by the stats collector, where ``spider_stats`` is the dict containing all spider-specific stats. Global stats are not included in the iterator. If you want to get those, use :meth:`get_stats` method. .. method:: open_spider(spider) Open the given spider for stats collection. This method must be called prior to working with any stats specific to that spider, but this task is handled automatically by the Scrapy engine. .. method:: close_spider(spider) Close the given spider. After this is called, no more specific stats for this spider can be accessed. This method is called automatically on the :signal:`spider_closed` signal. .. method:: engine_stopped() Called after the engine is stopped, to dump or persist global stats. Available Stats Collectors ========================== Besides the basic :class:`StatsCollector` there are other Stats Collectors available in Scrapy which extend the basic Stats Collector. You can select which Stats Collector to use through the :setting:`STATS_CLASS` setting. The default Stats Collector used is the :class:`MemoryStatsCollector`. When stats are disabled (through the :setting:`STATS_ENABLED` setting) the :setting:`STATS_CLASS` setting is ignored and the :class:`DummyStatsCollector` is used. MemoryStatsCollector -------------------- .. class:: MemoryStatsCollector A simple stats collector that keeps the stats of the last scraping run (for each spider) in memory, after they're closed. The stats can be accessed through the :attr:`spider_stats` attribute, which is a dict keyed by spider domain name. This is the default Stats Collector used in Scrapy. .. attribute:: spider_stats A dict of dicts (keyed by spider name) containing the stats of the last scraping run for each spider. DummyStatsCollector ------------------- .. class:: DummyStatsCollector A Stats collector which does nothing but is very efficient. This is the Stats Collector used when stats are disabled (through the :setting:`STATS_ENABLED` setting). Stats signals ============= The Stats Collector provides some signals for extending the stats collection functionality: .. currentmodule:: scrapy.signals .. signal:: stats_spider_opened .. function:: stats_spider_opened(spider) Sent right after the stats spider is opened. You can use this signal to add startup stats for the spider (example: start time). :param spider: the stats spider just opened :type spider: str .. signal:: stats_spider_closing .. function:: stats_spider_closing(spider, reason) Sent just before the stats spider is closed. You can use this signal to add some closing stats (example: finish time). :param spider: the stats spider about to be closed :type spider: str :param reason: the reason why the spider is being closed. See :signal:`spider_closed` signal for more info. :type reason: str .. signal:: stats_spider_closed .. function:: stats_spider_closed(spider, reason, spider_stats) Sent right after the stats spider is closed. You can use this signal to collect resources, but not to add any more stats as the stats spider has already been closed (use :signal:`stats_spider_closing` for that instead). :param spider: the stats spider just closed :type spider: str :param reason: the reason why the spider was closed. See :signal:`spider_closed` signal for more info. :type reason: str :param spider_stats: the stats of the spider just closed. 
:type spider_stats: dict Scrapy-0.14.4/docs/topics/item-pipeline.rst .. _topics-item-pipeline: ============= Item Pipeline ============= After an item has been scraped by a spider, it is sent to the Item Pipeline, which processes it through several components that are executed sequentially. Each item pipeline component (sometimes referred to simply as "Item Pipeline") is a Python class that implements a simple method. They receive an Item and perform an action on it, also deciding if the Item should continue through the pipeline or be dropped and no longer processed. Typical uses for item pipelines are: * cleansing HTML data * validating scraped data (checking that the items contain certain fields) * checking for duplicates (and dropping them) * storing the scraped item in a database Writing your own item pipeline ============================== Writing your own item pipeline is easy. Each item pipeline component is a single Python class that must implement the following method: .. method:: process_item(item, spider) This method is called for every item pipeline component and must either return a :class:`~scrapy.item.Item` (or any descendant class) object or raise a :exc:`~scrapy.exceptions.DropItem` exception. Dropped items are no longer processed by further pipeline components. :param item: the item scraped :type item: :class:`~scrapy.item.Item` object :param spider: the spider which scraped the item :type spider: :class:`~scrapy.spider.BaseSpider` object Additionally, they may also implement the following methods: .. method:: open_spider(spider) This method is called when the spider is opened. :param spider: the spider which was opened :type spider: :class:`~scrapy.spider.BaseSpider` object .. method:: close_spider(spider) This method is called when the spider is closed. :param spider: the spider which was closed :type spider: :class:`~scrapy.spider.BaseSpider` object Item pipeline example ===================== Price validation and dropping items with no prices -------------------------------------------------- Let's take a look at the following hypothetical pipeline that adjusts the ``price`` attribute for those items that do not include VAT (``price_excludes_vat`` attribute), and drops those items which don't contain a price:: from scrapy.exceptions import DropItem class PricePipeline(object): vat_factor = 1.15 def process_item(self, item, spider): if item['price']: if item['price_excludes_vat']: item['price'] = item['price'] * self.vat_factor return item else: raise DropItem("Missing price in %s" % item) Write items to a JSON file -------------------------- The following pipeline stores all scraped items (from all spiders) into a single ``items.jl`` file, containing one item per line serialized in JSON format:: import json class JsonWriterPipeline(object): def __init__(self): self.file = open('items.jl', 'wb') def process_item(self, item, spider): line = json.dumps(dict(item)) + "\n" self.file.write(line) return item .. note:: The purpose of JsonWriterPipeline is just to introduce how to write item pipelines. If you really want to store all scraped items into a JSON file you should use the :ref:`Feed exports `.
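Note also that the pipeline above never closes ``items.jl``. One way to do that, sketched below, is to hook the :signal:`spider_closed` signal with the same dispatcher pattern used in the resources-per-spider example that follows (if several spiders run in the same process, the file is closed when the first of them finishes, so adapt this to your setup)::

    from scrapy.xlib.pydispatch import dispatcher
    from scrapy import signals
    import json

    class JsonWriterPipeline(object):

        def __init__(self):
            self.file = open('items.jl', 'wb')
            # close the output file once a spider finishes crawling
            dispatcher.connect(self.spider_closed, signals.spider_closed)

        def spider_closed(self, spider):
            self.file.close()

        def process_item(self, item, spider):
            line = json.dumps(dict(item)) + "\n"
            self.file.write(line)
            return item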
Activating an Item Pipeline component ===================================== To activate an Item Pipeline component you must add its class to the :setting:`ITEM_PIPELINES` list, like in the following example:: ITEM_PIPELINES = [ 'myproject.pipeline.PricePipeline', 'myproject.pipeline.JsonWriterPipeline', ] Item pipeline example with resources per spider =============================================== Sometimes you need to keep resources about the items processed grouped per spider, and delete those resource when a spider finishes. An example is a filter that looks for duplicate items, and drops those items that were already processed. Let say that our items have an unique id, but our spider returns multiples items with the same id:: from scrapy.xlib.pydispatch import dispatcher from scrapy import signals from scrapy.exceptions import DropItem class DuplicatesPipeline(object): def __init__(self): self.duplicates = {} dispatcher.connect(self.spider_opened, signals.spider_opened) dispatcher.connect(self.spider_closed, signals.spider_closed) def spider_opened(self, spider): self.duplicates[spider] = set() def spider_closed(self, spider): del self.duplicates[spider] def process_item(self, item, spider): if item['id'] in self.duplicates[spider]: raise DropItem("Duplicate item found: %s" % item) else: self.duplicates[spider].add(item['id']) return item Scrapy-0.14.4/docs/topics/signals.rst0000600000016101777760000001746111754531743017577 0ustar buildbotnogroup.. _topics-signals: ======= Signals ======= Scrapy uses signals extensively to notify when certain events occur. You can catch some of those signals in your Scrapy project (using an :ref:`extension `, for example) to perform additional tasks or extend Scrapy to add functionality not provided out of the box. Even though signals provide several arguments, the handlers that catch them don't need to accept all of them - the signal dispatching mechanism will only deliver the arguments that the handler receives. Finally, for more detailed information about signals internals see the documentation of `pydispatcher`_ (the which the signal dispatching mechanism is based on). .. _pydispatcher: http://pydispatcher.sourceforge.net/ Deferred signal handlers ======================== Some signals support returning `Twisted deferreds`_ from their handlers, see the :ref:`topics-signals-ref` below to know which ones. .. _Twisted deferreds: http://twistedmatrix.com/documents/current/core/howto/defer.html .. _topics-signals-ref: Built-in signals reference ========================== .. module:: scrapy.signals :synopsis: Signals definitions Here's the list of Scrapy built-in signals and their meaning. engine_started -------------- .. signal:: engine_started .. function:: engine_started() Sent when the Scrapy engine has started crawling. This signal supports returning deferreds from their handlers. .. note:: This signal may be fired *after* the :signal:`spider_opened` signal, depending on how the spider was started. So **don't** rely on this signal getting fired before :signal:`spider_opened`. engine_stopped -------------- .. signal:: engine_stopped .. function:: engine_stopped() Sent when the Scrapy engine is stopped (for example, when a crawling process has finished). This signal supports returning deferreds from their handlers. item_scraped ------------ .. signal:: item_scraped .. function:: item_scraped(item, response, spider) Sent when an item has been scraped, after it has passed all the :ref:`topics-item-pipeline` stages (without being dropped). 
This signal supports returning deferreds from their handlers. :param item: the item scraped :type item: :class:`~scrapy.item.Item` object :param response: the response from where the item was scraped :type response: :class:`~scrapy.http.Response` object :param spider: the spider which scraped the item :type spider: :class:`~scrapy.spider.BaseSpider` object item_dropped ------------ .. signal:: item_dropped .. function:: item_dropped(item, spider, exception) Sent after an item has been dropped from the :ref:`topics-item-pipeline` when some stage raised a :exc:`~scrapy.exceptions.DropItem` exception. This signal supports returning deferreds from their handlers. :param item: the item dropped from the :ref:`topics-item-pipeline` :type item: :class:`~scrapy.item.Item` object :param spider: the spider which scraped the item :type spider: :class:`~scrapy.spider.BaseSpider` object :param exception: the exception (which must be a :exc:`~scrapy.exceptions.DropItem` subclass) which caused the item to be dropped :type exception: :exc:`~scrapy.exceptions.DropItem` exception spider_closed ------------- .. signal:: spider_closed .. function:: spider_closed(spider, reason) Sent after a spider has been closed. This can be used to release per-spider resources reserved on :signal:`spider_opened`. This signal supports returning deferreds from their handlers. :param spider: the spider which has been closed :type spider: :class:`~scrapy.spider.BaseSpider` object :param reason: a string which describes the reason why the spider was closed. If it was closed because the spider has completed scraping, the reason is ``'finished'``. Otherwise, if the spider was manually closed by calling the ``close_spider`` engine method, then the reason is the one passed in the ``reason`` argument of that method (which defaults to ``'cancelled'``). If the engine was shutdown (for example, by hitting Ctrl-C to stop it) the reason will be ``'shutdown'``. :type reason: str spider_opened ------------- .. signal:: spider_opened .. function:: spider_opened(spider) Sent after a spider has been opened for crawling. This is typically used to reserve per-spider resources, but can be used for any task that needs to be performed when a spider is opened. This signal supports returning deferreds from their handlers. :param spider: the spider which has been opened :type spider: :class:`~scrapy.spider.BaseSpider` object spider_idle ----------- .. signal:: spider_idle .. function:: spider_idle(spider) Sent when a spider has gone idle, which means the spider has no further: * requests waiting to be downloaded * requests scheduled * items being processed in the item pipeline If the idle state persists after all handlers of this signal have finished, the engine starts closing the spider. After the spider has finished closing, the :signal:`spider_closed` signal is sent. You can, for example, schedule some requests in your :signal:`spider_idle` handler to prevent the spider from being closed. This signal does not support returning deferreds from their handlers. :param spider: the spider which has gone idle :type spider: :class:`~scrapy.spider.BaseSpider` object spider_error ------------ .. signal:: spider_error .. function:: spider_error(failure, response, spider) Sent when a spider callback generates an error (ie. raises an exception). 
:param failure: the exception raised as a Twisted `Failure`_ object :type failure: `Failure`_ object :param response: the response being processed when the exception was raised :type response: :class:`~scrapy.http.Response` object :param spider: the spider which raised the exception :type spider: :class:`~scrapy.spider.BaseSpider` object request_received ---------------- .. signal:: request_received .. function:: request_received(request, spider) Sent when the engine receives a :class:`~scrapy.http.Request` from a spider. This signal does not support returning deferreds from their handlers. :param request: the request received :type request: :class:`~scrapy.http.Request` object :param spider: the spider which generated the request :type spider: :class:`~scrapy.spider.BaseSpider` object response_received ----------------- .. signal:: response_received .. function:: response_received(response, request, spider) Sent when the engine receives a new :class:`~scrapy.http.Response` from the downloader. This signal does not support returning deferreds from their handlers. :param response: the response received :type response: :class:`~scrapy.http.Response` object :param request: the request that generated the response :type request: :class:`~scrapy.http.Request` object :param spider: the spider for which the response is intended :type spider: :class:`~scrapy.spider.BaseSpider` object response_downloaded ------------------- .. signal:: response_downloaded .. function:: response_downloaded(response, request, spider) Sent by the downloader right after a ``HTTPResponse`` is downloaded. This signal does not support returning deferreds from their handlers. :param response: the response downloaded :type response: :class:`~scrapy.http.Response` object :param request: the request that generated the response :type request: :class:`~scrapy.http.Request` object :param spider: the spider for which the response is intended :type spider: :class:`~scrapy.spider.BaseSpider` object .. _Failure: http://twistedmatrix.com/documents/current/api/twisted.python.failure.Failure.html Scrapy-0.14.4/docs/topics/downloader-middleware.rst0000600000016101777760000005025711754531743022410 0ustar buildbotnogroup.. _topics-downloader-middleware: ===================== Downloader Middleware ===================== The downloader middleware is a framework of hooks into Scrapy's request/response processing. It's a light, low-level system for globally altering Scrapy's requests and responses. .. _topics-downloader-middleware-setting: Activating a downloader middleware ================================== To activate a downloader middleware component, add it to the :setting:`DOWNLOADER_MIDDLEWARES` setting, which is a dict whose keys are the middleware class paths and their values are the middleware orders. Here's an example:: DOWNLOADER_MIDDLEWARES = { 'myproject.middlewares.CustomDownloaderMiddleware': 543, } The :setting:`DOWNLOADER_MIDDLEWARES` setting is merged with the :setting:`DOWNLOADER_MIDDLEWARES_BASE` setting defined in Scrapy (and not meant to be overridden) and then sorted by order to get the final sorted list of enabled middlewares: the first middleware is the one closer to the engine and the last is the one closer to the downloader. To decide which order to assign to your middleware see the :setting:`DOWNLOADER_MIDDLEWARES_BASE` setting and pick a value according to where you want to insert the middleware. 
The order does matter because each middleware performs a different action and
your middleware could depend on some previous (or subsequent) middleware being
applied.

If you want to disable a built-in middleware (the ones defined in
:setting:`DOWNLOADER_MIDDLEWARES_BASE` and enabled by default) you must define
it in your project's :setting:`DOWNLOADER_MIDDLEWARES` setting and assign
``None`` as its value. For example, if you want to disable the user agent
middleware::

    DOWNLOADER_MIDDLEWARES = {
        'myproject.middlewares.CustomDownloaderMiddleware': 543,
        'scrapy.contrib.downloadermiddleware.useragent.UserAgentMiddleware': None,
    }

Finally, keep in mind that some middlewares may need to be enabled through a
particular setting. See each middleware's documentation for more info.

Writing your own downloader middleware
======================================

Writing your own downloader middleware is easy. Each middleware component is a
single Python class that defines one or more of the following methods:

.. module:: scrapy.contrib.downloadermiddleware

.. class:: DownloaderMiddleware

.. method:: process_request(request, spider)

   This method is called for each request that goes through the download
   middleware.

   :meth:`process_request` should return either ``None``, a
   :class:`~scrapy.http.Response` object, or a :class:`~scrapy.http.Request`
   object.

   If it returns ``None``, Scrapy will continue processing this request,
   executing all other middlewares until, finally, the appropriate downloader
   handler is called and the request is performed (and its response
   downloaded).

   If it returns a :class:`~scrapy.http.Response` object, Scrapy won't bother
   calling ANY other request or exception middleware, or the appropriate
   download function; it'll return that Response. Response middleware is
   always called on every Response.

   If it returns a :class:`~scrapy.http.Request` object, the returned request
   will be rescheduled (in the Scheduler) to be downloaded in the future. The
   callback of the original request will always be called. If the new request
   has a callback it will be called with the response downloaded, and the
   output of that callback will then be passed to the original callback. If
   the new request doesn't have a callback, the response downloaded will just
   be passed to the original request callback.

   If it raises an :exc:`~scrapy.exceptions.IgnoreRequest` exception, the
   entire request will be dropped completely and its callback never called.

   :param request: the request being processed
   :type request: :class:`~scrapy.http.Request` object

   :param spider: the spider for which this request is intended
   :type spider: :class:`~scrapy.spider.BaseSpider` object

.. method:: process_response(request, response, spider)

   :meth:`process_response` should return a :class:`~scrapy.http.Response`
   object or raise a :exc:`~scrapy.exceptions.IgnoreRequest` exception.

   If it returns a :class:`~scrapy.http.Response` (it could be the same given
   response, or a brand-new one), that response will continue to be processed
   with the :meth:`process_response` of the next middleware in the pipeline.

   If it raises an :exc:`~scrapy.exceptions.IgnoreRequest` exception, the
   response will be dropped completely and its callback never called.

   :param request: the request that originated the response
   :type request: :class:`~scrapy.http.Request` object

   :param response: the response being processed
   :type response: :class:`~scrapy.http.Response` object

   :param spider: the spider for which this response is intended
   :type spider: :class:`~scrapy.spider.BaseSpider` object

..
method:: process_exception(request, exception, spider) Scrapy calls :meth:`process_exception` when a download handler or a :meth:`process_request` (from a downloader middleware) raises an exception. :meth:`process_exception` should return either ``None``, :class:`~scrapy.http.Response` or :class:`~scrapy.http.Request` object. If it returns ``None``, Scrapy will continue processing this exception, executing any other exception middleware, until no middleware is left and the default exception handling kicks in. If it returns a :class:`~scrapy.http.Response` object, the response middleware kicks in, and won't bother calling any other exception middleware. If it returns a :class:`~scrapy.http.Request` object, the returned request is used to instruct an immediate redirection. The original request won't finish until the redirected request is completed. This stops the :meth:`process_exception` middleware the same as returning Response would do. :param request: the request that generated the exception :type request: is a :class:`~scrapy.http.Request` object :param exception: the raised exception :type exception: an ``Exception`` object :param spider: the spider for which this request is intended :type spider: :class:`~scrapy.spider.BaseSpider` object .. _topics-downloader-middleware-ref: Built-in downloader middleware reference ======================================== This page describes all downloader middleware components that come with Scrapy. For information on how to use them and how to write your own downloader middleware, see the :ref:`downloader middleware usage guide `. For a list of the components enabled by default (and their orders) see the :setting:`DOWNLOADER_MIDDLEWARES_BASE` setting. .. _cookies-mw: CookiesMiddleware ----------------- .. module:: scrapy.contrib.downloadermiddleware.cookies :synopsis: Cookies Downloader Middleware .. class:: CookiesMiddleware This middleware enables working with sites that require cookies, such as those that use sessions. It keeps track of cookies sent by web servers, and send them back on subsequent requests (from that spider), just like web browsers do. The following settings can be used to configure the cookie middleware: * :setting:`COOKIES_ENABLED` * :setting:`COOKIES_DEBUG` .. setting:: COOKIES_ENABLED COOKIES_ENABLED ~~~~~~~~~~~~~~~ Default: ``True`` Whether to enable the cookies middleware. If disabled, no cookies will be sent to web servers. .. setting:: COOKIES_DEBUG COOKIES_DEBUG ~~~~~~~~~~~~~ Default: ``False`` If enabled, Scrapy will log all cookies sent in requests (ie. ``Cookie`` header) and all cookies received in responses (ie. ``Set-Cookie`` header). Here's an example of a log with :setting:`COOKIES_DEBUG` enabled:: 2011-04-06 14:35:10-0300 [diningcity] INFO: Spider opened 2011-04-06 14:35:10-0300 [diningcity] DEBUG: Sending cookies to: Cookie: clientlanguage_nl=en_EN 2011-04-06 14:35:14-0300 [diningcity] DEBUG: Received cookies from: <200 http://www.diningcity.com/netherlands/index.html> Set-Cookie: JSESSIONID=B~FA4DC0C496C8762AE4F1A620EAB34F38; Path=/ Set-Cookie: ip_isocode=US Set-Cookie: clientlanguage_nl=en_EN; Expires=Thu, 07-Apr-2011 21:21:34 GMT; Path=/ 2011-04-06 14:49:50-0300 [diningcity] DEBUG: Crawled (200) (referer: None) [...] DefaultHeadersMiddleware ------------------------ .. module:: scrapy.contrib.downloadermiddleware.defaultheaders :synopsis: Default Headers Downloader Middleware .. 
class:: DefaultHeadersMiddleware This middleware sets all default requests headers specified in the :setting:`DEFAULT_REQUEST_HEADERS` setting. DownloadTimeoutMiddleware ------------------------- .. module:: scrapy.contrib.downloadermiddleware.downloadtimeout :synopsis: Download timeout middleware .. class:: DownloadTimeoutMiddleware This middleware sets the download timeout for requests specified in the :setting:`DOWNLOAD_TIMEOUT` setting. HttpAuthMiddleware ------------------ .. module:: scrapy.contrib.downloadermiddleware.httpauth :synopsis: HTTP Auth downloader middleware .. class:: HttpAuthMiddleware This middleware authenticates all requests generated from certain spiders using `Basic access authentication`_ (aka. HTTP auth). To enable HTTP authentication from certain spiders, set the ``http_user`` and ``http_pass`` attributes of those spiders. Example:: class SomeIntranetSiteSpider(CrawlSpider): http_user = 'someuser' http_pass = 'somepass' name = 'intranet.example.com' # .. rest of the spider code omitted ... .. _Basic access authentication: http://en.wikipedia.org/wiki/Basic_access_authentication HttpCacheMiddleware ------------------- .. module:: scrapy.contrib.downloadermiddleware.httpcache :synopsis: HTTP Cache downloader middleware .. class:: HttpCacheMiddleware This middleware provides low-level cache to all HTTP requests and responses. Every request and its corresponding response are cached. When the same request is seen again, the response is returned without transferring anything from the Internet. The HTTP cache is useful for testing spiders faster (without having to wait for downloads every time) and for trying your spider offline, when an Internet connection is not available. Scrapy ships with two storage backends for the HTTP cache middleware: * :ref:`httpcache-fs-backend` * :ref:`httpcache-dbm-backend` You can change the storage backend with the :setting:`HTTPCACHE_STORAGE` setting. Or you can also implement your own backend. .. _httpcache-fs-backend: File system backend (default) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ By default, the :class:`HttpCacheMiddleware` uses a file system storage with the following structure: Each request/response pair is stored in a different directory containing the following files: * ``request_body`` - the plain request body * ``request_headers`` - the request headers (in raw HTTP format) * ``response_body`` - the plain response body * ``response_headers`` - the request headers (in raw HTTP format) * ``meta`` - some metadata of this cache resource in Python ``repr()`` format (grep-friendly format) * ``pickled_meta`` - the same metadata in ``meta`` but pickled for more efficient deserialization The directory name is made from the request fingerprint (see ``scrapy.utils.request.fingerprint``), and one level of subdirectories is used to avoid creating too many files into the same directory (which is inefficient in many file systems). An example directory could be:: /path/to/cache/dir/example.com/72/72811f648e718090f041317756c03adb0ada46c7 .. _httpcache-dbm-backend: DBM storage backend ~~~~~~~~~~~~~~~~~~~ .. versionadded:: 0.13 A DBM_ storage backend is also available for the HTTP cache middleware. To use it (instead of the default filesystem backend) set :setting:`HTTPCACHE_STORAGE` to ``scrapy.contrib.httpcache.DbmCacheStorage``. By default, it uses the anydbm_ module, but you can change it with the :setting:`HTTPCACHE_DBM_MODULE` setting. 
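For example, a project that wants to cache responses using the DBM backend
could put something like the following in its ``settings.py``. The values shown
are only illustrative; each setting is documented in the next section::

    # enable the HTTP cache middleware
    HTTPCACHE_ENABLED = True

    # cached requests older than one hour are re-downloaded
    # (0, the default, means cached requests never expire)
    HTTPCACHE_EXPIRATION_SECS = 3600

    # use the DBM storage backend instead of the default filesystem backend
    HTTPCACHE_STORAGE = 'scrapy.contrib.httpcache.DbmCacheStorage'
    HTTPCACHE_DBM_MODULE = 'anydbm'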
HTTPCache middleware settings ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The :class:`HttpCacheMiddleware` can be configured through the following settings: .. setting:: HTTPCACHE_ENABLED HTTPCACHE_ENABLED ^^^^^^^^^^^^^^^^^ .. versionadded:: 0.11 Default: ``False`` Whether the HTTP cache will be enabled. .. versionchanged:: 0.11 Before 0.11, :setting:`HTTPCACHE_DIR` was used to enable cache. .. setting:: HTTPCACHE_EXPIRATION_SECS HTTPCACHE_EXPIRATION_SECS ^^^^^^^^^^^^^^^^^^^^^^^^^ Default: ``0`` Expiration time for cached requests, in seconds. Cached requests older than this time will be re-downloaded. If zero, cached requests will never expire. .. versionchanged:: 0.11 Before 0.11, zero meant cached requests always expire. .. setting:: HTTPCACHE_DIR HTTPCACHE_DIR ^^^^^^^^^^^^^ Default: ``'httpcache'`` The directory to use for storing the (low-level) HTTP cache. If empty, the HTTP cache will be disabled. If a relative path is given, is taken relative to the project data dir. For more info see: :ref:`topics-project-structure`. .. setting:: HTTPCACHE_IGNORE_HTTP_CODES HTTPCACHE_IGNORE_HTTP_CODES ^^^^^^^^^^^^^^^^^^^^^^^^^^^ .. versionadded:: 0.10 Default: ``[]`` Don't cache response with these HTTP codes. .. setting:: HTTPCACHE_IGNORE_MISSING HTTPCACHE_IGNORE_MISSING ^^^^^^^^^^^^^^^^^^^^^^^^ Default: ``False`` If enabled, requests not found in the cache will be ignored instead of downloaded. .. setting:: HTTPCACHE_IGNORE_SCHEMES HTTPCACHE_IGNORE_SCHEMES ^^^^^^^^^^^^^^^^^^^^^^^^ .. versionadded:: 0.10 Default: ``['file']`` Don't cache responses with these URI schemes. .. setting:: HTTPCACHE_STORAGE HTTPCACHE_STORAGE ^^^^^^^^^^^^^^^^^ Default: ``'scrapy.contrib.downloadermiddleware.httpcache.FilesystemCacheStorage'`` The class which implements the cache storage backend. .. setting:: HTTPCACHE_DBM_MODULE HTTPCACHE_DBM_MODULE ^^^^^^^^^^^^^^^^^^^^ .. versionadded:: 0.13 Default: ``'anydbm'`` The database module to use in the :ref:`DBM storage backend `. This setting is specific to the DBM backend. HttpCompressionMiddleware ------------------------- .. module:: scrapy.contrib.downloadermiddleware.httpcompression :synopsis: Http Compression Middleware .. class:: HttpCompressionMiddleware This middleware allows compressed (gzip, deflate) traffic to be sent/received from web sites. ChunkedTransferMiddleware ------------------------- .. module:: scrapy.contrib.downloadermiddleware.chunked :synopsis: Chunked Transfer Middleware .. class:: ChunkedTransferMiddleware This middleware adds support for `chunked transfer encoding`_ HttpProxyMiddleware ------------------- .. module:: scrapy.contrib.downloadermiddleware.httpproxy :synopsis: Http Proxy Middleware .. versionadded:: 0.8 .. class:: HttpProxyMiddleware This middleware sets the HTTP proxy to use for requests, by setting the ``proxy`` meta value to :class:`~scrapy.http.Request` objects. Like the Python standard library modules `urllib`_ and `urllib2`_, it obeys the following enviroment variables: * ``http_proxy`` * ``https_proxy`` * ``no_proxy`` .. _urllib: http://docs.python.org/library/urllib.html .. _urllib2: http://docs.python.org/library/urllib2.html RedirectMiddleware ------------------ .. module:: scrapy.contrib.downloadermiddleware.redirect :synopsis: Redirection Middleware .. class:: RedirectMiddleware This middleware handles redirection of requests based on response status and meta-refresh html tag. .. reqmeta:: redirect_urls The urls which the request goes through (while being redirected) can be found in the ``redirect_urls`` :attr:`Request.meta ` key. 
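As a small, hypothetical example, a spider callback could inspect that key to
log the chain of URLs a redirected request went through (the spider name and
start URL below are placeholders)::

    from scrapy.spider import BaseSpider

    class RedirectAwareSpider(BaseSpider):

        name = 'redirect_aware_example'           # hypothetical spider
        start_urls = ['http://www.example.com/']  # placeholder URL

        def parse(self, response):
            # only present if the request was redirected at least once
            redirect_urls = response.request.meta.get('redirect_urls', [])
            if redirect_urls:
                self.log("Arrived at %s via %s" % (response.url, redirect_urls))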
The :class:`RedirectMiddleware` can be configured through the following settings (see the settings documentation for more info): * :setting:`REDIRECT_ENABLED` * :setting:`REDIRECT_MAX_TIMES` * :setting:`REDIRECT_MAX_METAREFRESH_DELAY` .. reqmeta:: dont_redirect If :attr:`Request.meta ` contains the ``dont_redirect`` key, the request will be ignored by this middleware. RedirectMiddleware settings ~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. setting:: REDIRECT_ENABLED REDIRECT_ENABLED ^^^^^^^^^^^^^^^^ .. versionadded:: 0.13 Default: ``True`` Whether the Redirect middleware will be enabled. .. setting:: REDIRECT_MAX_TIMES REDIRECT_MAX_TIMES ^^^^^^^^^^^^^^^^^^ Default: ``20`` The maximum number of redirections that will be follow for a single request. .. setting:: REDIRECT_MAX_METAREFRESH_DELAY REDIRECT_MAX_METAREFRESH_DELAY ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Default: ``100`` The maximum meta-refresh delay (in seconds) to follow the redirection. RetryMiddleware --------------- .. module:: scrapy.contrib.downloadermiddleware.retry :synopsis: Retry Middleware .. class:: RetryMiddleware A middlware to retry failed requests that are potentially caused by temporary problems such as a connection timeout or HTTP 500 error. Failed pages are collected on the scraping process and rescheduled at the end, once the spider has finished crawling all regular (non failed) pages. Once there are no more failed pages to retry, this middleware sends a signal (retry_complete), so other extensions could connect to that signal. The :class:`RetryMiddleware` can be configured through the following settings (see the settings documentation for more info): * :setting:`RETRY_ENABLED` * :setting:`RETRY_TIMES` * :setting:`RETRY_HTTP_CODES` About HTTP errors to consider: You may want to remove 400 from :setting:`RETRY_HTTP_CODES`, if you stick to the HTTP protocol. It's included by default because it's a common code used to indicate server overload, which would be something we want to retry. .. reqmeta:: dont_retry If :attr:`Request.meta ` contains the ``dont_retry`` key, the request will be ignored by this middleware. RetryMiddleware Settings ~~~~~~~~~~~~~~~~~~~~~~~~ .. setting:: RETRY_ENABLED RETRY_ENABLED ^^^^^^^^^^^^^ .. versionadded:: 0.13 Default: ``True`` Whether the Retry middleware will be enabled. .. setting:: RETRY_TIMES RETRY_TIMES ^^^^^^^^^^^ Default: ``2`` Maximum number of times to retry, in addition to the first download. .. setting:: RETRY_HTTP_CODES RETRY_HTTP_CODES ^^^^^^^^^^^^^^^^ Default: ``[500, 503, 504, 400, 408]`` Which HTTP response codes to retry. Other errors (DNS lookup issues, connections lost, etc) are always retried. .. _topics-dlmw-robots: RobotsTxtMiddleware ------------------- .. module:: scrapy.contrib.downloadermiddleware.robotstxt :synopsis: robots.txt middleware .. class:: RobotsTxtMiddleware This middleware filters out requests forbidden by the robots.txt exclusion standard. To make sure Scrapy respects robots.txt make sure the middleware is enabled and the :setting:`ROBOTSTXT_OBEY` setting is enabled. .. warning:: Keep in mind that, if you crawl using multiple concurrent requests per domain, Scrapy could still download some forbidden pages if they were requested before the robots.txt file was downloaded. This is a known limitation of the current robots.txt middleware and will be fixed in the future. DownloaderStats --------------- .. module:: scrapy.contrib.downloadermiddleware.stats :synopsis: Downloader Stats Middleware .. 
class:: DownloaderStats

   Middleware that stores stats of all requests, responses and exceptions that
   pass through it.

   To use this middleware you must enable the :setting:`DOWNLOADER_STATS`
   setting.

UserAgentMiddleware
-------------------

.. module:: scrapy.contrib.downloadermiddleware.useragent
   :synopsis: User Agent Middleware

.. class:: UserAgentMiddleware

   Middleware that allows spiders to override the default user agent.

   In order for a spider to override the default user agent, its
   ``user_agent`` attribute must be set.

.. _DBM: http://en.wikipedia.org/wiki/Dbm
.. _anydbm: http://docs.python.org/library/anydbm.html
.. _chunked transfer encoding: http://en.wikipedia.org/wiki/Chunked_transfer_encoding
Scrapy-0.14.4/docs/topics/_images/0000700000016101777760000000000011754532077016777 5ustar buildbotnogroupScrapy-0.14.4/docs/topics/_images/scrapy_architecture.png0000600000016101777760000026461611754531743023560 0ustar buildbotnogroup[binary PNG image data omitted]
ЦТ…™ээaŸO“Дˆˆ„Hј@qwwTгЂVтТ(D^U•№њыС`”1k‘1Ж|yў~Ф1bšмсpЯ™“ОiS7чccіDRSуxѕе—р  –ywјps_Ÿ/9йט‰''Ћ‚Š­ЛєGyєёш#Dy$ђШ#бП=ЉНјšОЋ™“88#fЯё!Жm‹§њAmk#ЪЪxMчH@ИџиЏўЌеw<žv!Ш9KJђЩВџ№сжp8tКGьм`Фz•@Yrц€ш3;{“хєЯ’,ЧЅ)kл;ўИОЕЇ7aZЂL у`‡яЉ!g‘ƒ1KœАcО(!GЉ#”œщJЫ8о †eєoйВЅЛЛлfГqЮ)ЅЅЅЅ–&с™ѓrS*bQїЄ+>љЉўРЏŸ|ј—?йПqrafZ’KA=<ап}hчІMЕ7}ђЦ™š?(—^xнВк§]љЛћXУМ‰6­}Як›К'оќеыkв Т(%CŒ!И юФмщ3SWoYћЯG§Ssy_У†ѕэ3ЎšчџѓЗНќl‚rqPvеQћ‡oџhпвЩйі`лЮWџЕ%L‹ЎЛfaЂЉE)#ЈsзфЫg&o{aлžwётy=ъзаФёFŒ]з“““gܘqрР††Kzїюн}}}sчЮЭШШз8…e4джn+)БŸфlgR`.]šsїн[u]{XB 3гйл=xАЗК:ss„*ŠR ФЌЉI~јс~Пюё8,Јс}zљхy/Мp˜~ AфŽќЃбя$%9сш8!tюмф+ZрŒxз,tttФbЁќќNBтI}p Њ‚MлЕ?ўBггЈЬ Чh d7Э/fХ•ђхзЉГВ‰vЦlўQ@2bЫІшџLюњО4Н’ш1xGЬђoн % ч{^|џщ7%rEбѕёІФммЮСС”ІІУееUЇёљ:ћF“‹ Ю=€@”/.Kмв§ЯюЮ-vл \ЄCzfђНKх'vіў{gЧ „ВУЖlvс'+=™ €мЏёшШУEУЃвІш"Яљ=0R%aiIhšfi*:Ю‰'ZUяNоњ€P*ЂšRqС‡яN+[љъ+Ћ6ЎидcŠ„&'ЖЄ’ЩѓОјс‹чMЏЮ ƒn8І^{ЧнIЙO=§ќSZM(RwоќПњСы%JœsфF$"š),ГMйміеуЁџўїЯ[W8нюФ’зпљбЅђuЫ›OќїБ?і&•~!W6uHŸvљmг>ќШ_з„ЂСЈgТмл>ќ‘ЋjœšС‡ЄКMVОєтьџ6`NFе‚j5ЊigC[™Rj†ЊЊеее Лvэвu]UеЎЎЎ—_~yкДiVtiœ`MЋmm­Ы—Л,fЦ“ю61Mžœœ (є№с`NŽчЈћ€Њ*‹ХјюнƒееYBс—$„№ЊЊD]‘ˆyму'%Љ==бУ‡ћђѓНBс ййŽююЈЯg$%‘Q \C›Ь›іф“}у7hЃGRFњњќ))>›-z ;67Mpгт[З.’]MІŽ§э|эПЃЏ>gюЏ3?z—kYшцЈ3<švGчЙУбŸщ#’„Ч.DРбЛ CЌCЋ`(Я §№Їсч5ћŸП$эќ_ь•mмЁIЫlНZIwіA@Œ‹НЋѕgў­эыB dR6GНўZebс №а&эџдw4 “ЩЎQЎћ€mN Ёˆ€$элЕП?Ўmi„‘дrљC7(t$•0wПЎ=ћМ^п#ЩEg_nЛ~™œe{[ьюŸDvЅ:y >ќ›ш†(ћШч\Ÿ\BќA8љK­Њz__ачLKK;]–хЙa4€@VЄbфD q4EкшЇ‡J“Žr,™bЬˆp–ы|ЌQ…ŒФЦq4t]пЗoп–-[ЌЂТщtжддdeeY4 gЛƒ`M‚(ИСЭБ‹)Ю#€маЭуm"И3ЧZ?ЙЁ™GoТа5U&K’Ф(š†Ц99В!f$0‰Цкж?zџ+ѓ?sнtOLзNВj`МС9—$iвЄI^ЏwпО}‘HD–хоооџўїП3gЮ,//?Sрˆ’Yџ€Ђ !btnСлB’Ё(ЂЙy {лP–ЉзK{{C„cbєe”$РMSoh dДФЖЕ/q8€RГЃУO0ЦG2Вы†0u]'гgЎ( љ;::,мјНFЌkG9лэњЛ)TГм#/ma ngЫ—I/д‡7]ЫеTДЌнћЋh›CšГШVšБnО}ƒ~п7Эжo;?>ЭЌ‚чї№њ&\8 0Фр[ї""vv›ѕ§Аи ! ТЏojE{T“FŒ‚Ь~ўиЏŒXŽДќ#j‚С7ОЎНіЧpЬNПz­œ<Њ{ SмјhјЇЯ˜žRyўхRІ z˜_Ž|{ЛљЕ:ЄЊТюgЂ?ќ}LЯ“ч]dЫђBА“oпћС=ќkї8—–- Њ:ЖФўяоШUžwЁ­,Кї›Я<–б%$ФЕ §ќ_fRЕВpЙšІˆCЕЦЊ‡B;ыї|UЭ”HТmЦфсхЊV’vФ q2@'tјА188˜––vКю‡sЦh8С,~ђ OДъTpf`… {zzMЧЅ!"„Œ“FйiЅєДПАt]пПџž={$IВВы322jjjмnїјђ<О#œlNИн‰Vg9БШЫA=алл+t25о'6tМќпўщяЅ„ гЎќфэ—ІŠqЂд~ч0MГЈЈШыѕюоНЛЇЇG–e]зз­[чѓљ&L˜`ЩjПћV„’$uvvfffј§;тXЃс˜)љ8GUU““г{{}С ЭтбH’œ••йе5 %А)Ъ‘Чж0„УЁІЄdhš+R-^Я‘Е„аh”fff"z4M ‡Эб$•К.мnGFF†ЯgDTгN”sШhx?Q’Є5kV‹0Vzљ™ьг;ОŸN{W !šІ!ЂUй%%%VЂм9h1œ р†’wбЇ>]юЬ-Уh†BpƒИ Њ^ї‘IžŒМъйѓ&x1ЦOЅlрЬœ!КЎ'$$Ь;wчЮ---„EQіьйГџў ярАюЈ~єу+WЊŒ‡ŒR’оЪїР)-ЭЪЫу/Мр8v-Ѕ$##'%Eќч?vMУ1ymŠB.М…1њТ ъБinBРEЪ2}ўy›ЎнWUЩEнжй)§їП2чcїх?јСBxс…NѕЙА<'i“YŽFcII‚R§”ЪOоHэФMбЧQ#аQgЖ4pяBз’RёЃa‚?йSфЊjiїЋњЎ~ћEr5ъэfЗ.ЅЩиБЫ№qrСЪ­біflЎ,Ѓ8И• 5Г)5PD]‘ЎМDR4 kШ tЇROеZDd” "H2ь[mј€]`Ÿ™ˆ=Р(D52щR%їyГч€q ЌNgtбЕŽ97аœLJtŒhшWi^+NХКvЁХ@ђB M4lсюЉі‰хŒ…/ В ЇЋ‹ ЕПєYЕ{bї } ‘^~Е}A6єˆLAШlљЧlOЏЖm5КЎ“ (lщR™Х FO­рTUїzг+l7Ю !„›МЂВТэM8ю†„œПУoˆглUЫ‰D:::bБ˜5XС`аяї'&&žQ‰Г BPJо…Ÿќ”ТѕP 4ІмдJвL˜ИјІ9Ы7ДH(5žЋЦЅДЗЗ7‰РАt…ЭfЫЯЯЗйlЧНмЇJШˆŠЂ8pАННqтФ Ueтн™ЗИ…AQhoohкДмcз:,ьшˆЬ›—ыїыЃ@B У!56všІЈЉЩЪл†e4>мmšД  34ЦьыvЫ{іДЈЊ+%%q4ћ Q-‰њњnЇг“‘‘ЦNЩp?еg–bа0р$+-OсШ@G@ь ЛњEз–SY&‘J q0э4УIдмп‹WUВYe№tЇhю‡Т и]+ ЦцN`jŽXеЪ;uЅШo6"иЅ™yФф єАd (B ЅРd @ 9:M@Bба‰\@~9Hн,п‡Mбв3ГHb"4nг~џАйсӘШ€E›IT&#bKI›Fн JЛЄхSГ bрў6Ђ" вŸy5”!ФŒp;ХPDД ” 7ЫTи;œЊ чЇ™В}МŒ†ѕ‘oЫгў2}LєаJ‡CОыЎešНЏЎ Uѕ|ћлџОќђќ%K& qTо !4вОѕ­чЏЙц‹ .ВFoœ.“eЧoкДyћіDљtъMX ’МO M!щNhе f‚S!рУяmB6˜№QvвВ9Ќя?М­хГЖ Ё@.Ne}“Yыо4Ѕ†ЙЯj…Rd*Е 0&Cр— в€#qЉG*>€PтВc F\”џѓ‘шуџд;$6uЊ”—AиBУ9vІAl2ЈвPЉ'"0ЄЊA0†лŸŠl= la1FA%Ъ;Ег]WТaщєђ%б@aф8ц"D1ЪЧB ЁќД2PJ(гъЩЈЂŒs œsы%wЙ$IVД{еЊUННН–мQmm­ЯчЋЈЈPUѕћENSd‰R<‘Т2"7uѓ4нx„*ЊL7Œ‘#То2ŠM(cчrR c,юнЛЗЅЅ…Rj1tххх-XА !!с­NьЫјpЙ\БGЇZriЭ‘€FˆЕуQЛ3МЏ/lйУлЕo8ЌЛнCsэЈЕ@„~ПцvKxьОˆ‹™‰‰вё! 
!<4,э I’ЦЏzТŠ…;ЊІЩœŸЮV,ъЇƒ€ Yyr–‚­d КРЃХПа0A) Jг'ШY]z[—l5[ќ"ї ЩnƒŒRIzЮlя=}<Р ѓ˜ 6zАGн‰Ю”€"а9Žс‰аM‡Кjѕ—џ­ЕЄлю§Ž}VA˜—зЦкЗшuУ9„‰BLŒ’Q&pmX9†‚ HнєКoЛЎJ‡?B6H%6F–E}Ч—.БKq:pњf™q0˜Ійрh%ю щ‰QЗMRР№ЁХŒ~М6ЩyкŒ! GД~ƒЄ8$‡м0њBЈШRЂJЮM–щГ"їwО 99љЪ+ЏмАaC}}НU8wша!ŸЯ7yђdЏз‹ˆу/eyBPbzћC1ƒяK‘0›;бc;*ЁЗіЕїpХюёКNOzрй‚51Цz{{wэкхѓљ,ViY–'Mš4gЮœгžЪc Wbb""уЧpœФю€†У!ЎЗ„Ё рННš,Г1ᇑ}5MффЈttѕ„EZІыТя7ЪЪ,'СБТ9ЄІ:Јdd`­№ѓx<0ЮбLы’Й\.Лн‰ЈIIф9†Јј№еIDЛў5ТЩŒKdе€DIqC{;rРHіE0bв‚Db"ЄfHS“Ѓ НМvЋˆщ’rТЄfJхЈwЖš[ZD$ЦL8ц…џvWœpJrQьhGZILŒ]Д‡AQha"4oЭ0эVЅ*ŸRЃ&xЛEW( GAЕ‘tьB$дІ’ z }mШd‚‘ТdxЅ 4AsГЁ/2D`2!Œ`X?BKѕŽAƒUЕ%&&žЦћс4 ˆ@$hoэћиѓ]Н6*Ь#їВ!аэq^RštSEТ$7E$ѕU›ї_К]Нyс*Хwљe$Ѕfф›>vРљјyзШuЭ“^,™RќьE.Ez(ЊХqJPeётХщщщ[ЖlБ ѓоxуšššœœ‹ыщŒ—™“Ы‰ЎžЧ~ѕ§'жд •˜cќ „3s]їѕпп}БлЧЕ+N’,ЕўялЫОпАшж_ќё›SЂƒ•ЮвЩS„eљqЮъъъ8чВ,kšfб6”””ŒGЃ–Y™œœdГ)C ДЇ6v@t'$ЈЧйњW„У†нЮNрУ@HNVG~ Ю‘sсv‡В‡1Bˆ‰ˆnЗ<в1ƒ)IrbbŒГЋв:Ил‘сІ)S*NЭJБДlФу%^ J€kиwијз"cсхŽk'B L ЪЄь\RЗNoМN^”OЂQœЉ32ЃYъдL" pЅВвitѓуUф‘dЙ*™˜:$ЄбВ2мБOьМD)M ЇZXOœжL—”ѕZг:ЃkЉ-Ч 1v4§зhŠ ЃHЎJ‚$ #‚ƒ,ƒЪˆт!иЊНЖкш $ @Е‰€лMŠЫШ+{yk‹˜З€zТ`wТўцЦƒрHD„LœЧ№ї|ЯJН}ššb]€ЊBџ~§_Ћyо4ev CёnПs!u$$xМ^яiМIЦ%<СuH47г{qЖ3™X"”5ЬжоаЊк–7КД_]˜1нMРЄй)_@iŠ—вЇ7PJрHШ‚АcBВ\ €ЁчШ AŒxw™%*LІ]I wLq–fKжё Ђ@dzд(!cоъCФѓ(Ѕф\ієОЗˆ‰‰‰›6mjooWХ4ЭmлЖY…yЋє™Ў4@$ŠУсt&]К|Zъ˜”=h %ЉЂHбL`MгDТdFч-?6!”БQS?Ђ\X"„RЦrЮ0‰ !\EмђЁ‰Х“SИ.(%…Щ9Ib0|Lыcва:ц‘&Я^zƒBQ”P(ДџўІІ&Y–)ЅšІхччЯ;7))iœh -g^nnžгщ4Эwе ‡ЭŒ ћБž+sГЏ/VTф9^‚PЮБЌ,сИѕрКЮc1žšj8ŠрA”$‡"S9ALл49cjFF&Œ|М”:J-.nf,vђ‘ж Јтo67ЌGŽ`8иnЎ§ЏЖО jл>їI5Q‹ЁГDКрeыŸДdБ+ф,AОъЉиЩ’;еВ$7Щ­”mOЛTtЬRгэ t”h^єЪjУGАшJ)A!ШбR‘`ШђбСЦ†"Ё ЫР SцЋ—­2пНџarУlц ш0žљ“6`“>њХ­CNЭЭТ-/k+ѓHMкЬW!AЋŠљЎНцо}Ь,fЙiЌzБœђ3§Йg™ЭPђнипhЎzI‹0eСIіЧђз‚џ|9њЋdИrs0їš+џ}Кž|(M™;йъ$‘%+&vЪ—Œ н==ЉyyvI’Юuэ BР00%йuыЬŒ$+чШтЮоаёЇН}5zЊ&КьD*Џ.И:BШ )$E! ›Р™dЃCI,І}TК3Ђ ”йcУ|м&}иS Иe#$фd§Ј‚BL‡А €2YЅР9ъШl2“‚@а9ŽЊ}T–ЈLFw@`\уъьРџЭШШИјт‹kkkwяоЭcŒ544ј§ўЊЊЊ”””3)X5ТаЅ‰W|ф3S“Mѓ~'СЕh(Ђ ";]I„†ЭэuкdJP˜F4ŠCсD$Lu{œЊL)  У†тJікE0ŽщІо’1V_я—$ъё(cжЯѕѕƒ^Џ'11бЪзбГ““Лo_cccіДi'Й Ѕ‘ŠEіфЊ ё3А9hb*ЭЯІTט ” t;Л№FGХlЕ­S„tm$)‹чI€С20ЃTЉ|цЧєš -ЎaŒ#PаЂP8W§жЏЅA UХM 1jcWТ==D НРM жBЛtУЇмsУДа QNЫ.q>TAŠJ‰‚Р2хŽЭНŒї  ЦСюЁщ9Д0ƒhQ Ѓ3/ЕgU(­НЈspЅБђRцQаqН3c†]@M YЛш#Ю ѓmн48xВЄЪb8DџT ŠˆЎ@p(џ’Дј*очGƒъ$iY,?‡BMš[ОэNїMS€Ÿz>5чЌН=1gAAœV_д8 „€D‰Ф20С­2ЗъQ3Š€н§g”9%ЩQ‘`ј…CzGіbЈй'КY[OрЯЭўM]Бo-ЮЈ \0†пЃgeŸ!Лlх.ва|НбїПжДŸЮMЮ-vƒbСа QŸ#ё3T@™[ньkяЗЅu_ K““eтПМЯџъсШ7–х]ъ!‚0Њ…пмљHc$fS'x%-Yзф{Ё)ёыs3–Є04тvУйЅдђАM˜0!))iэкЕ]]]ЊЊFЃQ+ŸЎККZ’Є3Oф ЩŠЊЊ&cУЯЅ5­ " гр ЫzћіЭЏoЗчцЕќemЇГЌ$Ун[Пeх†Еoьѕџр›——x$E Xѕ№П_Б7рЪЬЯM–_§ыЯwЯЮ’ж§ї•ьW-—dыЌ}meS4§в0 +‘CзЏlЮKкНy]jUq Дя^ѓПѕы7эПуG_П ƒщдЎњwџ§Ё‡ўЙЎ^їцg'hНћЖЌ]ѕкњeŸљЬ­ѓ‹Zь й ’$EЃб={іДЕЕ€U“““Гhб"+дzФ–,юЬЬьнЛЗŸ’ЋW”$жб1шййЮ1k­№„Ѕ`YY™p”к*"А;„@›эјE§§БФD577й0ЂЃщ €>LJRН^хИ5k:].я™1”­Ї/;;'==-lз4UUc'УђD˜&ЄфKЙЦМр`‹ ’Q,‡Ч@z‘”W” F5аaизЫмДjŽ:…A,ŒКiХС–&MЯ—$€HM@€ ­˜Ц&3…атo&†L+ЇГЩ Т!4$+WVB,Š1ЁЃ.‘’IrЅ@pа5ŒF€XиtJr+х(ЎC$Š‘(83ЅYР(шŒ™€€LŠЇШЎc$ ‰%ђ•U Ѓn!Р5ф ™0EЎ–€XКYF#(‚ЉвЩГ™L SKђ6@$ВlvvІxЪЪŠE=…Oуh4 €@ф ‚$‘ˆ&"1Ё$ЪJ@‘YЊййлHVх\9vи™4%їс*‡] ц ЋОб<јј>Я';h,ќЛЭ=Яw˜Ы}ГФ&д_маv_}ЧН‰Ž&ЙфЃ{@%–lgйЪЄEXŠCjї lIЫќг"o2"УЖ-Э_­ѕ?RНdІ‹БКЎїнЁ’ЉЙїNMШ’‰иЛПыюu=?нe+š›œ/b<›ђь`Ф“ŸššК|љђЭ›7зееYЏГІІ&ЋЊ"!!с K_š†Ўiчce eV>=QlоЄt{У‹K>њн{/IWЈтЄћŸњљњя3/е/љќќd­uхЊќkctёчО}Ч ѓR˜lЏ}ђЗїПдЩЫOVDЊИМ.kюAЄ6WrЂЗПyХюђO§фЎљ^Š]lћгн?~іОrУ‚лjœаћњ OџэпЛ o§њ]Z”І#б=/>ќЫпќэїЯф~zyОkќнІgПF }}};vь–ЛІN:cЦ Циˆюљx_#Ћ‰yѓц?§єІS7ЄUЋкU•йэв1‡шщ‰хц:ЋЋг9УЮˆЌЎnААа“˜ЈŽ ^тЅ—ZГГcф1­ё ]]‘9sвэGЋ* ~Ы–СЪЪ9gRМ†RZZZДvmл%“&э2Э“’­" Ешq–[RЧ8ш€шQд"Vк*KДbдР!Ф0!I$€†4@ЪŽми!Т0eЃ4ŒŽ^hФp rфP„ˆ†0:ЂЌ=ЦЌˆE0: B€Rр:c€0tB†СсЭ(ˆу5 ~У‡щd8ˆh AŸ ,]ьІІbПп~щЅЇ§6GЃ1ъБI €Y%ЏQ‹МжЊ АYSl 2Žˆ`Žr*РˆŽЙщЎыЋмv4••‹&%e7єъtLs{Z|ыzb Ѕљп­v)A0зЕГSзїЗ­о;иXх,?†‚#ŒвDƒЃ,йn™–"[ZVвд[R}ЈЛ'•мŽо№†ЦСЬєЏMNЮї0Ј<Ѕ:ѕжў№7іљvUzѓ3$bФ­†Г +ЏMUе… 
ІЄЄМљц›‘HD’ЄССС 6TUUхххYQиё™IŠМќг/ЌлŽа Љ`оЇј•’!sIў•З\RрQbšfOљфтдlзctAИГЃўZœќЁЅ‹цИ!–R'ЬЙњCmюОЛ жkQp!F}p Ё›ВЃшš_ЉšУ4С[=ГиѓњоО§Эš:їяйЖfe`вmw|рђЊ,‰šBJ]|ѕѕ ;~љтЊњ+цф–'nРјмЪKКiš‡оЛwЏ%9fšІЫхš7o^QQœУ‘ыDмpУ Пћнc1Эf;Yg>чؘМvmWNŽЫуQЦdB…0{ь@fІƒRUзЕЃЩКwярТ…™nЗ `lBчќ?џ9М`AЦ1 Ј(’ЎЖlщНѓЮBTг K☊CьлЛэЖK`˜ˆт Œ!"–••эмЙї№сhAA‹ЧучœЄнpЊ,ф­+нШё2ЬШёeп;<ЃЇ{фm2иŽ=&!@оі Їиа;ИАBPEб[[ лкМ“&•y< Ї|ˆЗУИ ˆ@(D‚БMО4Ъa4 c[CџSa%7§іBЛƒЗ uBг\і"I` %€(лэХ4Œнь4њb˜—Тz|8I–вйˆдEБмё6пˆ(;АФВ…*SJ r€н!}п€Ш/gšntіrŽ€H$›-л еGЭЫQІЇ‘-Žw„—CeeeRRвHU…aЕЕЕ~ПпЊЊЁ D”в ЪrœвQL€КIг2\!ф$ЉМФУ “#Ђ0„ЭЎP‰с`†Т‡к1чвдф‡ѕ $MЭШ™0m‚ВОљ„/S*ŠœB39 SШN•R‚F ЉогееиЄg/wkСюЦaMдэ2UOqКЖГБпПSуq+[C8оЗoпсУ‡%IВRsrrцЭ›—””4њ"žXmPjќёŸјФDDѓd?)%„рО}ƒј@БЂЈˆЦ˜‰?3VЏюјдЇ*ŽКй„@E‘ћњњvюьћєЇ+dЮMvt–ЎѓююШ‡>T`Œ^Х90&Џ[зжиШЭuѕBшў§=BHз]wœ)ЃadgЭšњт‹uu•Гfm:3эЦq’АђУaw}}нž.Dœ2eіŠЕŸќфtг4ожhрeYоГЇэ№срœ9щЧyџіѕХ(%ќ` Ђ~єФЪЊU ‘ЯЭuŒq3XЕ^ЋVuЄЄицЮЭтЈЮ "!RCC ЄФ[] `ŽЉЦ њS}FFž,Г3?žyyљхх%{іФввJJJъu]Ёєœ$м}џСВЪz{/Йd’Ч“0ЗЧx•\šКHЯOОkBBЦ0џ7ЅФiWЪ“m6ŠТ@OќХ~Œxгƒ% 8))LОxuЁŽ f0”RЈ…aэ4чцXUЛwяnooGDY–Ѓбhaaсœ9s,т&84VцЬ™ІiI>Иы[пšk‘б c`Ѕ@њžyІё–[Ъ</чббОыTž|ВсЛп&ФQСЮ‘1ЅООћйg›~ѓ›ЙВЌŒNJАт ЭЭƒ{і ўц7ѓ(ЃЫ„@ЦдКК–ЛяЙg*"}§­‹џќЇОЕ5vузœ­!Ел‹Я§пџ^­­8{іЗ;p’I‘qŒуННлЖ•ІІІЮ™3WQ”qВбЧ1ЦЙ№ЧLуFЬфQ“ЧLn УНx4ˆ‚Ђ;9`ТJbС№AN35CeE‰ršƒlлчя 5EФQЂкОFпўю˜!N*7šw‘.HŠKЎHЅu‡BЛ{c q6xФЭшhѓз6‡ЂуŽунТJ GФєєєЫ.ЛlтФ‰#мв7n”$ Nu>™Ц)“$Ibвh0Ц$Ibвлч=#GЩщHШOЭОMЖЩ”H•їДмЖOГЋфTkH WRгв‹JьзюЈ?а: іїѕјТžК­ызmЎыёг›Щыыы[П~}kkЋЅ]išцЬ™3/ОјтЊЧГn1 ЂЂШwнѕн^Ј@JпJDЄTнИБЃЛ;КtiіaL!@zєб§’D?ўё Jљ1СЖjUGNŽsљђ|€ишЕІ)Qx`w^žЋІ&e ЅР|ьБƒ—\’3{viЦF;08ЖGйщЅ7%'ŸNA“‡ѕЌeeхЬ›7Учsmл65qI’y2˜qŒу>_вцЭ%iбЂ.—{ќМzуd4FQ"‘Ё†ўБ/*‹Ыaфд‚*C{_шЉў‚ЈGТЯlь“Ѕ’lOКЩy sRm‘жЎ_‡pАў`ї7^mњќ– †ЊVŽ“vtz*ЅЧБZ(F›H“3 Rћњ~WчkŽ@§нО_­jМeэРž0‡y'ŽгбU .ДЂщ’$ѕїїoмИБЙЙ™ZЅNЇяњQJ#f, ‡ТсpшЌпТQЭДТ ”Хmѕ—RЦсzвВЪfOˆn[ЙfужЮЅФшЋпјŸЇživИ1LыB(ЃЃЇžЁнЧ ЃŒ1ЪcšГА|ђМE ѕЯ=њТъ&'@DЯžзџђ“Џ~х—џ=иgЪђiPJБ<ИqуЦ@ IчмnЗ_|ёХ3gЮПЯwж[Jй5з,=p@ўУjs†x‹-M3њуз~шCХ••Yœk#ЃB€iт\ћ‘”IГ/!RGЧРїПП§ц›KœNЗa1)8GY–КЛ}ћ[УНїЮ@УK„Hћїї?ђШ›o.уѓ7MЁ(і+МњjчнwЦYЇъ-`=k••еГgOF];vL …мВЗЮ4ЌgŒїїЇmнZЃ(щ‹ЯЫШШз‡n\ТBˆСˆI ёvaд охбaQŠІђф4;мvуAЅШ m=‘Юf&пRn йyЧЬДЃgеЦCkwЈen jm1сIHИkš'…˜ЈщМ7Ъ5@ˆ0y”FБ(ќ1s`ЬЧ›ў˜9(!Ё\P™vG?Vп~ScOI‚Т4Н%h˜6зчf&NіPрёzЫs#Я‰•ЙnнКЎЎ.ЋіoћЖэб)NA|ЗёW DDУЁPАѓЙяпОт˜[‚Œ†Iхњц>ы5ЂС€o0.˜ДіжCAŸЧ„Љ™оммxщю?НђРwЗџЏ(7™ѕД‹f\peЮЊ^цŒ‚zhачХЌGŠ>ŸЊš4№X р ИtFXЄ/КђІŽžРП_ќэgW?U“B=н§м;ё#ЛЄ<гnшкЛy­Xг•$I‘HdяоН­­­”RJЉ%o=oоМФФФsФС0‹("//џ'?љХ§їўЦK““mœЧJˆ›&ЪВњрƒЕMMСGНрHF"шКPUћПИJUйwО3u ч IьWПкY\ьўќч' q$зA4M DўФ'жжд$^xaюш}AҘљйЯЎџр‹ЏНЖ|tE$„ AюНїO~ђЋ&”žѕ˜6m*€иМЙ6™1cЦЏw€s†јe0у8% J!ий™Гuk%@ђЬ*,,‚qZnЃ 8эЖK+“œi6злШ{вЬTя‡ЪЄ21ф$0 LLKјveТГЕ;УМйfщ/"Јі ‹§ЊЭ‚ЃЭљбљгђ|ЏЕEКtЇГКаqQ‰wj‚&"‰л ч,Ј7жззsЮeEоWЗŠЬЮLS О˜!JарітIГ.RђЈ]1†БPŠZВ 3d3fкsІЭН8С(q0ыS’4И-wњМ‹гDЉ“сiS–пљНŒ џ[u /Ъ™gщeW\7зіЪ/šLa” [жДe—ф—•z&КŠц.М8рЩW†DWЂ‰Ўтљ‹.Ž&цЋФŒš‰“nљвї&Ю\НОЖО7d’”Дђ…Uѓ.X81з#Ęx|#ђж#U34cl qгйО Ž!Фѕз_ёф“OпqЧЋџјЧu” Ур’4Єу%šІPЧ–-э?њQэЯ>ЛЈ(й4‡ш9GгЊъ|эЕІW^iрyУg 0$Y‰Šт|ўљ†ЇŸnњу0!L+уХ4ч ЊЮћяпV_яёХeŒ1ФЁфJг„ЦlПљЭЮСAэїП_`š1kG0 HХ§йЯўЏЃ#§ЎЛ>+œ§FФiгІЫВВiгЖЕkgLК?3Г•1nr<5rќ€H‰%)пдTЖkWОлДdЩ‚œœм3а:љг_Э1К/ƒ§dњnЅ8‡A)иєЗЛod dƒ Р ы`ѓ%kc‹jr;п‚€4ЂХd:# BЉB‘!o3-(E@a !Ф8p™BРр ЃЅV *Gѕіш…€J FЋјАЂещxB FЭyIЫМхКЋвЦй•єОХШЈюнЛwлЖmСPР&ЛЕœ7б0MЃ,eСьœ[Ук ЃяаnFЄЗЫІH'ŠwJL= „9Г{мNЂўРˆO ‘кЌ…О@T ђhРЇл22гн*šІЉs0;wvКZC!ЬGŽoыšœšы%F 4р{ќАY˜‘А,G6"œы\ЭЃCр†р†ка@ŽМ…С M VНЛЁ Уi (L]pщQѕOТа…Е.L] ]S8m<§„€ш%žюžTYюtЙсШ*яaЄЊ"---##УяїћќдCd ЪFBІГJVЉя№1#DшбH( GТЧA$…cš ”ЁGУСPд„#Њд„#†ЂPХN|л_§ы}ПњŸЯ›[Z šЎ(аЖ§ХGпт}СeNOЭаЃСPD3ZŠаУс`$::џюШBbБ§BšZ, …Bсp$щШЩqS4нЙsgCCƒТњЕЈЈhбЂE999чЙ|?SJ…р))ЉЫ—_ќ‹_М№Џ­W6iRcvЦlў_ўrыg?ћњg>ѓѕpиxр—32++SГK’zшяпx§Оћі}ўѓ_khhўљMiiюввtЦl’dЋЋыОїоѕпџўЎЛяОgЫ–=ЏОК5''БА01•1ЅЖЖч‹_|щ‰'ОєЅЏМ№ТІ5kі”•ЅчфЄXkзЎm§ђ—_}юЙШзПўЕПџ§Х––Žќ|OFF*c 
cђkЏўЪWVМ№BєеWџ7eЪT‹aѓlф,іеЄЄЄœœL]7ъъDЊŠЫДй4DŠxждипK@$„RЁЊz р=xА|зЎтССє3ЊчЮыѕ& BЮЭ—ѕ™uš/)!D>Й[šЂН%ТИЃ„ž˜I”QrЂ(%#мMdьё‰tœQ%Ч^Q2Оj>qѓћ РšК„щщщK—.нКyчюшnDdиаББfъ„фЄ”БЊж'}xЪЄ“јиУуoHЈъF4M%!#%Uъўя о5Ѓ$ЭэiмКiУ„)ŸИ`Q‰[E`ь!†w?ю1ю$Ѓpz&ЋВЛЛл IШВ,„рœЯ˜1cЪ”)ŠЂŒŸМѕщ…Хi=aBхкЕ+~ўѓюОћ/?љЩ†Є$ЇЂH€ќ'žxqљђKлккђ“_оrЫo+*\щщž`0К{wOaсќПџ§сE‹мzы­wп§ЃO}ъљЌЌ ŒQУ0z{Э‚‚…џўїC ЬПьВ+~іГ_]wнпsr”дT7чbяоС’’%/Нє‹ъъЊЋЏОюЮ;ПvйeЯед$1Цzz­­њХпќвK_-/Џ˜9sюНїўєЊЋVЄЄ0IЂœcWЬž}ѕІMп***Жв{Яі+Y$11yЩ’Хy›6эиЖЭ18˜рѕњВГлню •ш`Eƒу8UXоЦ!80иоžне•кзчЮЫЫž9srVV.œЩGќщсGsєюЫ`ŸхД…'о(tl^К.ЖЈ&яў9Nяц“шœ…BAЏЃЙ+ДЌ_UZzЦЙьЫ}oРa!№[Олle‰јВh{™т$еU5ЙЙCOнYК ˆРdЦЛыV?ѓїgзюэ6(A”3Њ^{уЕ ЋВdЁП›ќƒг5€ЃCVњ‚aчqг;РШїz{{G}§с­[ЗfggMœX•““–˜˜4r:‡5Z 0,ѕ4š­вњйZ>zЩБk-MібЇ?fэЩь;І‰“Y{О@–хђђ ххBЁPGGgoo___ŸІщˆ€ЈFЃсЗ&зzЃжЃoГбЄ${JJJffFFFšЂЈЃЗ;+ЯTZ\ддЬібЬs$ЮўЖ„№я рfIR‚гх:л=yПУ4rѓђђ‹ЌyуѕXTSe```§њѕеее999жЇоYyХЙѓЕ’9ч uuuІiЊЊ‹ХGЊ$о8&ЭњdзОѕЏ'Пя)эx~].WYYiYйа ЃыКЎчѓ9 !TQdI:JЦ§\pщISІL---=œ я/6ЛнсpРyўR8я@€ JHqiУuщцЭ[клл-šэлЗ–——ŸYэsBEQBЁаˆМ5c,‹XZч—Ћ<ŽГ…gЩˆПDQEQоэqп7=tчH]’фt:œЮИкsяXЯ™™Y_|qmmэЎ]Л$I"„466њ§ў‰'&%%НгЊŠѓˆЈ(JWWзоН{Gф­ !гЇOŸЙцЦOпЖ4‡F"бpgУž­kџїЯќwсэ?§сІ8,ЎBDB)ƒ#NфeЃ_„Вб2Hш0—3"\"G6р& nђuSc[!R(р„kШЋЇРа4M>2C!(ЅЃU:Q Ш‘žŸ_~Ё8Ю}Ф†8т•ц]UU•””ДeЫ–жжV›ЭfUUј|ОГM5.А˜ЁУс№H•!DзѕќќќйГg'''ŸїўB›ЖЌтщsІd[K_rуm|ў'_ўюЏО_:сOŸœ™N Ѕ|ррЦ-{л"`O*Ўš<Ѓ,Cя=АjcGоŒ9™*ˆаЁ•Ћъэ95sІф„EїЎYпfЋМhВКcхЉjvБкКъѕƒBM/Љž2Ѕ*BшHh‚JxџСoŽД2eZY:EK:GяЊ{sk][а@[rсдi“ѓ“U@DBСwxыжэ ]Aš1љЂJЩiЃ#i6”R6oкИѓp_эIЅgL+NF>аИaусœYеМi[m#wнвьxНBЇ qЃ!Ž8ŽРr'dff^rЩ%[ЗnнГgcL’ЄƒЊЊЊддT]зЯяy† О%IВД$,т&ЮљH•ФyЄ%ёv Ш н‚r8G%ѓЪл?[Луі'ў§цuS–ЇЪ0XџŸпћX]иSV”l ЖќхAчВЯ~уѕпuOюwПя–*рлљ№?љDЦe_zєЏŸЯ€hэяtWуВG.,сџњбWk ЎZ:н>`:№и›ђСЏџs Љ@Рa vŠУ­x­VўњыТољХklFЯЦўюgюАeцg{ЬіЦ‡Ъ[ўѕЏ|zQєmр?љћ›Мzb‘ж7mŽэёЩv&8 ~э?јгVŸЋД0й<ќ№_Ÿ[њёЏ|љъ кС5ќЮŸн цЛx{ŸYS~хвl%žљЧщAмhˆ#ŽЃ`eGкэіљѓчЇЄЄlоМ9‰иlЖžžžH$RVVVTTdq5žзАЌXђжеЃлэЉ’8sп„PJ)2ЪˆСІ”–VO|nноžиђTh}і‡їm2–ўьw_ž›щ‚hзЫПИ§kП§uХяПqуrЯуЭћћ *ДчЮЛ$ЗtТЕ™нГЅŸфмxM$5fgWt-}бП05 ікwЏџЮџžмvћТщTЙQ"‡ŸљбЯ7ыЫ~іЛ/ЯЩtBДkХ/юјЦ^Дрёh§KЯН‘pЩз~zћв ;іяzцЮ§т‰M[ј•…ЛŸ§Ы“ЏХ>ќћПм>+л_~ќW+f^+алžџщOжоћы/ЭЫѕ@ЄуЕп|ёјyљЂ?/sЅхЇ Ќ Їў№[_›S˜ЈXЂЇgћФёоР{цЅGЇ VŠ!ЄЂЂтђЫ/ЯЬЬŒХbŠЂD"‘]ЛvежжZI‚gЛ›я”RMгЖnнКwя^ЋаTгДМММ+ЎИЂААат“yxSNBz\В[‰hЬю7_иуœХ‡чfК„a‚=cщGЎЭ4ЏнЎMЏщкЙПб­ЋwEЊ.[^Э;wn€њѕ§вд™Y@љDё”љ—NMSуhЋš™-X =#0ьhˆvlљяnзМ+ož“щДZYі‘kѓyїкзZ!iк~§‡ОД,УNhrѕТ%y|АПл€Р–]­FС5зЭJ'РСНtйE30Ќƒ РЛ6?ЗUš}эmѓr=м0Р‘uбm(ˆ4ОМЮGэьу•K.XP’bcяIсП8Ютž†8т8FRRSS/Пќђ 6дзз[U‡ 'NLLLў…kž@jнBMэіa šТ›чЕ3Штеqœ>Ф†8т8!FЊ*/^œššjUUШВlUUTWWчццRJ­T€ГнйЗ"Z•ћіэ3Mг’ЗіxEQЮMТiЋWВ,wuumоМЙЙЙY–e+‘sњєщЫ–-БЮvOЧ LБQ#2а јњњћКялњппнѕЕ?ОžpгнЗЭШG%sжѕsq§?џњJуJLя?јї?ќГгUqљ’d€фi•“”эЯџgWЄzA1HЮ/Џrzў_k‚5ѓЫ2э„2Y‘F‘%*Щ’dЉ\ЫВl­БeЭК~Ў8Њ•?>зщšpщќTЫGО‘эaјЛ:њ§б›‘Gњ;{a іvt‡4Јf-џц=‡~ќЧQ­д|сЋ_˜“Є|рЦ—ОїЧo}beaК"9'ЬМтЫŸ6Пљѓ_н™§М§іі{џнлЗ–dI,yњДДмМXc{€€”Жєk?jўёoџzзч_ШNЭпчsЬЛс“*шZdАГг5ЌЛрl_ƒ8оS чцчQqœ^ф”АgЗў нЗ'х^ВИќ6!LJOЭn‘ЉэээЕД*,z(,,<Ї ЌD$йГgO[[!„1ІiZQQбЌYГ,œ_ЬаЇ:@ˆh9XздЋƒ0L$ˆdїf”–fe#а ŠpћЎн нўБ'”U–dИ†ь)ГџРў]сДiгŠœ0ћлде‡ &Uх&;Œ@уЖ‘ДђЊЂT‹Љ)жНkыa^4eJfАyћžЎд‰Sђд!ž„#­$”U”dИ ї6юЉ;<žьђsЅPчЎэЛћемЩг+’buuкb,ЉtжЄL_уіVЕ|zQђаЋѕьл} c ЬeWVQEU~рСЎ};lЅ“Kв]ж0œэkЧ{qЃ!ŽїN—б`СšMbБицЭ›їэлge!’““GЊ*ЮіƒEмД{їnŸЯgЩnB&Mš4uъTY–пу†З d„ЌбвЉГžœюЊкЕ2vZGddГaУцИDAг’x‰eуˆxx"Ž8N#ЁŠ… ІЇЇoмИ1‹I’400А~§њšššмм\BШYЉЊЉ’‘З–$ЩЊ’˜;wЎЅ%ё~В‡.G@!фhž B)ŽшEјѕ(„@$tDИ …HшA4 .`”Ф”ЕРRА eж†'j…#НД–’с6щШЯ`ёPD0дB(Ђ@Фс#R2$X…B8вУ8т8mˆ qФёN0R^gЩjЏ]ЛЖЇЇ‡16RUQ^^~цC––D(ЊЋЋkiiБД$LгЬЯЯ_А`%o яхФБ GtЅоn;rќэЅь-xL # ЦюzТVЦsєŽG„Б9ЮХ<й“Ž#ŽSFМz"Ž8оF *++-6Y–6mк400 I gBŒw7@’ЄЎЎЎM›6577+ŠbclђфЩ—^zщˆХGqФёŽ7тˆу]atUХТ… эvЛЎыŠЂєѕѕY“Зх…WЛС IРСƒЗlйтїћm6[,KLL\Мxёœ9sоOЎ…Бc#ИЁы?Хrг0 ƒуа†ІqмЭ†V™' IBnІ)@№oGяФУqФёnaй PQQ‘””ДaУ†ііvkкоБc‡%Ћ=Ђ!yк[’Аd9cБXaaсœ9sп+b•я „2Y9AмI2ЕЁ$гу„ЋЌЃX?Q&Пo‡9Žї 
тїxqœŒPЄЇЇ/]КtђфЩКЎ[SѕЁC‡6mкd•0ŒGг#! +‰9ч3fЬИ№Т пзƒeЧ…НєаWnЙѕџко*`„Yp0ыћХЗnПуЋOŒ@фрЪŸ|ќГпи`%%"ЂрZ_њёЗО№‡эcЦчПї™ўq7@pн#пОу{ыкЃ0вЕе‰щё?§;Гћ‘#gзуl|dyмЧщСћђmGу+ сp8цЮ{с…Z…ŽŒБООО 64773ЦNЃГaDоzѓцЭ@@’$г4ЧВeЫfЮœЉЊъ{KоњTКЏqчњUЏ?ћзwіъ… с7_[ёќ?_]Лё Я*л<Љ)лa7”цJ€`гцu+wvЧŽvШLlм№њЦ]}Fћо7^ла2ЧnEЌ8|м#8цfЎЧНїаЂ‘MЦюzьЦУЫшхcŽGяё№DqœNXЁ Jщ„ ’’’жЎ]лнн-I’ЎыVЈЂВВв"r~ЧжƒU0ЩГˆ›,- рœggg/^МиЪy|?еUžŒ€”1{š'ДmхŽі%K … ’С•+wЧв r\v% фLЛѕы•Ьщ x,рѓ‡ЂКфЩHS] ^чPьѕ":Ъ Щ^‡;СыvЪDqxм JЬар`(fГy“2zФ2m.ЗC!Bїћ#TqИ Сh E%бkЗк№ ЃЇВУыѕкd@а#ўщLr™ў@вSTЋC"ъыD AUWRВ[&€CеќƒОˆ!Sœ Щnх}?ФёЎ7оЗ\ˆc—B)9GПM9їKЯGЫj/_О|гІMћїяЗ,‰ЦЦFПп_SS“””єЮdЕGф­{zzvэкхїћeYЖШЊЋЋgЭš5тZˆ[ є –:}FrУюЏ;taa„‚0Šнызьь­œUОw㨘Ю МыЙЯ~фўŠŸ<іЭхeЌъ7ПxdeГфryђчп0+Ej)T=;џ§ГћўКЃŸ:=i5ГUвШ‚1*ˆШсџњћЧ_йЇQjrю*И№ѓпќєД№ЫЗп№@ѕwќюѕE­џѓез=фН№ЮПќіжD“Pv№ŸК}MўЗџћГ А}ѓУП{pХЮ>*ƒІ›)SЎљќч>23K\їчO}{ѕмл–to~yЏђНOўhЂ‚сЖЭ<№аЋЛ{AЋХѓ?є…O//ѕШZџЮgјэГovIУPsЇ}№іЯ\>%CфŒ#ŽSЧ9:=Ф1ў ŒJЧўЁ„"ŠwєгД šѓц]gЙTU]Дhб‚ טЊ JщЉVUXС!„Uв eYжuныѕ^xс…sцЬy#NТˆ4Гц^\{шп/7Чc€MoМБ7Vsс”RЇˆq$”)6UЂ@lџѓ=џїbяEw§њёП=ђЃ› vЎ}ГЕGЈ2ГЇіСќx‹§тќюб'ў№НЅiMџYqXUи œJБѕПџЦўо:їSпћ§ŸњУНŸ›8№м—Пі‡УI“—M з7ю ˆж-;EZ. ьню#TоМЙ1€ѓЎœfCпŠп~ћлэ7~ыО?>єЧџћњЕb§Ÿј/FЈьNs6џoe{еЭїќњзŸ+‘Aї>ќнoЁZ"ФwMFЯmˆ8*еj$Я‹’ЁМr2jыaм#ЛŸшАcвМ !‘#0ПЙzуЯšRОёБЩГœШи !чЄ АUU‘™™yбEежжюоНлJ‡lhhUUUЉЉЉКЎП­н`iIьйГЧчѓYYˆ8sцЬ‰'ЊЊЗNBЈаЂžђЋfeџэЕgЗ|~кХњС7пиc[јн™ і'c|Дю•Р8иfЂН Tс‚Ѓ œЅ—ЄКnњzћSr“„@ŽШˆ”?)MlФ‘ƒбюmHz^ўХзj%nEдєЎцЮїю№'ІЭN|l{C3N-оЕЉ]™їБ™ ўVlЏ#LоЖЃIŸpUi"ъЎКhЩюOнїаўўЈ)Pєэ €“Œ˜ߘœэСЙ TŠЖѕtј!№П_нЕйŽBc$кЛЗПнНЇV|ј–ыЇо~пчo­›9ЙЂЌrЪмХ‹*Ъ“ .`ЧЛCмhxпСлѓѕwIВ;еY=ц' %УD fЧоКGЗvmиАGЄeм?ЧkIхMf„бЙрЃ6;vŸcЃ ':ьБЫЃ`4ЖММљPKAQВ“@N|s”R!„гщœ7o^rrђцЭ›ЃбЈЂ(===сpИМММАА№-4ЎЌгДф­u]З’\.з‚ `”vЧ!tЏЙЈјЙ'ŸпYЄnyЕ)sњЇgКА#ŠcЧ€ЁqрВJ‘R‚#ƒa‹”1"Ћ’L)%BJ‘и˜Њфš Ь“Y2ЉЦ J zЪ|ofuArв„Jзs[КBZmЬXо“6АНЮgzъкz –UЇЫСCЛћsmpЯ\rбФ NEэл:4aљC˜DЅL„Ч„тЬ,ЌЈNƒЈŽDЊЎ™{UNu‘-1џ†Ÿ<^љцК5ы6МљТ#/?њчG.§ФюИqVТ[Š`ХЧ[#n4Мп€€ˆб18е ›Л|RЮЄфЁUS‹§ЭЏ?жit7uїЭёІRТК_ycп (IщKз\WтЖ!с;ї<Мў№NПI=7]Q@6д=нHПќв_”EW?ГђџЛ.ЛvцЇj’TННўю‡w4dмА|цв „[=ГЎam{(B$oNоU^ЊсяyyѕО{ LI/(ИщвъyЂџЅЇVўЁ“qЅЗз~эОњ‹>МєГY,жйњЏ5ѕЋкB.PЖ—T•}hqqеАё9+}RQQ‘’’bUUШВ‹Хvюмщїћ­Њ qLvЊƒиЙsgGGX…yyy ,№zНЏ’x{ DЬ]veЩг~юыŠоh*™qѓэ˜Щи“ьШzњУ„ !сnП1,Бе™LќНБЈ Р hД”*‡[#Ж„LU“3']}ѓ‡rŽ:6`iEЙmеЁkї4I(6aЅКІiнJЉ'2Џ:єэўСЕАјлїнs]ОeбММњЏkк‘ŒА8X}дсv'ЁЎЯИќЖkЪЧž "IЮ›Иьц‰—м ;W§щžянџдФI/Ÿ`kaЦёŽџ:yП‘€Ž–`Hеž4;yд:гд8 DbГЉ€њЁ}?ћЫы?XпйуNЌШrБ–†п>БўЗuA$иЖfэWžйѕRЗ™X3;1ќк?7ќq_O[а№ІШ юhhб((6#нэ;Лƒ>!œn :пмјхПЌp_@ЄЇ–'ВСн;ОѓШжџљЦzљѓ+?ГЛWqfzЬаюлю~|чNЄDQB q–’ъ.Юݘ’LЭі†_=єњŸіtЛ(нeЌzeгїўна @ФЙšх52ЕЇІІ^yх•UUUы!ЄББqУ† >ŸЯ*žYN)эээ]Зn][[лHжфєщг/НєRЏз‹УQŸГ}fч2eŒQ†@ mсM3•7Ÿ|шѕ`бМ%Г$ H%‰IŒ o(Щ eгв oЧŠ]1$В„бюkії†$‡Œ–7БLлљцЮ:#lо№т–€У&PЦ$†XкєЅ…}ЕЏ<Н'jyDэпПџƒ?lьB ЙUjФЮ—џББЏpA… =ЏВBi~ў‰5§ЙskrРаТu%ЇЄPРXЯіGŸифŒ ŠDЉ$YŽ;€ц•Я˜%m~aХыƒCMkџѕп{рЕўpлП~ўѕ_>П+‚@%›'БpRuЉGыD9œ›fuч тž†їФє>AUЖѕз#rDS‹юпД§_СhrЮмiЙ­їЙUuџmчЅЫ/љ§Eщ.€шюM7<Аg{л`kцР?оhoійW]єЫњ–чжќИН#цIŸJѕжРО0M*NШLrГЇХgШі,ЏЇТи}р/ЏкЅ;пМќ'е*€Й§љзя|ЙaU{ѕwћъNЎgнtыќkœМяЅ Л™ \‰Ы–Nj}фЕпjyНrб­й €йнйОКR•~цњš™ ађш+эСl%"ЮCX–хE‹ЅЄЄlлЖ- ЊЊ:00АaУ†ЊЊЊММ<+ж`UIьпПп4MY–5MKLLœ5kVqqБuИЙ№і@#<ич kрžyеœи__ѓЭ%5 ш ъhD}Н§Ё˜pЭЧ/]ёгџ|хуЁЫцц›G™?Іh$–Ю§№‡џљНПќ№‹‹*SDwScЈШозеРX` w Ќ™@snјцg6ѓЩямбЕhv)ѓк№кv6ёJR=ЕЬўїя?п>§w?Щ"UX–йёл_6,Ня†‚d€ФвK.Э\ёТoОЦЭёјь>ьМ№КъŸx§woјД;ш ЦL 8 kТТ~щЊя=№_iлoniКЫQ8w^I"†{;zюIз~юУ‹9Лzоф ;Шо Sц–{Ђ}Н=§š{жѕŸћШмФˆR8gжФ=БeO]8#Ы%9ГІ\ВАќ]mmн~L˜yЭЧяќќђ\ 5бŽЊ=­ьты.Њё($›Ђ*В­єЪЋ.ЎJwЩ•=ef!{л[:4wхM_МcйьJwШ4hZ~YnjBёьy%ЉBl95sдxэmmн§†ЋєЊЯ|щSWж( Эž7НHhoяъ ›ЮšЫ>ѕх[.ЩБ/Щ(Ž8NёŠнїPpBYwнпёт 0ЙlaВbуЁКњіMэQЧФw_=aКyу›Пёя†€G%šapD–TdхU—ЖЖЏой1P5яБлŠГ9Aл№мїНбЛєёЋSŸсЃЋЕIЫfчВт\DŒtої‹зŸЧФЙЫ/њљt|щБеПйг'ь2І€ˆ’MБЉњš ПUэєmлѕшжЖ7ѕ4]щЩUхХЗ^9љ/?МaгчžЌчѓч~яъЪй*r экМћЉ]лѕ5GHrvъДЩŸМД|Ђ p\чЋ@N 
{vык}ћqRю%‹ЫoТЄєl:лЌD4}ѓЭ7ыъъ,žG]зггг…===6›Э0 Jщ”)S&Ož,Ыr<ƒсДр­3_ођSќЄЊŽcžќї§иPР[иЖ(›Э)PzМtЧxщDяяЛ№ž›…ygцьX{0вK^pёдл@РЕ ‡›к[ћњbSeŒ .„ЩЭXrх.-šфA$ЇB +ЎиЋ ƒ@U—гe%UFZ|н еИ†ЖЕ%‚voJ‚'гšоКлїЧИ”,ggкќ: ЮQ*™ђ§ SIrH ЧИ’œрЁр™1э;ЅEuНПЏяѕЕ^пОћ/y%‹2__Ј5)ŽtХ"y"дсžМdю„ъОњ˜ЏЇѓљ•ѕ›6яyМЈрmxОD(`ИгnЗ/XА 55uѓцЭ‘HDQ”ююnАt2цЬ™STTёœЧwфІ‰„IŒ 0MTJd@n˜‚2™QKџšIb” ‚›C2й„J И)€I%„ р|h%ЅŒapd‰ г0‘Щ2%@ЕBcУ„(Тф–.YSpгфШ$‰Y“Ж`8 В­7ж9!ЙЬ†BNXQuжД=эMЭOn+ŸГ$й’лaO—yk„'d&•8:ўьщ&cђФOЭq%ƒХ@7Щє‚бИЙёхцЈЄ8ЫRU;ї7PJ˜$ўіG^:мЦ™лсœіt‡ь>ЈAFVR.@omэЏ6ћђ.œy‹Ѓ§йЖгф›Ў­ž”&ДoщŠDbœk| 7lP5нeЯ FћС‡^oѓх|є‚ТЩЉм]лэ0!цяr|ЮŽ[UaЗл ‰ŒШ[Чsп)Ž(W#rM˜<"ŽMЈЄа#л1‰Ž"­Ё}ЬJ:Д•dхФ[юƒt”YK™4FМ›P&Е/9rhЦŽsФу7uТ.ФЧ;Ц{еh@DЫ@т@13двПЋЁчЭ‹+oW$ћйюсYA…pCOЈ9ЬyQJ–J(ЁB ”•;;ЇqUkЧў7v­žЖшJݘЕЄМЉnџСŸџЖї9шьo*ељК‚ЮšНХGqМ/№1F@€d'Vd'V Š–=Э}Е=С&_Є+ыЇ„ЪЬF Юu8mХŒJХыPМ'кQ„5_DїGtдІyŠ‚БОЈŒЁ˜ŒAЭ ”J•)aƒ‘ŽОPЫŽ–S2<Х™ 2М% ŽtЏ=C6w,ысмЁˆ#Ž8тˆу=їšб0œQ@H~ђФќф‰ЕдuњєuУКoЄЈяДЗ|$GўЗ ;ЉЫ–фВ %.Td.“ыўXO0кˆѕc§Ёи@HD{|‘nŽІLUUrP*u›к|ћ…0йY 2НeЉюќtOЃ2ф„rч=ФGqФёС{аhАМї0Ь)3[QъдЂдЉQ=а<дhfD~зПхбOGў€Sf(]B‰)ЩЮœdgЮШN!m0ы D{Т§Ёжž@г`Є“Q&3U’нQ=АЏcm]ЧъDGVВ+7г[šŸ<)ЭS8tќs‰м0Ž8тˆ#Žї$оƒFУЁшHƒ]ёф'OЮOž|v:3lЪK…qИŒг*фDJ™KMtЉ‰™о2аЬp(6ˆіvњvјъЛќMa(’в#M}ЕuЋ“œЙ…ЉSJвfкdзйя8тˆ#Ž8оуx/ B€5E‹б% gНcG*ъ†‹а*к$„Њ’Su9“]ЙЉS4#3­{{Зvјъ UВ#Ђ?к3щjЌл~јПyЩ'ч.Kpd ~œB0qФЧћxд?' rму8ёо7F0b=œЋ'€!?ФаR›ьДЩЮGfUі’ˆ8мПѓ@з†орaƒk(tƒЧќQmoћЪ§oф&UMЮН4н[$QЌBЭsЈЮ"Ž8т8Ї1ьј<ђџ0mэШ;ф$9єOЎ­aщ#Ўž/x чŒžы‡ž,J˜KMЌЪZ\•Еx0вQпЕсpпNД'ЂћHfSomC‚ђ~“hБIDATї–ќф‰Sђ.ЫL(U$ k5эŠ#Ž8Ю-!ŠчސЩ•’eA ‚ 4 +ц‹0$тc•ЄBGqіJЈѕ7!–њ ЕD|ЌзбQЎж8ЮФ†ѓcŸ,LtdЭ.К~vбѕН[К7wњ†;(eЊьlмлмПГ$mFEцТМф™йЌ'9nЫŸ/xohЯЦqЎaФJ@DB%lЬДm ]7ЃCxT7Ѓзt3ЂѓЈfF ‹Š†Ч Ў™\#@9\˜ˆЮЃŒЪХыvS" %Ѓ%TЂ Ѕ’DF%YВ+Ь&1Ef6…йUЩЁHvЦd…йгм…SЮіХёіˆ ч%ШАЇN/NохohюЋmюпбс; 3еЁxъЛ6vљŠгІЄL)L™ q—Уљг4уп`qœ&=b%Э‡Д6Š DtдDѕ@TF`TF@Ь ф„P ”Œr‘ чЈшЇЬlŒЪЁиЅвp|u„'тPE.Cю J%-ѕІ™?pГ”гHŸЧ8!n4œЏ)ЧАФС-BЬŠЌ…M}ЕuэЋ;§Њз&ЛіДЏ:аНЉ$mжфмeЩЎœј3yюУњ ŒгHЧё.a}W Mі`№XOАy0мсv‡bнб§-ж}Yƒ‘%ЬЉ&X’Мh1уЭє\ '] љФr.LUvD ЃВe' Зk§;Є_O‰L†єl‡ .LJЉMv@ќэtю#n4œїАФС­GкkOŸœЛЌ8uzcЯж†ž-ўh˜\пењJлРžЪЌХг Ў „ХM‡s”ЦcIqœ*ŽzЎ­яŠЈь4vћ{ƒЭЁи@XїЫLŒt˜\ЃTЂ„Q"ЩЬІHvыгпВ LС„*§{їGq=ќН™нН~Їо-ЩЖмфоmАБСІšnхGЏ BH!HO!!!t „„^L'Ц`РІиИ[ЖeЙШ–елщъюЮМп+ЩВБС6r‘4п? 
]йл[эЭО›yѓЦыж.ЭывН†цui^ƒ{]šза=œ!гИ€ŒqŽ:cœ3нВd–HJВ-aJiл2iK;iE-‘АDТIгŽ&ЌhвŽYЂЭ^ЛTЛд#Ј Ё—@dиОЎмc Oš;mЭŽKЗЬKк1Cѓ„ ŸlzОВiеДAџ—эKhЊшJЅ5(ћЈНь,ь, +IдЗmнвАЌВiU[ЂбІ%ЖH‘ЬMlpЗГT^Ч xвƒюЌ€;няJѕЛвюtŸ;Эрn†‘uќ—9џ=АK{ч„“Gщ,ц'лџk€Šz4є*NЊЃ ИuџјЂгfNќdгѓ[– лтЬибВўЅ/~;БјŒQ§NаЙKе‘T”ЪЩgdS$й +ъф6mk^I4Pћк)Q sCюжќ!OЖЯ•–ш—ъЫOѓхЇyѓtюњ&гГ Ш‰ .vлZћЄwTo=› zЁЎŸеoЮI#ОЗЙaйg›^Њ oфШ‰фТ Omk^=}аEщў~DДлќNEQŽdNžCю „уѕбэѕK*ъ>O˜mœi 9вТ24ЧКuПЯ•š(Ъ Ш z2їхu:~јњЦwжoиЏ–d?^B9BЈ Ёї#ЂўcѓS†~ЖљхЕ;ФЬVикИВ1ђ‡C.-Щšjщ Eщ œœ†'ЌHMыЦЊ–u›ы—жЗmхL34ЎЙ-;ž”QЗЬ Їzs2ƒВќEbЏмЙgLЃKY†=9r+є<*hш§‘Hšgк  њЅ•~ОљхэЭe.Э7[пZѕЗ‰§ЯWtЊЊP”#Y—СhK4nЊ_КЅaљЖІеI;тж#h‹d$йь3RŠвGgŠ3E™т4_^чкы/Е—TR}‹ЪRACŸ€иОpWQњш с’-ЏЎиіg.ЎxЎ1Z5}аџмщ*n8Œ:3Лў@ГмUй†ОЬЩRBФњЖ-ыk>ооМЖ6МI’ti^Џ‘т”`Ъ ъŸ5!74(Э—я5BэO$йžяь”rR”oL }…“…$Iј\Љг_”(ZИс)K$ ЭS^Г(šlš=ьšT_žšRqь1шќUг4рœEЕIє дQЮ"•e;ж…7U6­bLг™‹€,‘ ИвGцЯъŸ96Х›ыsЅЖ?­#Vp&U)J7RACпТC^š73н_јюš›ЂUКцЉn)mх='П.+8@Х  I)h ѓ–ЄiкRЦMsЭкЕ[Зnmhhhnj*ЪЭ]ђХoПџ~nnn0I6dј Gї;Б8cŒKѓvtЕ—P:мoAщ­Tаач Ђѓ &;8рьёЗОНњmMЋ4юj‰еМКќюSFн—2TSt/"Bhšцєджж6E" ?§єyѓV,^khа,+ i €„ƒWоuWcyљЋ?ЌyНІmлDIKз3 J'L8§мs'Œ‘ Іgd8ДmЛk,Ђєh’ЄSБ-’lZЕ}ўЪэяšvŒЁfк1LwЇyєаИЂSњЅш|JgЉu(› њ&ђ)ЇŒКё§uOЌЏљˆ1!{Џь3†\в/m„ZЈЂЛ!8cšІЩdrбвЅя.^ќŸ‡j+/яЧиш””k\ЎБЙЙгќ~aЧsП”sп ^/H aгќ0YекZ6oо=O=Е oьиsЎИт„Щ“'N˜ išUЩЭ)ќЬ™"БЉnЩЇ›^hŽэpi>4E,шЩь—:bLсЩў~Л=Qu *‡Œ њ.$"—ц]z•ЧlЈ]ьд—}sепцŒО)?eЈŠО9ЫВt]€^xсЁ^јь?џ9–БыђѓO9rh R‚”@R лD2!%IЫRJ!˜”дД9iisвг1@ќ"~ЏЖіэыЏПKзOИьВ+Я:ыЄ“OлЖ”ЅЧiяоCмбВ~Yх›хЕŸшмхж§БdkР>4ѓшсyЧхІ юx$ЊљŠЪaЁк—>Э™ЩPŸVrAа“ѕс†ЇrгŽНЕъo'М!/eˆЇј&œˆaљ’%wм}їЇЯ?‰л}[iщ1)) %иЖ•HДзўeˆМЃcЙ=LCфрdВ9нB … Žj8уМоqСр‹Š^kjzхŸџМъ™gŽ>ѓЬ{~ѕЋќТB'ЫсpПueџ8C–H|БѕЕ•лчЗ%|FŠ)тТЖJѓ-Э;І0m$8kNЊЄrЉ ЁЏCdD„ŒШ?.nЖ~ЖљeЛbfылkў~ђˆrB%*n80RJ]з_~ёХ+nИсЌццw†€eй‰pD}†Ÿ€wyМB ЇІІžšžўykыўѕЏщK–ќыЁ‡ІM›vИпКВˆ$CVоДЈтЙ- Ы4І{`ЬlЭ іŸиџЬ™8глW‘V=ЪсІ.JћЬ=щ“Ь_tЊ%3ТёњыўŽз!2ЇСRі1Ц^xёХѓ.Йф€ЧЦŒхv[ёИmлЂўЃ0'црˆЖiŠDbЂЯїц”)gnп~о9ч|№с‡ жЛъ!к‘­ЎšџкЪ{Ж6,74SReъРsЮћ“СйS9гUƒrфPgЁаžHE љбƒ.гяdгŽЛ4omxг‡хO™"ШКT‰WО!РЊеЋ/Нр‚{ввn,*ЩЄАmQыжфvа9ЂАmЗmпSZzY2yхe—ежеŠŽxD“vt~й# Ъў3У7l‘Ь Ÿ=ўЖ)ПхsЅ‘S7њpяЌЂДSAƒв  G:pі”Єuыў 5Ÿ|Zё<Ј аОCD!Фu7мp‰зћн‚+‘`Л.t;ŽHD`YП<ИЄКњЦяпй‡У}$”НrF§ZcЕЏЏќЫЊэџгИKJ›!йoімё?ЯЇZУ7XyRQ•г ьфЬЇа˜1}ШХсDCMxЃЧ,нњZа›=КрxUєiп•WVV/ZєЏ!CЄešњ иQ>№/Ї~ўљЖmлњѕыЇ’"LNФPеМn~й#ЭБj—ю7эXŠ7gЪ€s‡фељ0ѕqSŽ@ЊAQvсЬЇ№Лвf•^tg™vма<‹ЪџSнВI Rь›g_x!“Бў^Џђ~ЦˆrМ^ЌЎ~ўХ@u6$IDЖЅaљ›ЋџкЏ5ИлДЂЉЅЇŽОyHЮQ*H9ТЉ Aй“љ˜сяwьаЫ]šOH[HыУђЇbfˆTмА/*7oщК”ђP~Ut*KыŒЅ™fmm-ЈД†#9%ЪЊОГц—це˜aŠxiоЬ“G^Ÿю+pїN*ЪWQ'ЈВˆLJЛ8cЬш~'"ЋnнИdЫ+ˆLe7ь‹тў§‘!РСќГоœD|[JлчЫ/(ЕЂе‘…œКkv,xЏьБЄMX‘€;}Ъ€sfНмkЄЈZjJ ‚eЯуDrBџгѓS† iiL_Н§НђкO™$ещ§5N?§єЪDЂе45Ф§эn–п$ 1œLFSS=цр(РЊэѓЌ{ђ„_tкЄgымE$UФ є*hPіQcЦБC/ї!АdbЩ–W[уЕ Йy§jƒ‹ŠД!CюиМ УоЯABD"нВ`њ к_‚шЦћ—–Ž1ТВ,•y„p>/ыЊ?^Аў ˜3Ц7kиеУђŽqR…дЈ„вSЈ3Uй+ЇxCЊ/яЈ’ѓЅ.ЭЗЃe§ъЊƒЇи3"rЛн=њшѓ†ёR]a6‘8˜У:‚ѓVWЏ,.ОыїПЕХТ™+БЕqХ{ejЈKљьвkJѓf8PГ$”D ЪWC(Э›10kBвŽzŒрЊэџЋ oDd*dј 0uтФуЯ<ѓьŠŠ74шКЮK„ўA”$тœт_kj.лО§є9sFŽ!ЅќЪ„’BижNЖћеД-Ы’ДзЛ„:-vEDˆЌКЅќн5H ’;nиUCrŽV9ХJOЄ‚eŸL8з­ћ0aE–n™'Є…эЅ”=@€‹}ђќѓ?ШЭНeыжooкTcY.У`ˆ&uCJ"“ˆ3цвѕM‰Фй7ўЅЖіЦЬЬ—^~yѕš5@єЃ"Rу\гwв8C’_ћчDІщКЮpЏwqѕЙ g9ЪцшŽwжмЗк€ˆŽ*9oHЮб’щЬEщ.ЊSљz”ъЭWxъ‡хOњ\ЉхЕŸ Ъž:$ч(Е8я!Ђiš?МщІЃMѓOУ†Э …nоЖmjYйyiiзfdєРЖIJINНП§8ˆЮ*—DФ9чœѓес№C ЏЕД їx^4h˜ЯЗyйВŸўьgѓ^~ЙsUюнЗAШ8TЏzџН—Tд„m@#5xмЬSf їХK ‰œе-zю_oеЬКўкБ™F—ћлVџїљЭS/ќЮєL’„ЌЯŸNGBвŠ.м№TkМVу†-ЌqEsFп1ЕВЯ#ЅR= Ъ> `ШJѓŽЩKš0#Й)ƒ—WОiŠФсо­#зъђђšЯ>ЛkШD<~T0јПСƒ›ŸџZkы”uыЮ,+{ЁЉ ИІ1ЦqЗ„GмЫЯ€ˆˆŒ1ЎiбПъы_Лі˜ –GЃэзя•’’a%х]EEŸ}ійђeЫ4M“rЗё’c‹ЛэІмёТ;„юёxxы†§іЦЋniGrЏяШщ„А# лЖеDl аЅД8"D+ўїф3/.oйљ`‘}Охе- _8УрьЉ“œЭ#2•Ч єPЊЇAљzN™HŸ+uЪ€sо_џD[Ђ1iEЫv|рTqP_˜ОlоkЏLIIг4лВ„mћ8П 3sNJЪкxќОККoпўƒmлВt}f pl pДпяAфЌK‚ѓƒsѕ•D ХВEЃѓУсЃб&лі06еяПЗ  ПЫхбuвBg,пчЫЋЉyoсТ1cЧкЖнuILЃђ—yvžuЪ}^;%еk LFж_œh]<ўЦЦeбш‹ЭЭ6‘E”Њi^D’27;ћ 
Ф—ZZ~PV&НоИmGЅŒKЉ!њЫеѕуƒСkвгћЙ\nЦˆЄЮŠ—ЮЋgE#‘=ьˆеVЕ$<ƒЧŽLѓ:C Ш\‘чџќщ“„?”БВyЗ|їсМ /h.}љнuQSЄ ;щлз_utБ{ѓѓПЙхяWўыСSŠНvуЪ§љž—ж’+8pЪм3 bDNZ„dŒХЗ-~ќС'о[Ym"Џhжљз\>gtPЃEПОфOeЅп>Ÿ?rїЛЮ§СЯЏ?й/lтZo:‹œˆЁОmЫтŠg AКuџьвk ЭЋ"ЅЇSAƒВOP’аЙkpЮдэЭk„Д#‰Цх•oO(>нi"їYrћѕ+Гmޘ Nтxыѓ§ЭпžB`йі‡бшъxМжВZ,‹ ƒˆмˆ]ЎTЏзиЯ0ЦyН“§ўЮа@GЖЃsє;ћ|jР—ї Jя‰ЅЏЬћУЏЦjWN-ЩMO љ]Œдœ, g8 +с™7/ЙюкПќ{4n~яO?њхЏ~G=pНaХ"‘ЈPЫwп|явТяў№Ї ѓlќрйОА2)ђ‘€ЫњяКљЖљќИмzг˜lkэ;џКћ7Зд[їмyЮ7˜M[>xvўєГяќ§ЄA§\РzYg!Ђжg›_nеzŒ %3†\šс/5ЛRщљTа ь+†ŒˆfN\ЛуƒЪІUHИОцусљЧК5ŸњўД›гцЬљЫM7m3ЭlDйqQпy€ˆРЙт€ЦиqСрqЁгm))œ™’2v№`pЛAJ rтч]s'wK€А7E"uРьуŽ“Rю^9J‚Ь™7оpƒўф џ№т{НЙƒ†;vєЈ‘ЅУЧŒЬq1`\—бDСмs.:{JŒ˜s§ѕяЮљЭчl–sќ:г5”M_<џЖ9уЪo_sТ(PpбЯhЭчЗ-1…DБђЕWол>тЎџн1Х PќэЛS’so~}ўХgŽ№ ™=тъ[/бБяНАж5ЎЎZP^ћ‰зХЌжБ§Nœ=U ф)Нƒ ”}‡BуFiоŒšжBZЭБъ5U &Ÿ.I *‚лХттСГf§tётЇЦŒ1уqcO•Л^ћ%IЇk_6@ŒШВ,в4&%:‹|н5Ч&вЛЙВrцмЙNEШ/Яž@†$Eжфoн9іИЕ_,[ГО|KeхšЗ~ђЛtцЗОџ“Ћ'dVBЄ—цf†H’ рЬ•32ЕщЕІэЁiH€D‰Кu•ЎŒ’~§I’->x|_V#дoЈЎoљк_юљи-„Э4]VЏhнБjѓжjQЈs—6`rR""ыuДЦk—UОЮ™žДЃйћŸ‰ˆдлоЊв'Љ^ee?8ЄKВ&eŠШЩЪЦ•I+Š•q "]зяНћюџyW ‘T]qJя z”§…œщCsЇз†7!cum›З5Џ)ЩšЄ:v3nܘ3Я;явћяr~mzzћь['5“ђkћэwn Qcь‡›7џЉЎюІ .(..оcEH"@Ды+–ЏЉOŸ2ЙП‹„ФїЅŽ=§в™/}ќтВump”“ёHвДtD‰ц˜юvy§Z\:3#to OŠxФТі§РXm+I@€”€ЫcC.ОёG#‚Ляѓч&эgqŠУЄлTПtKУr†м–цИЂ9§3ЦH’Н2lhqэыo|БНуЅ[>{хЁ_њ"N 3@ЦzeЬ€Ж0?нє‚$aŠИGЯ›ЩPC•џЈє"ЊЇA97†цN[КхU—цЉl\Uо”@ъ b<џўuз}Ы0~UXxIJЪ­UUCVЏО0-эІœœ"]7œ)…”В#ЙС9t]Ks;™эс‘S’;yœ@дВ*’Щ?жжОокzB0јтР™nїВхЫzы­яНћЎ”rзU.‘sI`LПр['ЌОћ??јПХ“30ЧЫэжЊВO–”Е†fџькIРІtчzЫоџз§ббC‹Фж…OўЋqњй—ЮЬ„њ–hЌ-7%G_tQЩѕЯќэ‡А§ИaоЪ%‹Ы IЦ“’pр gЛxХCЗп;oޘWеВљЏМ]5§ћ 3‡}Нo‰ IФ-Ћ|Ѓ1В ‘1Рсy3CžlIB­y­є&*hPаАМc–UОС™жйОНymV џсоЃ#HйцЭЦж­?.-%гцvП4dШK Ў­АvэTŸял™™љК^тvЇК\м™1бљЏГ !r'sЊF:џЄЌO&7&“лLѓ55ЩфdПџЕ’’Ѓ‚AЄќуР3**ОXЖlќИqBˆ]&P C Ш<њЧїšёњЋя.^ЙnХ4ќ3.њй™gЭ*ђp.#mЎW_6+Дсйч^hf7ќќтoЭ „ G53%ЧMžЃЎ§ѓ=G^њєГ7Зywх#зќщЉѕy^Т5ј’;я.~ё™y|0oИгKЎНїЇЇN*тЁ“NІ„zW'‘dˆ‘DгњšE‚l<;X2Њ№bj6ВвЛЈ A9@!OVџŒБѕK\Кws§вaЙгМFŠš{щxљЅ—r УЏы–eщˆТВЮJK;+3ѓЃццџ47џЁІf}"Qьr™’’Ёiš–Џы§ #ФyРЯcˆI"гЖыm{›iVYVЋ”[’ЩзZZjl{ЄЧ3еяМИxD(–%mлYЩТЅыž††.?nмf]:#ДЌ)g\5хŒніšЄ Цё+“O:vђIзvо €љГЏќньŽŒЌ—§lЦeOwїяœ"#я˜ѓo:цќЎ›'@vЩэwuй•^зьXанЎ1'ŸЎ1C•0Qz4(†ђaЙЧЌЋљиk„vДlh‰еz5нБ}ћvt–ЄŽ(„BL ЇЅІкІљПЖЖхБиVг|­ЕЕк4M€tЮНŒщХ‰Фqџll\^Qц)Й‚CчІzˆЄм[Y*g|c/їtn›ЄќвЬЯ^ЈКuCuыz—цЕer\бœ§œ.ARšІ7EвІ%LK$-‘ДћРqSz"егphHf5=ўчЗџйРKІM§ыЉљ~f—ЖюѕІРГŠŠѕіЦ$ЕЗвˆˆио-L$$‘ѓ5аYG‘1d@RvќŠиљˆdчКЛое§2ќйСўMбjVQП4+8рpъУˆ˜І§§Яž5sцЋЊnЩЫ#ЫВ‰Д= 0иuёDа4чv Qю[Ёe‹HgŒИiSrдЈЛяО›ˆікЭ€ИїЋВіgI"іU/Ž{]fЊs€НМЃ‚бДуkw,д˜Ы‰‚дйСЮ]ћ=“dPЗ|љэџ]Yювeв2 €ddL›TzС”‚’ГЦl—Я5"vf ‘l_^ю,Ещ4ˆhmХ.Яэzз—лœнЖ $%tifКьЯЎэиnэЯ!m”ƒAѕ4ажPвэ хfУšЭЯНѓХ“Uќшщ—^zўо{QXјЯккЯУсцфЬLM)mлnЏбє•Oзlћ+Жп^@зuђЭцц[Зoзџ›ћЇћю;iЮœЉSІбnУ$pMЌ{юЖю]к<ьЖ^ўNЉ[0ЇЄbнћї\qљП`ъ9П~шО9Аqо§?ю|хЃS‘3ь@РпњЦm?щўхŸЙ G HBJ)‘kДуЭ{.§ЛсХП_7cяџіК?зџяЗ=@aз‘ t&dАm‰šж^‚ЄmйФ5Э™j$m[ з8лe6Цnг1lBЮ@‰ŒkœЕПеfМsf‘ЂsвHЧЄo№wкPГˆ!ЗЄ™(ЮO‰_ћdчQvЂЙЙЕ•dЮАсп?­џPчЎxЫ‚З?џћg;6­иЖrJСРLЮB[ѓВ-ЭеЫ2\™9™ѓ}Кs$ЌxХцКŠV3.A3ŒЌМЌqйо^2|ЧТŠpи“:}dJѓкЊѕQ›\о’!љC=DШ­ш†Э [Уf‚а№јŠ‹В‡`ИqIy}…8vR6mЉ^YO –Yœ?>гЅ;Gэš-ЕeMё6S2]ІЇ+њ!ЕUз.пбжd’цї(ЪдˆШщжеЏЎiЋйФЙЫ(œU .B=‹њ{ЮзŽњ­с˜I_`жќц‹]ѕшGŸел†ЃШ.tо Ѓ/,іД­-{є…В1чЌUкЦ ­&SO8cь„њЪ7–V.ЉГ4`Ъ‰Sn9ЎЈˆ‰ЪЯ—џcaХћ•mINqЫђFŒ}ЫЉƒFxBЇ’“™цЫOёцЖЦj9гЗ7Џ)ѕЬьуЙˆhšцЏnНѕКмм[ ЯџPSsNEХ‰СрйЉЉsгг„p­Z_ЛƒSвщзEЮ9g „јG]нЫЭЭ_DЃsSSяЬЯOѕxjжЎНѕЖл>xя=!Ф—і _FЮрQОФЧ/vх№щz{MJj|oокр№!ОДЏbФеw?wј‚ЬyW]ч=шоєœLЗ—ч:чIИ<)й9мЏ0OzV„8I`|зљ’€qMпЅњ”fАЫяэS2ˆО4ƒЙц,хЩvоЙз~iі†sЧJZ‘ŠКЯuюŠ™-%EЇмDћ>г -Q[1СU˜Њ„HJ’uOЪИйй‹ЖmГMSHB-RОіЁwзПЗЅЕеаyджВВfЮžќУ‰iюЦК7о]ђЏѕ[“рцЂ)СrњqтИkKƒ:@ѓВх{mлf_Ющ+ѕ5kj7G,ђuлЗ†rcМВтЩљыонPЗ9h™Т4ЄџйГЧœWшŠ——?ѕЬЏ‰ќџЋ№lоP_бmA#ЗxШўoќ)™˜б%ѓ?}јГЋ›mЭ сˆ№ggO?fТ-3rвЬШšOW?њбІЯ…л mQШ>фТGŸ[`ШЊ/–?іўЦ[лэ­“јиб7Ÿ6xД—ѕщЃgQAУ!@€fyu"JšKї+ШwЪ„ІїжМЩИфЂ 'х 
ќvѕВUwНВjEъ€ЋЎ{jkZЙфж—з§з<КИдnВmЗЏŸNvП‘?ЅU}ВтўЅŸПЕ,Zг)™АлЛЊmлЖу\6Џxћ…—оYQ—ЎЬЁЧŸuо)cВзМ|џ?> ]їу †Рмєё“w?^uкїЎ9yLX Kюљ§йЇ_wй1дXўіЋ/Юџb[T‚/oдЉчЬ1$ЪўѓЧgЗіПрLяўќЊкпОx†#пzюХїОивFzњРЉпК№ЬQйn@sћЛџљяќe›л,цNщ7щд Ю™кя›|0Ж4ЌˆYaŽšЯ•–Ÿ2„!“$іm“V4ЖБо”юœйiљœЗ?ГiЧ;ЫЖTHpчeфЅиМщoЯ.yЁЭ7љ„Y7 i5ї=ПъеЗWL1}tyйTщ3'џvFQ XеЋWоГ с‹ѕ•УBбокТмA_Ж ьМa?9fVЌ{ђЃMŸЌ*›7ЃdˆПіС/ўwƒ1vцєGЦІљЌ№ћЏ.~zеК'5џфK†k-VЬ рЩ YtЩ%cћЕеО6Хs›+?ЉrlfšЛzуCяTl0јл—ŽžсЅиж ОГeeYецiцЪUПuЭкє’kЏsRж/ћьжWЫžI Žю7tpЂъхљeo4ЙŽПьє+ бZћю{Ћž]WљХДў#МЎ^6 З7SAУСGЩ№њ;ТДŒДЌbУˆИєXФ4SrЦШшh­[Мxуjг7§шЁч—ІІŽ)4oѓЊhxƒizУ‰ЊИфƒKЏ™]4,пFƒ,›њ—œ7kа$7ФУ.!@wщ:f4ЩцАЅ Я(Ю ,,:пУЃ”ф”$4fЄzs€ˆjТЁЯG №Ъ+Џф{xѕ%ћЈя_3”šо{ш?z)6ѓДГNьЏm[ќР=›6klд>ўлon~ћ Я=!?іoŸ­NІŒC[vМg,Њzэ/ПМыецЃN?yVЁОхГЗ~w§g•ПџЫYžЦe/ПњщEЇ …РЌ_ѓСѓЯНXч:zЦбcr‚Mk^~qбЪ‹ЎBфѓ{qЧгђЯ>sЦDлђї^јс7ўєŽлЮl)[ќі[‹Z*sД~#Ці Д~zз-П}Н&џє3gLі4}ўжППЗpэ/ўќгc‹/оі§‡зІŸ~м˜еЌzяБŸ]S~ыCЗЮ.dк7ЖvЧ5гŽШ,Э•@ћЪmћ‚(кжК&ЂЅјЂ ^ўп2ƒ Ц@$ Mm‘є‚ѓg šь‘ыпZѕA# /КdvПAбФЛ_45-mЖ‡ aj‹&Z„Ћ0?T”sЬ#“,вЙнд”lH‚wдшfч€7{бЪ­ŸЦ­†жшЊeЫоЈ…_1Їd,€ДŒБыпЌЊn 7­1эќЖјЖЈmљнЙcІzТЂЬ#-ІћtDi™Qn0ЫlˆafqŠ7kвя†ŽJ0юmm˜їIE™хŸ1}шyCSB…cŠK^пКБ­em“йJš†ВЉетУ3ћg†Jњ}Ы$УcЈдКžD гћймА=išКž[”ТРкZпК.‚щ#C$IojњИв…\ >Мќ LгЂ-5Бh+ђIЃђ €ЄЉmikВјшсYм eВВ*#=/рр’s†•—-m\§ж{—ОХ|iiSЇЙlrvњAЪ…ь”р6‚I+7Уum›Вњx=щx4šfRJЇ Hлцˆƒ=žСЯЅii1ЂE‘Шk--D"/ЗДX;vˆ`ліРdђB€ЛkkЌ^­†%Ѕs5Ф<]ыѕў(;{”зыcЬхR лц}ьH”ЅiШОЊ!–Rxrž;цПїЯŸWuюЗѓ8sй‹‹jњuK‰^&Ј§/Ї{ƒiiвчFsћ’Їžи8щЪЛїНщ:<ёЏ]ДF Я лоzїмзўѓ—WMЬ€ІеЙ•—НПYя,ER‚  yбkџ|Ктє?<љУYЙчœ1ьŽГёя—џйqSGМѕaYэ…Ѕ!Њй\Ж=ѓФSK›ЪЖFgŒє%7~М&}РQЇ–b]Sн№ћ;/“ п:~xьд|rњYcЇљ}>ЗОI?љО?œ–hщПŸxЋ,ыч/м?; `ю~qхїў6яЄIWшo.ЊЭ>џз?ЛжI8kв /nеЄиџe7 УёК†ШVBdy)C]šwП–ЇBАлšъЋ€\&bёš(‘АЃIAОршуŽћЭ„мa):З›–'ШЃ%+7ўіз›HX SКМ’ЙђŠ O.ЌžWБёСП—?ЈЙњ +9oжˆƒš€HcU"б]GMЬЯ)$В„-$ аг)ќAY$щ ЫЫЯ@иЂЦˆˆ4ЧТuбЖbЃЦ•z@JJ6F*Mђххњнn ™нџьA›йVџЦП_{ДєТ‚SŽsё Uнєб6‘в6Ьџр’љ  Сд Ш]pђАђ5KжОГрђw˜75uЪД1—OЩ9ш­“вНTаpАCДЋZZт6ўС…n цІHK=hƒѓ=СN&lQQF‰,рЏЁ›Я/, ЗОм’АypD–nВHЂЁКЭвќCЎ f[U•mwЇ‚ЙЦŸqв}CжПИlлВъHCДљэgо)ЮњѕqyyјЭЦnїЦ‰ r‚%!OV+еqдw4oШ ј†CХ=нШбЃ7ўы_ Б=Н#t)РУЙрД””гRRкŸ@T‘Ln4ЭІdВ5-&ћ|“33Г ЦИ\E.з^FЪЮBOа1?Dќўттbи{EH’ŒДЩsgмsыGяЌМђђzу‚зVЖіЛiі@э§+PI!‰ДV­XУJ.1ZАmа<ƒŽžїдЭА“[ЖlЌЫ™6Ў8УщRK0|ТшрšИь˜К„КфњВех|р)vЭЊUеёz<`Їїkzѕѓ5m')Ю~шЃ ёЙƒ#U+WФ‹цž7ј§Ї–lЎ’#з~В4š>qf zтOўzbИfKйЊJS зы~‰‡“ lЦ‹gMЫG PЇ­‹жTGгвЖЏZY•Lи†зї…eя,ЏНшєЩЅЁ'_НћсWЯV‘™}дмK: ПЏ“ъИЉс [šRкЉомТє‘АK`#иfCu8n‹мв172bŠnG*зќфё+СHMO‘Ђл`Eы-ВPЫШЫ?*›л’Й=КЃЭмЃSPs—ќрКє‰‹зПНЁq{[ЌzйŠЛЖ7ж]vьХ™ŒЊ[›bІаВGЅs[0iH4ЖZЄeѕ$—&Щ6\Р2!СЅбЖ­‘Єф~oАаŒ~о5y`xІс` "mбВfщ*LЭєЛ2ЮЙіŒвOжМКІЎЂ)ZЗ}ѓcзU_sвеVВЩ (Jяя Д Oз,ЁчфŒђ‚1іДяВсХЅл–VЗ5ЦZо}іђШqПšUаѕэцЂgQAУСF„ hЧŽh8)ЕPhT@8од‰ЃoXNЈ˜!qЎћШNИвfœ2х[i`o­hˆј§…й~ўEeUЋХќщ%˜‘јІ:BЉYŸ’‘еuBњМСєt%vTЗе$Xёа!W›||ЧыжoiЋIBžJІ"#’)оœ4_О”€ЃлРЙXюCrт‰ПМъЊŠDb€Ў )yЧВзэ:в:/Я0аэшvƒ”MЉЉЯ”цф€ЫеYВыƒaO?0ЖЂЕЕбч;ю˜cОД4vWdO~ъLяМ…я~vщЈёѓпљТuйфlO•м-}QŠdЄQј ЯшxIrЅљXБhЂUKѓъ9Aъм•т%);іŒ@"šŒыіІyOм§“Жfhіш‰уlzђJgœtцyg“я=Рѕ- Ыma ВCоьЬ@С>Mšш*™мZГб ё‚(4Зџњ5kšVЏ,џdј˜):3<$%КsЧŒПiВЂе ›lž™“šeЖЌЉHjiгgO™> Q}яŸо{1]К)qqІЗnGД%&4nGlІq0зmk\•оЁљCSН˜ДЭЄ мчцсЪЗЗЦлИ{ТШТЂЖm/4šшKа]f8мДеd™yоєB"МЁ6‘0МУЇŒћЩˆЬ{dў‘ЅхбЫш>АЎŒуN|fЌ-у)Сќ oP$j"UQ,<јЪСƒЏЈ\ИшіykЫЗЖU'ЁŸчрДNЪС ‚†ƒŒРмвl‡IsЅeq€ЄеБИлU_Qљ&хNœШLŸ\ Д}ЧЎIшЗ+7ў}о†m%уКqRfЋYdО‚Ь,ƒP$­ˆБ`џ`fРmйO†++л д№ёМїяYgM8qТYХ>•мRnŒAnЎ/г™&uАо"HІ3W}d‹”Та€ФђWюџЭуe'пў‡kN™хе ~љџяЂЕ‚ёŽ"Aэ;-Д€ \ќчЮЩа%1нep’ФCЧ\їЋc.ЏпДvэЊх_,xцЯЗ­hИћСІzічluЦ#Т‰њ–X5щЬ•ШKВїЏEMФжЕѓRCС„м3yJqПђu•лj?ЎˆMцE#ѕ˜СоWЗmњbй3žМЌX§ыЏЏxЙ-уЦ[NКДmу],Џ4јТiХУ<ЬlЎл–А,У5ИР`omLT›мчП0я‹а˜є@ѓŽЗзь(ЧаSњ H3f”xч}жZБrе?E%мјљŠзj­ЬIcЎhћ$Ж-СќE™nР6[›ZлИ{L†'лЈЏyъбџЇgq|щбiФZзЕ%Т&й/šaMЪгUooa ип›мRўЗy #Ц§ёЊI3Дш'ЏНџ‡UциЦŸ=РяЕЭЪъpC ГГ}YєёщV=Œ .ч ЗYПfGуњ–xnVj ЄЄ•–dхlЏ~gоG‹Цћдрд””ЬгOŸм№цЪWпћdбф0vєMЧ•Nрсљ;j—зЧВІљ‚.0›vT~ZOkЄ‡ZЗn]й‹gщ§3XкЬYcйšпќрнp лvFޘ|ХQйљќр~(dŠ/пŒљЮ)бЧmyрu7Г”жПшќ#ЮЫч 
›jЂё…GMкЖюЏ/ЉГа—{њЉcП=$рх0ё”Љ7ђ•Я-Џјыкѕ\уўьмcOwжфУ(ВИІfI],0бr#P8КvMЭіxк Џ7в‹/95,>(хщљ/ztLиFVіŒ9#Џф2xц™gLjxkеМџ-ўЩ&WЩФБ7W:ХCвІ;І–жМјіТ[tУ#mЫqЬЄЫЇхђію(ЅgPAУС…Ш€Їі­YфЫЬ€7kЦ_GД…%ИRвњ!№дCnМ$чДцdBъz0%PаЬqГЇ=6біeЇч#шyc&?\0Ю›*вМC&ќљцбшѓѕwЫ,tUNю‰-ёА)$2—л•Ь8иcЇеNѓpІ@ТŠ6EЊюŒ>ž ™ž>lша_-[жЊыїфхqЮ/у_ОIЖЏ ЈГ)`ŒЕYже›7?гвrEqqzzњWЏј@DNіYpш„YcŸ§Хуыgп~б,ŽА[‰GІёф9ю}ьЉKв/œ/VН§дЫ BзэˆЭ†L?2эюЛџђ˜чŠY™ё-Мід ЙDЧњ*2P4эј9“оќЧП §јВЃJ‚ kц?rЯг‰щ7ўqТ РЁЧђоёТгžьб?ыFЄa“†њўїъj­)?›"LЫЯ* m˜їтЛ“О5!Дљ“џНћЮЂ`ЉЇbУКѕm'аЮ=FЦ„їЬѓOy~С_~uЫ_oљіœ’Йў§џќѕЁ%SўРИI•Oўъёc.џЮмЩY^оДhоВMЙG]БпŸ"@Јn-OX†мcГƒœЪŒћЙ!Н`иo~XlЙ=ЙYœхDб—vъХЇ jБЅлWр3)9чЬ1izДе’’qЗзWœщq€'0хФЃKЦŒЈ‹лIма_AŠK€ЖpC4з !%зЮ,šS гноЂŸ€ДДМoЭM™2=Ж„`š?рЭKѓx€$ИKЇMyИtœ73}€ѓъду.8§9сЪЩI1€%“ЧџЄЄф’ЈЗ$hšзыЭЯєњДДAУnЪЪ;Ѓ%™Фt=”,єkЮy> фŠьœуOъh\юЌЌƒп:)нN§Щ6ю)(№tЙ‘yМŠН]FZ048ИыMhЄggІgя|ž7-cBкЮ‡)щЃSvнˆпппяпmЫїћОгсяЇ1“vД9ЖЃFїтЪСћr@žxс…•/ОјЦаЁ?пЖmDsѓУEEгpjќvTлоу ƒјЪЅ)ЉKТ#vЄJМжмќНЪЪnї;%%—МњъKЏМrжg|EN"k?)<Ц§jйд1#RœžЎ N "cdJL/Йјіп6џсяќњц'МЉУІЭ§іХбŸџ7žˆ€1тœпљѓпŸЛу/ъЁAЇЯ=ѓB^§X“IЌу•8GsэяўфрсgяњщS % ›§.?!€>іиzНТ“;~LЄDЃttПР Ы*sŸ \“ч^yЭцп?uЯпёјВ‡ЮИє†;O[ѕрѕШЏєqЗ[ЧЮк@в=фмпоы{рўЇџќ“ЗЅЦ˜;oњ ?ПьИн•ѓг;jўўиЫПћщуюЂ™?јУеЧіsl‚1-iЧZb5РPsЦ&Ј€:їЇŒ,IйљgiџѓfdŽЭшђч""У[XаЅХpКёˆЕŒьєŒЎRrFѕБцж$йƒ§Ј{ќ§‹ќ;ŸщќщПМMЇ84jЁŒŒ ]_]wхчхuyœ$ ЄЇ IпхИH@@z(4$ЛяЊг:љќ§}‡ЖuRКюC-|х›ЃіZ~;зƒщјвЙЫBS§і=<‘кkтЮ_;Їє;лшиТ!!pОf§чгŸ5DЖZ"9ІпIЧ ЛRH›Г#%*uцТ=Пф—U-ы€htПfН\J›œ= ЗЕ *.ўБЫѕƒТТЄeнZUѕdcуЗћжœœЩ~šІcNAЇЮЕ"Б b[џўяžxтдE‹†-[F.vєO@ћR‹h7иі‚pјWееuЖ§§ЌЌeg3]џЩš5oЖdёb"кѓ…ДM[2MзœsJJKя,м,LS Ўы Aк–-Aгѕі“OZ‰ЄiKцђztISjКю,[Ev2žДˆ^вL fшЫФtCыHt“ЩxмФ4—Ч­w93…i dКЎЕŸЦТВЁatн‘Œ%, šЫыжHZЩ„šСQhЦЮR$э/EV{J/Ё‚Ѕл=Y€Ш’V4– юнйќњ"Iнує“NКгВ65 : Vuьшˆ ‰„e9]b‰ЧSтѕ:щ-ЁаsЧ…YYрЌbхќ“вL&ЁЃnД{рўЌБQЯЯП№‚ ОІTƒВџœ И-баЋui^[š9С’УНSŠrHЉ6Eщ6†цqыИйГУ1ГŽœY—Ю2BsFн$Є :7р M€‚œœSЎЙцЮ‡~oТ&Ѕ тЛ^у@gП1H!€Aˆf! UлВˆ1”;RШŒНw5K"֘ДэяжжžѓЭ)))_QR90N:akМжICѓЄxsМF№oUQzU‡Kщ6ђd8AУўзШ;ИмКпчJёЙR ЭћЭЗЖ7N2УяИЃЊДєЛ›6ЎsЦЌНOфwz8Ђ aћ"‰@ыМ‘эН‡DиDŒѓ8Рёыжх}єяї;еЭp"вn‰еrnHigњ‹^мЉ(GІ#ЋMWzК 'ƒ€8ђ˜й*ЩFdtФt6t Cгџ‘‘={ђфЎиДЉЪВt]gˆцекA”$bˆšІU$“Ч–•}‹5 x ВwЮIЗТMб*Žš$™юяЗя a+Jя ‚Ѕ;н™D„ШV[в:2јEZƒАэ?нwп>њфрСхбшё6zэк\УxІџЇяПџПЯ<ƒˆBPКXvТI—ю1E<нпяpя“ЂjЊoMщN^W€ВЄЗDвGрЄЫƒуІљї_џњwžŸž~ЊЧѓTSг­UUїзе™šzIzzЯB€”–”аХ8)Hф”‘vB‡Юœ œЮБдcšŒ­ ‡џнд4ЏЅХ"К#/я;YY ыЗFЃќуЯ>ѓLMгњьša`вŽE’MuП+-е—эй‘ъ +}… ”юфвќЮ\vгŽлвЉ=аїЂ€…Ÿ|’ожvvaa2qўнмм‚Сз[[я­­}ЌЁс(ПЈЫuazzЏvЮpЪ90Ќcj' k/ А&yІЉi“iОчhкї23OOIЩіxЄeIЫК$+ыўšš…}4{жЌ=W„T "„ѕ­Бš€;няJзЙыoTQz4(нЩЃ‘["a ѓpяЮa3oоМ чc"и–5ШуљОЯwAZкъXь™––‡юЏЏрrљ+4ŒQЯDŸЏа0r3‘Ш%%2f4кіЦDтѓXlU,VkY-BT$“~ЮO …^8pИлrЙ@ЫВœФIЎыоњњ|0{жЌ^[ђppК"‰F]ѓ†цqњ\DЌєm*hPК“л№fкqK$їю6NQgъzаЅ$DЖadЛ\Г‚С!кЄ\ижіN8М*7vж!ЂRЏї4ЂяoлЖrэZdЬ–вЉ™ХyЉЧѓэ””q^o€БГ2…”ТВQяR ˆœ9œ –ЃыV’D[Ђ‘!“R=™ЊЇAщƒTа t@—цDfŠ„3<б'ІuдЪЧыК†Ќ“В@RHбДФџ3ŒџKOw.эТIbe^оRФ›ВГG—”М^’№ŽБ ш\рJ сLзtn&B”ЖOK›0~<ьm9$х@ iЕ%r!-П;]cЮТW}№Wњ.еІ(н‰3Mзм€h‰Ў9 }Юi'ŸМƒѓХ‘ˆЎiV—Џћиё‘#"’Выт–QgLg,•sPhiКЎ#ъˆэaAGъIщD#Лo "ЦиТ–-3ѓ„уWcнŠРV8бШQвИгU"ЅOQ'Нв]2Зю"!mЫŽwооЇQШу9ѕъЋПГvm„1DДП4LАчyŸNџ'/rOу {|Ў bŒе'“зWU]tбE@@JЉІNt/AV[Мž1N$ЎtшжКŠвЈ AщNˆм­љœIhІH@ €ыњяяМS›:ѕТ+а08cі~ІьзqГ‰8ч-DпZЕjФiЇн|ѓЭDЄ*Bv;гŽЧ­0И4Ÿ[їюнQ”У@ JwB@Ю gжЅэ$BіН/ЛˆHD~ПџеgŸн>jдєЅKˆ4M;Hу4@гѕeёјДЅKaіьЧzHз4еЧанœЉM ЄэsЇšчpя’Ђ*hPКsвЅЁњfнD”RцЬ§ѕа)ЇŒ_ЖьžЊ*$Юл+.Ш§Џhэ<^vМ0œЧ,ы—лЗЯZНzк 7МђЬ3йййЄ&M1Ћ‘I=и4єХг[щЫTа t'DдЙ@ˆ(ЄuИwчpbŒI)Sгг_юЙлŸxт>M+\ГцжѕыЂQ‹ti2†N!"AdI §ч."ч№"чLзAгLЂэ‘ШЕыж йИёљPш‰7оx№о{ƒЁJe8xтfI—цmŸ:ЁŽДвЧЈQOЅ{!квъЋ3.л1ЦˆˆiкU_|еХџљЏ}ќёЧŸЊЎвкzŽЫ51##ЫыЭpЛн†Ё!‚”Жэ5 №iзuЎыэ•1"fYѕБX},і~}§k–Е&,.-§§ѕз_|с…Ю+:(їћюЕV"“в64OЧ|KEщ[Tа t''ЇЁЃ"Вьг!ƒs@:‹(нtУ 7нpУтХ‹џљдSOVTќЂЂ"mѓцœHdŒЎїљB>ŸGзQМАVДЕ•76šœ›–•АЌЖXli$Вžh›ЯЩЯ4mкиЁCяЙшЂqуЦэёх”ƒ!nЕ! 
$ih^ачOoЅRAƒв­Д—!B`ЊIэфd9H)ЇN:uъT([НzёчŸЏ-/oŒХžЎЋЋнК5окnmœLž,х_[[ПhkѓЄЄ0Ÿ/”‘QаПvjъфдд‹‹‹gNŸ^4`€ГYлЖcЊwс ’ 3ˆва<œЉЦSщ‹дyЏt'dLв"’ЖHJ’ yЄшф\нЅ”ЖmРА#†бyo]uuSss4­ЎЉЉЎЉЙўц›S‚AŸЯчїz332R23ЛnЪ4MDфœЋy•‡ %ЌˆггрвМЈUњ ет(нЬcSМ9B Л:‚e'֘a „RщКž•››•› ›7mЊЋЋ›4qbVvvчГlлB :Љмй‚r(Qм #2"ipgеw‰ NoЅoQAƒвЭ,;KЖ iйвь›ѓ-їчМГЬ3ЩŽтаБxœк"‘ДєtpfLЈN…#%Ќ0фЊHƒвgЉfHщ^(IZ"щD ЌН8П О†8?; Œ1MгTј‘ƒ€’v4nЈѕ-•>K Jїr–ZDфLыƒх •оJJлщ тLчL?мЛЃ(‡‡JКVК i9Ѓд'зЗTz+Ы)‹Р3T_З”>J J7“;з§S§ Jяa ˆ#чLх?*}” ”nENа€ФQSй JЏa;eб‰г:†'дщ­є9*hPКSћ№9%ѓ”§%ЅќцQК%@@ 9Ђj9•>JњJw"’–HtщiP™ ћMqИwAй[8 АgКГ№„ъgPњ 4(нЉЃЇ :zTЬАŸœi–Њ*У‘Ц–I †œЉœЅЏRAƒв­ˆli:v;–TQУPхŽ4N"$!2T-ЇвWЉS_щNв tzqA… JoAг‚2Іr”ОJњJw’DІ@†\kЏšЇТЅїp!е—JŸЅ‚ЅЛIгŽЮнšš–Іє&]ŒT ЌєY*hPК €)D’€tnh\2(НEgEHEщЫTа tЇЄD"Љq7gЊNƒв;ДїЂ@GЌњ”>J Jw2э‘С]šZдGщ]$ )mRkО+}–JчQКSмj@ЉsЦдђСJя€Рyє ЭMPeЄ•>K JwŠ›aD”$\КWзœ A5ЌJo€Р\КO“†Юн‡{_хАQAƒвbf‰ЄЁyugЪЅŠі“*#}ф!d5Gwи"щ槇{хАQAƒвтV%I—ц5ИTЬА?œвNаИЪ7:в g:tUQ”ОI5LJwŠ$šˆЄІЁyЉАaп9ЅЃpAзuPХЄ8’lIЂ§WEщcTШЌtЇ'+н_ааЖЭgЄD8XKћX–…@НщЂJRКu]X–АL"82УIР5­/v‡ H’’дђхJЅ‚Ѕ;Эz9ѓUŒсA‰ˆшѕз^{фбGы#1@ъ-_јЅƒ .,,|ђњ›››cGрЛ#ф ЧЮМщцz=gTхpя”Ђ(‡‚ ”ю‡€uмз4Эgž}іѕW_эЯР дЏуfуВEе>CCƒeю]й DЈPЗ­ђД3Ю5rЄ ЅяPAƒвѓ0€pвЬaьўbжŸ‰$ѕЊм)D Vpф^‡5Цnп&чGуНщА­#іЯЁ(‡’ ” ™”:ЁDН,Ё—#а‘7*бIЩЁЯѕ.ЩE9dTа єHD@аёz[>ф~uЂŽХїŽ2KН+JŸІ‚ЅCчŸZ раТ>йWпЇŠ(Ъ—ЈЂ(Ъ>ш;+ŠВwЊЇAщ…DŸъ;?є$XЖmY–‚ѓнчжrЮ{бWѓЮeНZДЇ{ЅїSAƒв љмP5ч‡ЦаƒФ8ЯЬЬдuн)^йћiC—Ђœ’„І-MI6‘”dtjˆLщ#Tа є6рГЌ7A{м t/ŽДA€iћзcцH);;PHсіљ'LšT2p`žaСй.ЭЃ[Єњђ„Дю —цcЛV"QХ*”>B Jя! „* юj„ХёУН7Н47џьЮ_ьэW^~љ=їо zшедДуŸn~бВ’‘@d‘D“$сx§ћыŸpi>[$бВ™СтЩ§чъoPz=4(Н ˆlБрh/\R= ‹ГєcЌыјhaфїlЗжnк,LѓpяцCdѕс-jмC €ЃЦ™3Cum›%I†–HŽг}ˆ(IрСЉ›Ў(G4(Нгlh0Ч @ЊД†ƒ c(ŸКFeNOO-с?PгYь`шxwЄsзЈ~'li\сs…ЄЮN аЙЫ9­$‰ ;kHЮ4•ж є *hPz!Акd{а 4ЛєуВ‰zіє@Ш№ЄxГcf+F]о)uдпB@[š!#;?u!іšй"ŠВW*hPz'D`ЊЩЁ%ЉЧуvКHќюєЂєб+ЖНхбƒDтЫ# †вE@#”Ÿ:TBƒвgЈ Aщ+Ра"РVqУСAB„@лЗЖrнАmЛ3D "У0М^ясогЏ‡ШˆdŠ7''4ЈКЕ\чЎ=іІь˜RЁ(}‚ ”>t„Хqјg+ЌIЊy˜ и Ћј’ЯgœpЂІiRBGЭDЂ‘CпzћэуЧ?м{њѕ:R­г§’Ф—K…ЩAй“їn*Ъ!Ѕ‚ЅO `Uоˆ@YdЯЯз;ѕѓйбdy9uЙ]Јxq§КГgїˆ СqШK–цЫЏ oж5Oз„  ИгГƒїn*Ъ!Ѕ‚ЅЏРŽў†ŸІУ™~HЈ с РŽŒH@ьZЇAјМд†зяЖь=ˆL’yВ2ќE5­wЛЫЖ§3Чым}ИwSQ)4(}Ž ŽЄHЅ{uљ&NВЫїrщќ“=pžAaњШMѕK-‘@фэ€$б?c\OЩшT”юЂ‚Ѕ/ъœ"Ј=cЂх— 2*LщsЅ6FЖщœ“Г(‰T_^†Пд’JŸЂОk)ŠЂьIЬ іGDъxБьDqЦЗ5йRщcTа (ŠВW €JВ&šЇГTЄ–rІѕZUŠвmTа (ŠВWˆ€EщЃнz@’DdЖ43Eщў~ ЦИ”ОF ŠЂ(_8гŠвG#"šv"?ehШ“э!…Ђє!*hPEљjCrІ" iLЫ bШ;*>)JЂ‚EQ”Џ—* yГ-;‘(Ъ •ЉєI*hPEљz љ ь)I;šщ/JѕцЉЕА•ОI ŠЂ(ћћЇ54ONhCF еи„вЉ AQхkјнщƒsŽЪMэS1ЅЯQчНЂ(ЪзB04ї„Ђгв§…€ЈOЅ/ReЄEQі‰ЮнYС‡{/хpRСВЂ(ŠЂ(ћD ŠЂ(ŠЂь5<Ё(н pОЫd<’$Фў­ѓШudТ"Йїg1 9ВЩ–‡ћ=+ŠвWЈ AQКc`Iй*Щp.х @цфИяqЕY" шцнЫтАэV№ZAQ”CD Šв-("љ№cƒЇЯѕŒЊљ I‘kѕћ‘чџлCюЫх$wїxЦљюјПдєь 1Лі% KтЄлћн<гўєчuw ^ЉEQ:4(Ъ7Ч€IJН7<љ3єvmщZю ЯБпђ_ј№ЯЯn|Нt`@ эЌDдоAŒѕ?С3й'?ў5YР’иљ`ф@3'yІ+Й` Ž юaƒахщ$œ:юE„]ShџFREщKTа (пj§дяІ§рB=1ПёŠяД.мDH€<нsХƒйЗœ•rџSцqЧ†7'Eˆ˜!)e PG„мJм›Sё– d †”2 4‰ СŽ2jvD @R’ШpаЭЄгЛA I PH™аЙ” Р @‡і8EQe*hP”o‘Єd!з№Љn"ЗпоњvЙє u‘ЛЏвђXhLž€ŠИƒњ ЁмUm.пY=% lkАЗlА[ ‰˜Ujd3QГбЎƒ†@qbЙХюь цqQЂбZБŽ,‰œC d(ma‚–_Єeч07ЇdDжnБЊТЄ!rвј€бzfм.+“Ў"OQ&+’ъЅ)б›Њ—№ PМYдnЗЋ#фBT‰ŠЂ|‰ хBdШ9@7QРХбЭ$HЩMmwžЙ!PprъНя‡ќГючЋ<Їз7$<ЉLѓп5ќёюxEiКћТђЏїEџ2eЧЏV№,F1ЁMН&§Gw&фˆ˜љцmy@ gЉE!YŽїТяЄ^ћCяЏГ[TљNЫпбђђ"iёїК%йч}ођ‡‡№„‡Bc9МюІKžgEЧІ|џ—)sЇuЮН–eЯ7пћ‹жзV“—Tѓ2Eй… х"B†VkrЫZ›Nё§пЯRЃi{їцV›8 ˜GG˜ @DІMVBьˆPСдєŒŠ=qgѕ‹ф{цў$эЪŸfyЋЋПїЗD˜ VC–WF-`RRњ yШŸWНяъ№gръНј{WчY@ˆdдыОєз9ПО’­~ЎщGO'Њк0{t№ЊЇўс?Zђфњ—жJFВІœтщў+Ў4—ќНсцOэ-‹Є7/tлSi'bќ‰Z?\/…Ю ЇјЏМ-§СБьВi яжq?#ЁтEQvRAƒЂ|C1 ЭЗh55ыВ9iœю_њ‘ЙЅвZП$ЙrQ|I™нdШœ„C‚а7и~ќ”њпО)\€VybK›6hLъ1W{†§7БАžИК %ШИшGўф8Я‰yєбŸяќ[4м2іzlEEъiyf%‘*ўІ(Ъ.Tа (пIрˆбM‘ЛцZŸ8їšР ЇуЈЙЪЎ­L~ќDѓпNЖ":@yл‡kШЅГC&Јq“Йfs’;?—iѕ@эЩ€@2рž6$&_yйіыš—€qжД"ќvY№К‰РIЦИ8йшчџ}0ВвB?–і‡ЯЧV|ЧsъЙЎьлbЕ&‘ [М0йФЕBЖХЩŽкЕ€ЃgњЯ•|q%qрi —=бИHz@ O(ŠВ4(Jw. 
Е‰7N~ќt“?ЄёL=о3{ЎwдT}шdяŒЉЕ—]Ћ4ИP.тa’ $A„eМNi^фе‘x†žюжfm‹"ГШэ е€(ˆ@ЯЬв8сЈГў~@Р€тКk|.@šЛ V5гeлIM“8QѕВШŸёпuuЪ=ŸИ>ўЩбw^Ž}ВLDьћ (Šв… Ѕ›$mb€„xDD"ЂІЪ\њzыпОEs2ю{<8тВŒя=Лэц7œe–cd[;k*‘- ­ЫƒЙ@œlЙЫ…~Ы|§Юъжѕ)пЛ-xхoœdЙ}AЫO.lynЁAѓ дА=АвQn}НівуCпЙ=хьыНмйYлўќ‰њŸ§ОэЃє!2(ŠВ 4(=”I€M&™&CI` lБСЂC7Ђ&Х†[яYдірwаp!C"лЄdœЂ6 KkЏ|@š‡6НмјэзУЉ@6X йж&m@Ќф?Цmљ/RЌ•…€,:Š5/6\їN“Ч…+*šЂЄПЛuвЏРj#/€ Фˆљў#ѕŸџЛбчCА(‘a“8 ŒZиђ4PДCрЄ7ЂВт-?[ў­ РJЪXХ$iˆj­KEQОD JФxqnіыˆ—nпkѕ!:k=2шFBЖE@FкoFgСIDg5ъ.ах;fзTƒ@@ PCч1mВУащ B@<"Ђ‘іА5DŠл5q`€š“ˆ€ш"HФD4жq”œзu†$ˆ"Mv+€их  `›ЂЩм9 С 1(ŠВg*hPzCзЏКцšМ‚‚p[cЌѓb,‰8cѕѕ?ѓœ•lDD8Ф?"чŠО[вЁRtмHd'ЩДHr@Ѓcџ‰vю-ЧіP`чF8 †;7€ЎЎovyLззнл6Ч0@жe‡Лю†Ђ(ЪЎTа єH#GŽ9rфяЊйQѕЦћX‡#hhG_•?Ш\<У…Fг‘іx‘оЫО|ЩџкЧьг^‘JxTe_Ј Aщ‘Є”RJкѕ)ЅdŒ566 [р‘8]PŒЏLŠыЗˆЮЙ„ l–@j&EQ”Ѕ‚Ѕї`ŒQAAСЙgŸ§_Ю+vэS`œ7зеBЭŽУН›ŠЂ(=• ”о  ојУ^zХМK—‚”RукCџјЧя~љKRЅEQˆ ”о†ˆBС`(ќђ]™YYЊ4‚Ђ(ЪRAƒвл "эБXЄiš‡{яEQz04(Н"т—ЊB2ЦеЄKEQ”Ї‚ЅЯMЊlУЁуsЁ†Ѕ‡SAƒв‡ 2 0Ъа .Uаpш€ƒЖЇN EQz 4(}ˆТђЭЋ’`єА‹C IˆGJIь}оwТ*MТЖїю(Šr€Tа є!љyy ‹žЏмњ|јpяЪBш,Xе#ѕЫЫ-0рpя…Ђ(еœuЅяH&“Ы—-ЋohшЁ=фN‚Ї”=Г6"I™•™9fьXУ0їо(Šr ў ,МЊ\"%tEXtdate:create2011-05-18T19:37:29-03:00е›z%tEXtdate:modify2011-05-18T19:37:29-03:00ЄЦТОIENDЎB`‚Scrapy-0.14.4/docs/topics/_images/firebug3.png0000600000016101777760000025705411754531743021231 0ustar buildbotnogroup‰PNG  IHDRХяУ№пsRGBЎЮщ pHYs  šœtIMEи  + ™cШ IDATxкьy|EћР'їб$mz$Н[*…B+PnDn+r[ќ!Š((сEф–C.oPAAЈˆЪ-‡Ъ+(Д…rJKšщ•Іi›4чўўXо%$Л›Mš–BŸя‡Ÿнйй™чyц™™gЇГжш‰3їэиŒ№6˜ВxК+ы"˜о ћxaЗуЧoлЖ­iФоКuы‰' љ€xњ‘G[fГљЬ™3#FŒhšъFŒqњєi‹Х– хРmМЂkЋm_,.>§ГЎЌа"–В;?%џІЂћihuывєМkЅЄ‰ /–9—.]Š‹‹ ЦOгввЏ …ТUЋV!„Ў]ЛіћяПЋT*>ŸпЖmлчŸопп!d0іюн{эк5зЇOŸЁC‡їEmиАH ‰ЭЮЮюмЙ3t- žn(яŽЯ‹|L№ХЩ6Ёбќj­эТЉš­KKš&žі ЉЎЅL:!0PС}H[їъеЋ:t NcпгЇOWWWуЧЇNълЗo||<›Эўу?Ољц<\ўёЧљ|ў|€JOO?ў|=‹r аBЩЩЩWЎ\x€–C#юїИpЊцЭе‘с­lKТ4Nўхщ6Фе§Ÿ—ЇФfїфgLHО~3Ы€'šыэЫ^UѕѕЯъыŸЕ|šЪ\oЧгЛВ.юјH3HyЉЗ_ц’)wЬ&Ьm:q@Z‘Щh_<щNoПЬСЁ—w|Є!н2{]д УЈ6Wц<—{r•ХŒym Л лђnб ХЅ'Х™ ^Ш3ди№єЂ<гœчrŸ’f>!Ьxkи-m™её.\ЊЎЌ‹Žт‘ЊCEAAAllЌkКЭfћу?ž~њiќtЦŒ:t‰DрщЇŸ.**ТгЏ]ЛіќѓЯ‹ХbБXRТ•‡pgЏЂбB,aЇLњтT›Е?=vц`ѕфž7Јrт ЩŽџˆK?oЋјЯц(E$_,хМЙ*тdКOџёjbЗўRˆ-ёчЬXqю˜о­UIеЁУс8Ѕ`vђфЩў§ћ;ЅЇЅЅ§ч?џ9}њєЈQЃ№”vэк§ќѓЯƒС`0ќќѓЯ&“ : €И3XР™Й2bцЪ CЊœњяз•Ю—їщёЛkŸВРЛU Хl›ѕю:heЉ52ŽGФ ДЅїі?DДт•‹лtђŠ4жА˜Л7†ЧђiД0дкOюЏњu{х­ЫЦAуфЯN ЂЪIњ>"~PVdp•HgБю\ўoэІwŠr2 Ц:Лc: ЄъP6TЊгщ‚‚ю“9++KЁP(•JЇЬ6lЈЏЏ?sцЬЎ]Лоzы-„аИqуіьйГxёbЁPиЛwo‰DтV<NGК"ёДїАX(6A8wcT?y}Ю %З(пеZ€*Ъ39ОHЄп1*нЇЛ­ЈDeŽˆЛ{#iž~ЏљmGх™ŸuЩ}$cпщ“РАМГ@pя›П<ЇєwЦхЭ^ѕФP™ŸŒcЈБѕѕЯђ­хЃЃЃU*•S<}тФ‰1cЦц …§ћї?~ќ8~ъчч7eЪќјмЙsФ>юмЙ§ €–C#юї˜ієЭу{ЋДЅЛ +U›7Я/JъщGЫ ёывдк2ЋЖЬКv–z№ ФЅчЈЋЪ­UхжѕГеC'ИOЇg№ П]ЈЋАъ*ЌNЛ–я•ќЖКm'QњЭЄi=`Œмы`!4zzШђiЊТл&›ЫЭ6.x!O71ХВ‹ѓMЫЇЉ\o”pT9ѕ^з›˜˜˜™™щ˜rэк5>ŸпЊU+ЧФяПџОДДдnЗыѕњп~ћx…ё‡~ащt‹%;;ћшбЃC† q[cVVVbb"є+ZИ>§ъћa{З”­˜ЎЊглƒУx=ЫVяЃПeЦђ№е3еЯЗО‚8VўЦВpтRЇо’qIWЕіcхЏш>žз—†ЏxM5<:[,eЇЮRўїЩЦхнYэ}eŠЩѓCЗЏжМ>рfyБ%І­pъЂ0<§§m1ыч–ŒЮSD№&ЮUлSхtуФЙЪ‰]Џjэо}…КcЧŽ?џќsEEё ъ'NИюœNJJкО}ЛFЃ‹ХэлЗŸ žі2tšfГљЬ™3#FŒhšъFŒqњєi‹Х– БiФпЏ­Ж}БИјєЯКВB‹XЪюќ”tќ›ŠюЄž–уі г>љ•ФRx“ мp-XТЅK—тттˆŸ=П~§њЩ“'ямЙУуёкЕk7rфH‰D‚~Цџs„С`иЛwяЕkзx<^Ÿ>}†J_NHHHlllvvvчЮЁ“№АЦгяŽЯ‹|L№ХЩ6Ёбќj­эТЉš­KKМˆЇ}NjЇk)“ƒ‡N TpŠFzшvхъеЋ:t NOŸ>нПџИИ8‹ХrфШ‘элЗߘ1У)†vфЧфѓљ|№B(==§ќљѓ=zє /'99љЪ•+Oаи4т~ Їjо\оJРцАф!мAуф_žnƒ_2зл—НЊъыŸез?kљ4•ЙоŽЇ[-ињ9…C.ѕ“g}ПЎO$і{иmи–w‹).=)Ю\№BžЁЦF\эЪКˆМњTЮБД„ šѓа№ЫЕе6GСfЏ‹К‘aецЪœчrOюЏВ˜1&ъœ=\=.щZO~FJlі­4‰NTшхЇ*Єс“B%CQžiЮsЙOI3ŸfМ5ь–ЖЬъЊadœ§Ÿ—ЇФfїфgLHО~3Ы@_oAAAll,qњњыЏЗkзN H$’чž{.??ŸўіkзЎ=џќѓbБX,5ъќљѓnЫ‰-((€РCOw|RВ|šъђпu&ЃнщвЇ‹ŠЫ‹-?нJњщfЂІРќй{Хxњч]5юЪlw0џёвBГг]_Џа\Пhи™бюxiˆНщ"єПн А.јС”…a_}Xbџ_…_}X’:K!ёч8–г}€єУБ‡ ;є%пГЅ|hјхе3 ЎœЏЃWgёKwІ/ ћЃ&љЋ?кfŸЋЃIЄ‚T~&…x'АG2ЄЅфІІ)—uЄ)оocБPl‚pюЦЈ~ђ,<%HЩ-Ъ7GЕ „ŠђLФkvСaМЂ’Бo„єI р мЧэЛљ­?иУаW8UѕTIЊD—UoАуq|uЅе­ќЄ…4\`lјЮИМйыЂž*ѓ“q 
5ЖОўYОѕшшh•JхOgff8pр•W^‰ŠŠ"Н…Эfл<ќќќІL™‚Ÿ;w.>>оm9wю쉉‰@cгˆћЇЇ=}ѓјо*mЉХnУJецЭѓ‹’zњс—\—Іж–YЕeжЕГдƒ_ФгŸєб›ъВBsЮЖnЖкЉРбгC–OSо6йЌXnЖqС yxК4€ЃЪЉw пЇ, ћnMщ+‹Тx|’ИѓуЗеm;‰вo&}ќKыcф cгЉљyзъ­ УБKšиІЃшЛЕЅѕ{Y‘eХєЗђ“вpIЁ’СdФB_Ш.Ю7-ŸІrНбЩШž’˜˜˜™™Iœž>}њрСƒoМё†SМcЧŽ’’Лн^QQБkзЎŽ;тщ?ќ№ƒNЇГX,йййG2d}9ЁЌЌЌФФDшс46И>§ъћa{З”­˜ЎЊглƒУx=ЫVяУ/ЭXОzІњљжWBЧЪпX~7_Жq^с Џcіъ{aNNžК}Ецѕ7Ы‹-1m…SнЭ0qЎrbзы†Z;БЯУAQёЊЛГк{ЁЮг#ќчК]xлгVИєћV4‰‹ОŠљpЊjывEoв;ЁПяЋЂ—ŸД† L • яo‹Y?ЇАdtž"‚7qЎђиž*Ї]ь;vќљчŸ+**№OP8p!ДrхJ"УъеЋA‡Оћю;F#•J“““‡Ž_§јуFcЋV­ІNЊP(№tЊrЪЫЫѓѓѓ_zщ%шс46ЌбgюлБљгjЮˆлC^I „n>œ8qBЅRM:Е ъкЖm[LLЬРСь<Ќё4ќHx фсњ!IŸаXћ=Thе•u1,–ПjO\bw?h]рaЇАD 4%l0@< OФгё4OФгё4@< Фгё4@< OФг@< OФгё4O€Oрв\c-a€XŸˆЇтi€xšl1†-ЦРмРœAc2zˆ&И–<7™ю№4)|^2ѓЉrrщoЫŸ•лљ‹Ю™šL„PИ4МhNBшБMхUх!„:*;fMЯКЃЛгjc+jE[эVЕ^Н;{ї‡g>4йLр\67oV^”,ЊЯ7}ў*ј‹HянћЯ—џTыеqуUнхBљ‚> F$ŒˆёA•ж•^+П6єћЁЭ0*jтI™ЈCX•БъdўЩД#iE5Eа_ ч6[мЌOŸЬ?‰ъпЊ?~њLќ3јСˆЖ#№ƒЇbž"Вљжk +l]ищ;ЇєYАљ™ЭрОVЛuгљMЁщ]Ї;ІПоѕu„аЦs­v+>јpfiŒйЪгЛBФ!џNћwzзщKN/ њ((јЃрiПLkЮ-еє“2k KДLДіПkЧДѓЭѓп@giЩ=З :r“ЦгУу‡W+5Еš wущ>1}ˆl,ФšгkNю[ЙцїЬГ оyђКOл—:ОtѕЋІE&UšjVYLфЋ0TМ}єm„ајФёDbZЯ4l1f{пVђvЩ—)_Ъ2тв[=оК“vЧќžљъW_ъј’уВ<x}cњў=ѕяКu†…†ЃџwtШcC Ћ4 _]ќЊЦ\3І§˜ Qž,н~tЙцЋŒЏа§`ЅЩsпЪХcУ:#„R“RБХXjR*BЈKXl1vыЭ[јэŽEсЧ“;MОљцMЫ{–Ћo\}:іi&Г‰ЬЫtфУ~>&ьяьЬоYkЎ­1зЩ=тИ8ЭfБпэ§юД;–ї,Њ4е‚> и,ЖлKЬ%w;e?РI™Рd3m<П!д'Кc:U э?Њн(г"“ѕ}ыЬю3I#jѓтхЯь>Г`v}БOœнsЖЃŽ LZє\Њžы“NGяЋN…г‡ЌЎе1ŠЇŸŠyŠЫцђ9ќqЩљх`ЮСобНqѓс.Žg›ћФмuƒз§U№WРЊ€Џ3П^5pеœ^sKбvDпoћ†Џ ПP|aУа 3КЭ№Ў ХsуаYšЌˆuOћє˜іcяЂoзш]=#{Nи?!huађ?—ПйуM˜сš†jSѕЖŒmŽрхф—ё”—;Н,рЖflе›єN™ЉFђУЗЃџ§ЩttћбФџјЂЯсмУTЕї‰югљ‹Юуїoв~ыs[™Ь&n!-г‰чк>‡JПžNUШќоѓW XБїъ^йJйОkћ–ї_ўЮ“яИНФ\rЗSіœ”BR„Сb`R#ћOь0qяиНѕжњс;‡oљg‹G–ЧIR$ЕџЄ={ `X?d§Хт‹сыТћ~лїљ„ч=вбБ(шЙѕ\/:[ЯaВКVчІ KjKЎW\—№%н#К?ѓ”„/9pуР8,Ю№6УлЕ •„оЈИQR[‚zЃлЁe,3X Юm@Эш~_ѕsЭ­0TT+чŸ‡šнkЖ[‰‚ж^‹кsu‘ИтЯзЪЏе[ыЗgmG=лцYтБ!4чшœrCyЙЁ|ЮбћМ–F<Љ@jВ™ ѕ…vЬў‡ъgw= 3@“БёќFf›жe БXˆ5­Ы4fлxŽdЊЃЩфСgeW4ДѕаrCљАжУФ<1Ошƒ_%х§SязškЩљ!'c2›И…ДL'BќBBхuхNЋbФЊлk]^C­џ{Нбj\її:„аk]_s{‰ЙфnЇь8)№9ќЗzМхTMnэџzззЗмЎвЉzmыuєіQRiЬ‹ГшфЂZs-~ŒЏ\тсM…ЁbюБЙщшXє\zЎЮ­ч0Y]ЋуКёdўЩvСэњЗъ$ 2X ЧnГaЖsЭˆЖ#xlBшдSxЮHY$BШqqЧ‚  КРё к?šІ^|LБa6uЕzх_+—œ^B<,АМSh' _‚?Ж‰ƒЋSщTјщнЧiФ{яф{+ЎМ0э‚ Гe–dОћћЛ'ђNР а4мбнйmџИФqтАЋu`ыНWїЊЊUЎ9ЉFђSwN™lІобН‡ЖъЧѓЛwьЁ=џLяшоѕжњSљЇЈЊЦпrГи-ФRЈлйФ-Єe:ЁЉеDЩЂТЄa…њB<…Е„хИ1#\Ž*Ћ+C•ж–"„"Єn/1—мэ”§'eЧёч›чИ4HUЃ[ћЏМ†…XѓOЬПV~JSѓтT*ˆc<’q o˜ышXє\zЎЮ­ч0Y]ЋcOЯш6c@Ћсв№#ЙGŒV#Bш№­УУл Ч "^FTые­Z…Ў -­+%-*к?:_—яъЄPэ4п3vO˜$,ewЪбмЃ\6зАа@NU­zLўXЄ,oЫи€XЧiФлx~у—П|\љxПи~ЋЎк9jЇr­f8€&c§пыЧ%Ž›оu:>Єуы‚ЎPф‹сеƒтНлчнs…ччўЛ№яљНч+ќЧnУg.цаЯ&>!§zњЌГ&wšМьeTwЄ,RсЇадj~ b ЇПФ\rЗSіœ”‰EЋЎzЕѓЋŸ€oЩЅЏб-)ЛR~№ыwЃОГи-n№дђЄQWœ<Ю)МёкШаsиsщёШs< Y“яOŸОsкŽйћФєiдцЇ?с‰n№уљm?Cёє€Џ№ЏД:@ хK‡ЕvєџюћcЪкСkƒХСAЂ 5ƒжљ=…Ут „ЊыЋ…\сŠ+/сAX=hu (0XМ~Шњћ‚fjёвЧЇ?Ў|;Ђэˆчк>їWС_џ§Cšf$Ч7bv яінхяBп]њЎKXфЩL†Г‰Oјрє—K//юЛxaŸ…в‹г*рОЯЮюИД!4Їз!Wјіo#„ОКј•лKЬ%w;e?РI™@kдЮ:<ЋЌЎlеРUJ?ЅлнrъЮЉ”н)vЬОoмО)ЩSHѓа˜з•ЭџlЦУ› QојмШаs}‚GžуiШъ~}ZkдfiВ:‡uЖк­ПоќOќэжof›™Яс_*НTiЌМkЌѓVуŒn3Jч–ж[ыЯЊЯ:=ІЬ9јз”ПтфqšZЭлЧоЦНаS^LqгАMЇ&Ÿ*в­њk•уЅ-џlсА8szЭбЬефUх}tіЃ”6)6ЬцVМ­[W\н3В'†АџЊџ‹я•š’uџ]—>>_ёЂŒЉGђУЙ‡зYoБ[і\йƒњсЪ†nрsјЬЗ`2œM|‚Ў^зk[Џ9НцŒKЗ№Љ…|_oв_*Н”Q’Aм6чџ:ќпœ^sŠkŠп?ѕўЪПVКНФ\rЗSіœ”1ZЮmX1`Х'У?Гw }L8™ђЙнЯL=ИэЙmAЂ 5џ]уњЈCe^в0‘ЫцОеу­’Й%yUy+џ\9Њн(ŸzЎO№Шs< YYЃ'ЮмЗƒюэЭxzWіЎ/RОш ь`Д3J2FюIѕТІшѕzW]дjЕ\.ЏЉЉБйl\.W.—[­VН^ŸсaVUUe0иlЖT*%Е0†aеееuuu!™L†gУo4!\ъu2/M<}ЇъNЃJ* %M€ыpB1лjХЌŒЫcjэ~2BH$сдVлќƒХdǘ1БФaГG­]Щ'NХRvqўНhџРћ .“sя”ж;І„lЅзZ§ƒИz­UшЧfјсЬ[M&“RЁdГй555UUUJЅЁ‰Аищ”И+TŠвjЕеееЎAЄйlŽЧBЁаd2ёx<›ЭVЅЋ‰Dl6лd2сэын%G'ЁЩVSSc6›C•Ё,KЇгUWWЫхrz]ъыы n™ђŠr‘PDœjЕZЅR‰ЊЎЎЖйlxXЏеjI-Ќзы-Khh(‹Хвыѕx"~chЈ—ѕ:š€ ьїpLЮеk­!cэНѕic­ !T­ЕЩ9ŽдГYБмlуѕ‹ќ_NІбцА 
ƒУН/8цђX6Ћs$,W№ДЅ„ЖдТєЧbТМћРˆ\.чp8,K*•š-fютp8rЙм`0щDиm6›Й\.ОLу!/BЈЎЎŽХbс7т!Џw—\уiЊlЕuЕ‡Эfћћћу УєК–СьЉЁ ƒуэЄЖЊЋЋУѓАйl"hvКбгzЭ Рƒі{0ˆЇ9Eyfџ .іП€˜Уe!ВZ0НжъєХh6=–(rŠ›ЃmЧЈзfХ\sJќ9e…ЈЌШŒ!$–2§lHMЕ*3ё6Ёгѓ]џ“БX,цЋм‡8АйIіm“nžUК*<š ”жддH$“йˆa˜—kЄ)!dГй4GКмg Cйь6ЧлЩн!iЂѕТцi€xњa‚/`ГXHWauмз!–pДe›хјУ.!?)ЇІкDnXНжЈф9žњЩH–х žІРЭg(aНС^Uj‰ІиiMF7|;/~РasHуiЧ ј1›ЭцrИƒХb‰D"Н^o4Й.‹ХbБX^\rz0 ЩЦas”J%iШыVКч 6ЧёvЗyЈnєД^Gѓ№0ХгБђи–i&Y ЇМШуА1ZТ.ОcVD№œr‡ё n™и,„AЯXkз–YЃZп]У6жйuV™œƒвWй*K­1mIЖРЪCИrw;=0 йm˜Љо^ЋГUWZCЃљЁЯўњЯfГ- ёъ›г)BHЇгслt:X|яGёжvЛнjЕђљ|„у1BH(ъt:ќ<БX\UUEМTчн%ф№$M6‰DЂеjёНј~AAAєК0A,ЗWUU9^"ЄђѓѓЋЊЊ ФїOуq0.Оv^UUхQНN&рaŠЇ[,29ЗЌа"rXŸЦeЮˆиQ­хХ–’3ТафА У/-Д”š1 ‰$ьшx_рqŒџфИуяЧЖљіїЦЅRiiY)fЧ№ˆащ!Фчѓ5Ѕќ›ўўўNЗзззѓљ||aиёyЋѕеxј(‹uе:bп‚w—ЁЩ&“Щєz}Yyў%™LFмEЏ =ўўўUUU%%%,6K&•езз“8LV]]я6!ъН{ЃІ!$yVЏ“IxААFOœЙoЧfвkпцшР@€+ЎŸћpЂЊЊŠЫхтыФŽЧ€O“@ГжЇ)С—:кu?pъыыƒƒƒ]Ÿ›ˆЇ!0}qќ™ЧcРчцрпЏ<І1>ё4@< ФгMЩХ‹Е_•џPPSS“нЂTnЖ 4Зжi!-ыЉš˜Y|ЈN3Naъ„‰рQ2Фг@ѓЂЈЈ0**ЊK—.`Šf;ВР0 0œТиO{†FЃЩШШРzlcc4ж{њЃ*€ЯЁŸ€az†Ш†В‡Ђ[кpкўЃ@хrOЛУАŠŠђШШШŠŠ УР нn‡_ў€с"рћгnаыѕWЁPhЕ•zНžъYПММ\ЃбX,‘H‹џф5†aХХХvЛнпп?&&†Ус „L&“Z­ЎЉЉС0L*•ЦЦЦђx<ЇЧќтQ˜Д|Ї‡ЄШШШВВRГйвЅKЊЊЋЋЋ‹Š ыыM</,, џYЛнЎVЋЋЊЊBrЙ<**ŠЭfуe:>‹ЇЎuj+Е†………)•JнI вF0Gœž\…Ё)'::кеАЄBвX#::КДTc6[„BAttŒЩd*))1›Э"‘06Ж•H$Ђ1ˆ“.єіqmєˆˆˆввRЛн.—ЫЃЃЃqНH&'''$$$00Я`6›oмИ‘˜˜X[[KЃ,Љ=‰ЋTžь‘aiД#-„Ё[’жх[:сЉжvЛН   ЊЊŠУa+JЊчvRСЈюЅ2i{‘ ь:Ю0ьnйЪmk’ ьVM*ГxнО4тЙxхЪ•Ч{ яз•••AAA!ЃбxћіэЄЄ$ьCЅ>щ€ЯА8œ6p ѓд=јдщфKEEE„ОTv ЫЁIєк-iЈyЈЇH&3 г-Tк5|tur9ˆЇ‘ло‚ Q”——SХг555mлЖхrЙЅЅЅЊ„„v!Fc0кЕkЧсpдjuQQQtt4B(777:::..w ТТТV­Z9і@ЇцЄ*п‰КККvэкsЙ\šЊямЙэяяoЕZKJJ№nP\\lБXё ХХХ‘‘‘єfqЌЋИИИоXзU\\Œg№H*нisРRahЪ!5,НЄ]mкДхrЙЅeЅЙЙЙ2™ЌM›6јЉJЅJHH 1ˆ“.žV][[лО}{„JЅ*.)ŽŒ д+,,L­VЫхr|НЊЄЄDЁPp8њIэI@уЩ>1,ЗЇrKвКМsЫj]\\lЕZ“’’№ŠHЫЄŒЩНnл‹T`зq†awѓДщ[“оСhд$5KCк—љ.“ЩjkkE"‘йlVЋеЇЖЖV&“yj*ѕI…aиŽ>NНш|вЧ›rъєД‹9ХЪЁŠŠ NG“шЕ[6ЦPѓАO‘ g@Ї…TЛ†ЎN.ћ=§BT]]ОЄ'—ЫыъъL&iЮшшh>ŸЯfГ•JЅС`$zTTTŸЯчp8:]žž˜˜(•Jйl6žЎзынJBZОQQQ„УQUЭfГ-‹еjхѓљ111xЂVЋŒŒфёx</**JЋеК•ЧБ.­Ж’Ј‹јЉ ‚F0GюП…DšrH ы‘Ё˜˜˜Л…(”6›э^™ ЅС` 7ˆ“.žV…ыYEЋ—L&уpиј3z}}Н^_­P(мжHjOOі‰a™И=•[’жхЗєTk­VK4еo!Q Цф^ЗэХpРaино”SЇЇ]Ь5R,//w ы]НvЫЦjі)’с xˆBЎOFWиясСтДеjЭЬЬtL!]Лuœlˆmж‹хъеЋЄOTEEEƒСnЗ3н—CVОŽљЂЊ:..ЎЄЄЄЄЄ„УсDFF „ЌVЋ@ Р3ЋеъVžћыКwЛwPA#˜ЃїзK" M9Є†ѕHHЇB7,“ЗмZв'nщЉжјЌC4…п’ Цф^ЗэХpРaин]!žf„нnЏЌЌLJJ",h2™nмИNьRu;ЛЗmл–h0‚МММШШH‡cГйВВВ|.їЛ­‘дž^{ВЇк‘ТА!Hыђ‰[zЊЕ“ћyфTїRДН ЬА›јЄ:•šTfёIћКm‡#ЊЊЊиlЖППII‰NЇИœеEЏОo'†элРЮ'юб”SЇЇ]Ьёс-///66жQNвDЏнВq†š‡{ŠєbЄвЮ'ЃЋ#Апƒ’ЊЊ*???ЧЧ@рч'ЦџКЧ„рр‚‚“Щ„a˜бhЬЫЫУг1 cГй,Ыd2ЉT*з9N}}}C„ЇЊ:??Яh4тO„ФsЁо”SЇЇ]Œ ЎЎЎЎЎŽx Џ‘4бkЗlŒЁцaŸ"͘ЉДѓЩшъшrЌбgюлБ™TˆostUРчцЭ›aaaRЉд)ъН~nпЮ•Ы™Ќd?pэ@0р‘яТ ~sУ‹A€ЦТѕihюДiгІс…`VYYY_o’Ыхžv-J0 evaPš3O@‹ ##ƒЯчЧХХСяЅФг фђsh>Ь ƒ*ХјОxнњєфЖ`  жЇтi€x ž~tшЪКшХЅІ/љжeуТ љCУ/їфg ПМ 5џц%cгиСгr).х_ЏoЄ*КВ.Кўknžгvтiй=рјоЊ7нLъсЗ§|Т_uЩ;ўIшаЫoц[ЧїV=(‘hxcyФђi* k,С.`]œў=ТMп@вввЅЎдdъ—ЧRDђ_xKёй‰је3 дЙІц&№ШWƒЙ<ж­аvЄ`€xК%’qІцЅю7žfЄФfџМэОHQ[jYѕFС“тЬAЪKЋgдьŽWёJз 6+іљћХУ".їфgŒlseя–2ЊЊw}\:v†тБ$‘SњcIЂq3Л>.%*bXІw"бXР•E_Х|ЙЄX[jёШžT‚1ч_Њ'їМбл/ГЗ_цфž7ўњ­кIkRS0Боэ+ЦЗ†нъэ—љt@жвWT†ZЛ[ UЧQ zi™{ˆ8­ПzЗЋбh–.]ъzќhгr4 žі%7Г яŒЭ{љнагеЩыЖўvЕцьЁ{ЁЯ‹]Ў'ѕ№;ЅэИ3Ѓ}оЖy~‘уНјzЄыЮ„хЏ№…ьџЖћЋ.yщw­~и\ўгWфAъљ5Ч&|Рш€ѓ'j<-г ‘ш-рJфc‚џ›Ѓ\7Ла#{R ЦOж,zчхwC—u<^жqв;ЁK^О“qІ†сэ4ъо6Н1шVџбђЃ%~+x<Б›xщд;Žї’њН:nЅeю!Šœœœ„„зуG›–Ѓ)рќоИм.…~НBѓъћa§F „т;ˆцmŠў~]щ“ЯјуWvРьљŸFj{uоІ(З•FЧ &ЯХ“zјЭџ4zУмТ‘ЏЛцд˜#P…­ЅjГez*НHIMSNyтЦпGѕН†Ш<ВЇЇэE„Њп­-}sudпwŸ=њ ЈЎДюXSкЙЏДъљAёФЙЪч_ЙkЩQЏ…дVл7Џ{сnЅm`kœ={іФ‰zН>44tТ„ јj4ўџ† œNёу”””SЇN™ЭцфффБcЧrЙw‡‘ДД4<YіьйгёxйВeЏМђJhh(BшŸўщоН;BHЃбlнК!фХЅE‹ЙU!dБXіяпŸ••…JNN5jЧУЅ;vЌk~Gl6л/ПќђяПџкэіСƒїызІ"'­ˆЇЩ™єуЄF•faЧ…ЄщmкДyPЂZ %тЖŒ?jg­‰$в“ћHІцсЧ&Ѓ§ѓХХ'~Ќ*+ДиЌBˆЭья)“ƒO;єђЛsЃОŠ4АLšлi,@›олѓŸ1y;3к ХїХ‹в˜Дз•ѓu|ы˜ђTŠПгŸ 
МSџмёš7WG:^њbаІwŠтnЅѕ•‡фццІЅЅ‰DЂSЇNэйГgЮœ9x M„ХNЇ8Зoпž?>BhзЎ]‡NIIq Cѓђђ&NœшxœpћіэааPNЗџўŽ; ‚мм\|AзЛKnеA:tHЏз/ZДУА;w:thФˆ4љ9rфHIIЩМyѓС‘#Gш+rдhQР~†R]iM‰Э&6Ті‘dжVл№KkоRЋrLkг;­ыtыrовйngTfP(ЯёT(f›ыЩя цо&щА№ЖIХїЂLOEЂБ %‰П џrI s{6„кj[@Ч1Х?ˆ[ЃЃ,йщ $4ъы*ЌAЪћžKOНѓЗв6А5 ЦŽыяяЯчѓћѕыWXXШ№ЎQЃFI$‰D2rфШ‹я§A€Лѓѓѓ•JЅH$r>~џў§Ё .ЄІІž8qЂWЏ^*•jТ„ †yq‰Ё:555AAA„Р555ЬезыѕФНn+rдhQРњtCщкOzцgщ%Г уёYФщЏл+I€УВл0ЏkŸІиГЙьіч_oЙ}ХИwKYjšвŸ№P$ аУуГ|Нbzc$CSZCl•дУЯёЁП~ЋNъс‡Ч$ГЯе9^=јM%У’{’пЃuL9К[ЫФhдЁ—ж#6ќњМVЋ%ЄR)УxZ effђxМФФD›Э–ЬчѓНЛФєyO*%ЎЌЌ$˜ ™LVYЩд žhБxЖ>Н}ьv0™г‡ЭrK bїyж!”}ЎnчњвПЕF=1TЖ~NсЌ"x|жЁяЕЙй$ПYЧџћЈў‰aў,–7ЕGЗЮл5}РЭЉ Уњ TђДeжSщU[—•Ьл/№ЂLOEЂБ“07Љ‡пОЯЪ™”ж[Н8GБhBО Їћ@BшŸњЭѓ‹Vю‰УЏŽ}CБzFС’эБm:‰ЋЪ­ПЎ(ЪgњЧ„W‡Пв'GтЯ8VŽ:ЖЇъкПїBs Q‡^кЦF$•••) вS„аO?§”ššŠtщroУ:Огк`0”••ХЦЦ"„BэкЕ;pрР€B]ЛvнПŸ>}r нџ$)ЩЩЩщщщјzvzzzrrВ[ evыжmпО}ЉЉЉјўiЧ-N8i Д(`}КЁФ%Š6ўкњзэ•CТ. ЙДmYЩи7B№K ПŒЉеY‡GgЮОњOнтoHцкЗVGЎ|Н ;Чћ_Ц’јЩбјЫџ­иэFoПЬџыr§вйк-Gт‡ЄzW Ї"бX€ o, wќd5Mi БUВ…_Цl[Ўri`ШЅmЫ5‹ЖЦtэwwЉrдДрБo„,|1џиГе‚-ќ"šaЩQ­[ŽЦлS58єђГБWЎœЏ{ok q•ЦhдЁ—ЖБщпПџКuыˆ"NЇЁИИИUЋV}јс‡~~~У† sК§ж­[qqqЧщ!”PSSгЙsg„PчЮѕz=Б кЛKL>|ИT*]КtщвЅKe2й№сУ™п;tшаааа>њhщвЅrЙœ&Ї“І@‹‚5zтЬ};6ƒ!`‚лѕрН{ї*•ЪО}ћ:?кДMWр}DJМ^0€EsјХђœœœЇžzЪѕјбІхh @<§…&№аёо{я‘З­€–ьŸР|ћЉ€x žтщfЫЛOLћДАѓ#,љ#уM ˜њбрЬ™3бббєЭбе2™пK•ГQнЏ9ћv3ьЖЄeњЊЂFu&}№Јi ž†xS?мвЖX‡™7oо–-[0 пƒР№ШDМя<ш‘OЛAЃбܘ1ЃUЋV|>?00pиАa‡ЂПх!мцИ˜št4'ЄЅJoiScvvЖы/ я=ђтЕДв‹ўхжDMпw{дb§€€€qуЦ•——7БЇA>ПыђхЫєцЅ’œ80›ЭГgЯXЛv­[ћ0їRвVhˆ4A 6S“v.'ыQ•щ*€ы<сК^т$Їўь“w•‡J пzІы2§€м.ъh4л2Ящ=ŠОљёд%\ХѓдyЎnл”г •šє“іwЗг}fЇ2iœФбЅЖ&Љ:>؄d•••~~~ЄZИVWYYЌеjoW(Z­жЃpтi:ТТТ НŽЇчЭ›7|ј№ввRF3tшP"§У?4hZ­жыѕ“'O~§ѕзёєррр}ћіеззЋTЊЉSЇвдиО}ћпџн`0шtКYГfНјт‹†9rЄ]Лv6› Я3uъдUЋVЙ–€‹TZZњЬ3Яќч?џЁ !4~ќјВВ2 п~ћэgžyWsШ!sчЮuМЗАААЖЖvЩ’%нКucЈ&Љv4RййIqв{=оЕиQЃFдее-_О\*•Ž;–8эйГ'НyЉ$',X0x№`ЕZ]UU•––цж>ЬН”ДтMа‚ЭпдЄЫЩzTe’ Р0F$N=ѕg_ЕИ“ІІъ\N™ЉЪєењДGўьЋw]Ÿ&ЃQ=гэ€мФ.ъVfЗуЙ[ЂŸJМs цё4“к›ЗmЪСŠFMz;žіnЄ1‘G&mр,ьщ4MUнбЃG{ѕъ…aXЯž=322Мчрћt,YВф“O>YВdI~~Оеj­­­=zє(qЕcЧŽkзЎ5 EEEгЇOwН=55uЮœ9хххeeeiiiDњєщгЇM›vћіmЋеš§Т /љЏ]Л†?m[­VСŒFЃP( …љљљгІMs|о]Иpсš5k-ZФчѓIяХE*//Ÿ={і„ шErеШUТёуЧЇЅЅ••••••Эš5‹ъ^њB˜hGS Љ™рЉ№оAгтє’Oš4щЭ7п,,,дщtГgЯіТ>T^Jк qƒе‚ЭЧдT‹ЁqHРџЖшUКЇўьЋw’‡‰>їLЗrЛ(Cлв CяQLЊhМ!ŽЙ‚ЭЙл6х`EЅІ[ЈњЛЏ№­“PЉуЋQ 5ьTе {ьи1ќRFFFrr2—Ы‰‰љєгOIпZ}щЅ—D"‘BЁp|!дfГ­XБk5)) џўW†ЖmлrЙмФФФ#GŽау_=\и?PDАч=Њэеy›ЂмV/˜<џю’yRПљŸFo˜[8ђе`OЭuіьй'Nшѕњааа &DDDр+šјџ6lp:ХSRRN:e6›“““ЧŽЫхоuЫДД4<ŸѕьйгёxйВeЏМђJhh(BшŸўщоН;BHЃбlнК!фХЅE‹9ЉѓћяП“ FšnГй~љх—џ§зnЗ<И_П~NЅ‘fАX,ћїяЯЪЪB%''5ŠЧу9ншhзS„Z­оКukџў§ћіэыd+зAйэіC‡;wЮd2%%%Н№Т !TYY™žžž››kГйтуу_|ёE‰DтXЏSлQ•я$@<}I?NjTiv\HšоІM›e ЧE\c§ЪљКЕГдпЎв нfhHЄqvЦЕГжм ш“ћHІцсЧ&Ѓ§ѓХХ'~Ќ*+ДиЌBˆЭь/)“ƒO;єђЛsЃо сsssгввD"бЉSЇіьй3gЮ<†&Т/ЇSœлЗoЯŸ?!ДkзЎУ‡ЇЄЄИFЂyyy'NtŸoЗ3Шя†ѕAAzНо5CNNNлЖmŽуууїяпКpсBjjъ‰'zѕъЅRЉ&L˜€a˜—˜ FšЎзыƒ‚‚hд$ЭPSSC$еддxдЇOŸюж­[tt4Љ­ЈZЄККzХŠD‹EФСПќђKaaЁйlvLЇД|'тiр>ђЎƒУx 3М№–bЮˆлƒЧЫ§ƒю3ѕ7+5^тЃk?щ™ŸuN;4pЬ&ŒЧП§КфЛ~lЫnУи–wЕ“†б DЋеуRЉ”4ž&vAЧ (((33“Чу%&&=z4;;;88˜ЯчуБЉ— Fš.“Щ*++CB(ŸH3HЅRЂДЪЪJRѕйlЖйlЦ%ЌЋЋsМєц›onйВE,їяппеV4’Ьž=лппп)§лoП}ўљчлЕk'L&ОЇХ ˜Ш-4žо>v{K6VНС~х|нšЗдЉГ 3Д~\4љхдо9Џ/ я6@&–Аo^2~ЛJsr•зёєДХa3†мˆи}žѕGeŸЋлЙОtуo­BO •­ŸS8ыЃŸuш{mnЖбѕіˆ8ўпGѕO ѓgБšШn"‘ЈЌЌLЁPž"„~њщЇддTќ K—{лЧёнКƒЁЌЌ,66!фxŒjзЎн €ъкЕыў§ћћєщгKшўЗ§Ј#Mяж­лО}ћRSSёэбФŽЂ@в ЩЩЩщщщјъxzzzrrВЋ$ЇNъзЏŸбhLOOwДm@@Р›oОљЩ'Ÿиэі:й‡Š'Ÿ|rЯž=ЃF ,--=vьиЄI“B‹…ЫхrЙ\­VћыЏПКmJRЪ-4žn8~mƒ/dЧЕО8[ём”`цFНвњqбŽ5ЅЋо(аWн§НёOOxџ†e\ЂhуЏ­З,(^іЊЪfХ’zјMœЋФ/-ќ2fХkЊсбй<>ыщч{ъЇ,ЇлпZЙђѕ‚RЕУPгќЬxџў§з­[g2™№шащ!Зjе*ГймЉSЇaУ†9н~ыж­ИИ8‡уtŒJHHјэЗп:wюŒъмЙѓˆmао]r65…`ЄщC‡=x№рG}„ќSЭ:u>|ИыуЧпН{їбЃG§§§xщв%ЧЋўўў3gЮќф“Ol6[hhЈЃ}Ј8pряПџўЩ'Ÿшѕz…BA“ššzрРoОљЦппП_П~™™™єMIŠSР#kєФ™ћvlC зOF8Бwя^ЅR‰ Юё ЗUK–šXŸІФэw [MГ€MONNЮSO=хz алЊ%ЫM ЌOЗыгЭјў4№€`ˆЇтi 
žRXMіСэцW{3QЁiŒ№0šњp‡…3gЮDGGƒСЩ3Љђ?pƒ7Ÿї•$рУЭЭbOŒђ-b˜†щчQ2ОзЮ›7oЫ–-†AЃ€+Тј@<нtЯ˜ХASŸг чѕG5д№N/†^gsЪМfЭ.—ЛfЭšІЏиlЖППrrђќљѓЫЫЫ›В/“Ё‘œЪыГГГ]4 †И8ф:ЕfѓїРЭ ž~dћX†иэіЯ>ћьу?ўќѓЯэvћџГwоqM]? {Šl" *(ИЋзыюYл:pUбj[7ИGе:к*`]Е­b‹ж *Eйіо$’ћўq1ЩН77Dё|?|к{Ÿ<у<ПsrяЩу“›wvН’H$YYYG/_ОD_Ш" љ|>ъ€ цгMŒX, 277зее0aByy9Yž––6|јpmmэСƒчххЬ‚єЇмGOйђ;wкккrЙ\†Q._ОьююЎЉЉщррpфШEѓjjj–,YbfffbbВ}ћv•ei‡У9x№ ƒƒƒІІІЇЇчуЧ)g*7ЂbК!œœœтууЩ 'Nœ тууœœи(РсpBBB,,,єєєfЮœ)‰˜?§KO•іLщbEн(]” p8œЂЂ"33ГттbiЭЂЂ" ‹ттbЕУ@@0mк4===KKЫ6ЁHˆDЂ3f eYыc•Ђkњєщ.­№њѕkkkыввRйV”=гйO9Šl:ћЧеЬ\О|йФФdС‚ІІІ‘‘‘Ьs‘ђшбЃV­ZэоН›eьQЂЇЇчщщЙ{їюYГf­YГFšGЮ™3ЧШШШШШhюмЙBЁ§ЌщкЪE>›%7КK\АБRY”Ц’мдšіbN_SF‘JгЉч…‹СHцЫ/5(-aB6ЅtоTu Ѕз=:KX†™ЊRгеW)fоШpttдввrwwŽŽ>~ќИ‹‹ ЉI\\œїЪBКYанЮXN„R1:Г|уH§ cІЬ'фжЇIж­[чыы›žž^VV6}њєЯ?џœ,oпО§ѕызЋЊЊJJJ-Z4iв$ЪNшN`ќјёyyyЬЃДlйђЬ™3BЁ№еЋWГfЭR4oеЊU~~~щщщХХХ* ЪвВZFFFEEХwп}чээЭ sЪ!Ојт‹§ћї‘žžn``PVVFФО}ћОќђK6 РаЁCssssss‡ ђеW_)ЮšR Ѕ=3ИXV7:0ˆ6oоМ-[ЖHкМy3йPэ0eщвЅ#FŒШЫЫЫЫЫ6l˜RЄЫ—/'•ЬЩЩ4hД\mЋ(]щъъ*‹Щ:ГfЭ’•‚Ёgћ™€Ю~6яцk…lх!C†=z” ˆcЧŽ 2„Э\~ћэ733Г .А=цыеЋWЏ,--Ѕa0dШв›\ЖlћYгЕUМ€а)&wЭQ|#а›в(•ЕM,ЩvиДs:ијš2Šд˜Žк.†>е{ї)Н‹1ЁEч>URzнЃГ„e˜Љ*5]}•b†сB7zєшзЏ_WVVnмИбРРРпп_zкН{w5ю”…tГ ЛБ™btgљЦ‘њѓi%ЁЃИїУоо>!!<ЮЩЩБААPlXYYiffІъ%833SZN7Š­­эоН{гггщlnеЊUbb"ѓ–nP–6@aaЁtІ<M>Mй„rˆ_§uќјёAlйВХЬЬь№сУAŒ7ютХ‹l€ффdђ8))ЉUЋV,/ЃJ{fpБЌnt.`эљѓчvvv555Aддд888МzѕЊ>a ‹Mjj*yœœœЬ>SБББ‘*™˜˜Јє- ЖkМННOž<)-ЌЌЌ”kHй3ƒ§Ь@g?›їKRSSЭЬЬAРЬЬ,--y.ЁЁЁжжжїяпW)і˜M­ЎЎцѓљфБЕЕuRR’д›666ьgMзV.ђйЌMаНш‚M>-kƒвX’эАi/цЊ^g”О#д˜Žк.†>е{ї)Н‹1ЁzEч>URzнЃГ„e˜Љ*5]}ЕcFNЅ‚‚iчrЇЬwv:” (; К뛉а)Fчq–oЉ1ŸVy}šЧуЩ§ѓY~ћіэоН{ыщщЩ•ГПГхў§ћУ† 355utt$ѓK9444ШДLAYкРВ76PQ^^nggGD‡~џ§їž={aggWQQСFЈ­­%kjjг}:c”іЬвХъЙ`№рСсссA„……д3 фь‘„}ІBзPmЋш\sсТ…ЖmлжжжNœ8qЯž=Š ){fАŸ9шьgу8–,[ЖLюгјђхЫ™чтшшDз!]ь)]ŸЖВВЂєІT6ГІkЫ^ЅQЇv”ЪUPKВѕ›іb^_SFQ=ЇЃв…‹ЁOѕо}J-a„ь#ŠeCЅБЙ)SZТв/ЊJMW_Ѕ˜QѕVЮвЫ”6PвЭBеЛ›ЧйПqШц˜OЋœOлккfddP~ /))‘H$фŽ=КkЋtЩЄ  €Ўн($‰ф?ўўKЎвOі,eiCцгtC|њщЇ]Лv%ЂkзЎ.\шзЏKd?ƒ&''ЫЎ.0ЋЁДg–.VѕŸШƒ+WЎєшбƒ ˆюнЛЧФФд3 фжu(?а+ йe•ЄЄ$Ѕ^Sл5‰ЄC‡Ы—/wpp‰D,{fАŸ9шьoЈѕirAњХ‹в’ДД4щr5х\ ==нЩЩ)$$„n‰‹2і˜M]ГfЭ”)SЄa ЛHfmm­вњ4eлЬЇщ‚Mi”*О™cIЖўћp1WЯз”QTЯщЈtсbшSНw›ѕi6AЈ4ЂиЌOЋ4›Ѕ%,§ЂЊдЬёУ2fъ™OЋq+Є›нэŒЭDшЃѓ8{‘Эёћˆ*3oоМЙsчІІІжжжЦХХM˜0AКM^[[[[[ћХ‹sчЮ•ж766&џ‰‡ЄSЇNлЖmЋЊЊЪЬЬœ7ožЊЃLœ8ёйГgфgїккZХ†гІM[А`AFFFIIЩтХ‹U”Ѕ tШЭД>C 4hЩ’%“'O€I“&ЭŸ?ŸмvЦFXВdI~~~~~ўтХ‹ф^ЅSCiЯt.fуЅјљљ•••…††ъыы{zzЊ”_››0aТвЅK –,YТ>'NœH*™——XџрЄs ‡УYНzѕж­[ПўњkMMMJK{fАŸ9шьЇtœbT+§nbxxxЗnнЄ%­[ЗіђђŠˆˆ`PЉUЋVQQQ?ќ№УцЭ›еŽ=’ЊЊЊиииРРР~јaнКudсјёуЩ}‡‹-RiжtmК`SѕкЅ4–оŸ‹9],Бё5eЉ:њ\ИњTянЇдUƒ.Ђ”оАдˆљКGg Ы0SUjКњ*Х дяБ}*н#( щfAw;c™ЎP*Fчq–oЉq}ZхѕiБXМiг&{{{>ŸяююNў=AчЯŸwttдааАГГлГgДэ† єѕѕЅЇ111žžž<Яоо~џў§tŸxшF kлЖ-Чsss‹ŒŒT4ЏККzбЂE-ZД011йБc‡JƒВДЎ•мLй|ŠЅ"66–Чу‘ћ§sssy<оуЧY*СССфї|ЇM›&]TъЅ=гЙXnv”.`ГЈј№a.—{хЪѕТ ::КWЏ^”П&OžЌЃЃcnnОqуFщ†ZЅQ!ІNJ6 Qъ5Е]CФ/ПќтььЌјМ =3иЯtіS:N.ЊщD–ХЫЫывЅKr…/^$ПРD9iџйййЎЎЎыж­c{rђ’wA}}§N:}ѕеWЙЙЙвWЋЊЊfЮœi`````0sцЬЊЊ*іГІkл€ыгtСІ4Jm`Ž%йњM{1Ї‹%6ОІŒ"UЇSŸ CŸъНћ”ZТ2•F7Uvі7:KX†™ЊRгеW)fш‚“хњДJїЪBКYанЮиФbtgљЦ‘њ—3fЪќ3?юХUgЄй@nrњ'юууГfЭš~§њ1дIHH2dHZZкћfќˆ#&L˜0qтФFъ_"‘№љ|БXќDFšі]миБє.пА‚СљЁмЮx7в<ИqунK‹/^Лv­H$ZКtщШ‘#п+Г%Щ?ќ’’2~ќјЦ%66жввВQEF>’XzoXСрќрng˜O#HѓЧСССХХE(>\КЁі=ACCУСС!""Bщo‚д‡^НzmйВ#Ёyѓnb AМ)‚ћ=AAD}№C<‚ ‚ ‚`> ‚ ‚ ˜O#‚ ‚ цг‚ ‚ ‚љ4‚ ‚ ‚ ˜O7ЅтqюЯ$bъпH~*X№bѕгюš1ƒЌŸЎšј"щ‰@Ж‚чЫjВяфїŒ ‚ ‚љ4Ђ„_(ШMЏО~ЖDёЅЋЇ‹П№MrяІwт^ЛшJЯяЗыиCoўРфЋЇ‹пЯЙ4^іј1‰tњ Ѕ]?ЙЮ‚ ђОПчЂ‰ЮШпrкёаwYОуLd_JO…Ь}№†KwВФМ•ц„…цо>ŸѕKjзYзжI DAiЦрњДrn],iуЎгc ЁžЦг*e_:Е3зџKsi2-ЅЛЮИљцЇvц6’I1QхSЛ&єдŽцїыВ/хжlљтu/нX_‹'С_ОVId_%ЇН8Ш?iЙИ–8И6kАЭгюš1Ѓ\ў=šз€жЪ­ЇЊЗМš““Г~§zХуїgAњcSAA0ŸnТvчš€џ—f'wМ•"пЛV>`Ќ1eЋўcŒя]+o {’W­№O›dyГдsЧEЇуС9З/•J_дхЙ{7НПŠ:Œi_Y&оЛ2SЖэCЂ љ_ђOZОёГзšкмИFWzЎџЉuјоќѓ‡ о+/$&&ЖkзNёi*/ ‚ 
B‚ћ=”ќTPQ*юмз>љŸёŽХY/Ћ­4ЩWs^WЗjCНЃЃU­мєj5FTКПљшІœ9k­њ2чŽ:Ыїи§М=Зз#ђеЫЩsюЪ§vЃлЦ/пcЋtP;g­щ+-Щcїnz+їлэZ–1jNKUП}ћіЕkзЪЪЪ,--lllШЕUђПЛvэ’;%‡ із_UWW{zzњћћѓxuaHж!3ЙюнЛЫoиАaіьй–––pџў§Ў]Л@NNЮ‘#G@—ОўњkйЙž;w.%%E,;;;Oš4I__jjjЮž=ћјёc№єє=z4ŸЯWЊH$’K—.нН{W$ЙЛЛO˜0AKK‹a ЉrŠбѕ/‹X,ўэЗпŸЏ­­=tшаФФФКЯ311ЃFвзз7003f йюоН;fЬccc--­aУ†=}њ”y њшќрСƒБcЧышшŒ5JЉхуЧ766жддьлЗЏH$ђїї—žІЇЇ+zAA)Ипƒ‰sпчgПЊюЁ%Ÿ0]8R0i‰tэop§l‰ЃE†q§lIЗa•БЦЙ$wC ŠAЯŸKr71ЋskЮыъ]њн8ЙS]]]ђ@SSS"a›‘ЗhQї‰ХддДЌЌLБBbbbлЖmхŽЯž= >œ8qтЕkзzєшёъеЋ€€‚ дxInа/^ќілoеееРсpШђђђrSSSЉСххх,u(--нДi“ДŽДCКPЊsYY™дH) –ыщщI;”;•і/ыAA0ŸVNm qіћ‚‹/:HwK“dЄŠОєKžИШœЋС 4Ÿй+бgДБм#>RџœЭ;zЇQОЙхеЯ ъзЙ$е"‚Џљ_6іћ‰BХ:\ ŽDLp58ъN™Fз“ЂЂЂ–-[’”љДьЮђXKKЫдд466–ЯчЛЙЙ]Йr%..ЎeЫ–dFЈоKВ?~|фШ‘ЎЎЎZZZ"‘ˆм‘Rƒ ) ІФааpётХFFF,Њ†††………fffo}SзrE/ ‚ Ђf>}ТџФЧ#Эѕ3Хm=tф’ihеFЋЕЋіs%ќMь\Д—яБз?iжj+ŸбЦ-,јEyЕ+>В!{љ[;чFyјємoЌО˜ЌЅУ§фFwЗђфŽмн8@ЯA†;–d, Бсkr.§\”'PlnуЈљЯ•Вžƒ8œwЄЄŽŽN^^žЙЙ9х)œ?~тФ‰фA—.џm'ї WUUххх988€ь1ИКК^ИpЁџўрххuіьйO>љЄ>/Ь— kjjx<Ч+**њ§їпЅ<==Я;GЎgŸ;wЮгг“ЅНzѕŠˆˆ=zt‹-rssџќѓЯiгІ1 D' RуНННЯœ93qтDrџ4ЙхCmЫ•GAD юŸІ%lwоИљдЬјцвч œиbпчЇw*Іx'єж‹мхљ“лЁ‘ЮВЛЎAц‘ЯŠЯ~VG7нП;§~Ђp егfO~иэџEн2фъCі%ЕCэт†кХХпЏќцEіГ0ИецЯ_wеxєЮ~fмЧЧgћіэв"rЇршшИeЫ–uыжщщщ ŠЧHSyAAЬЇ?˜T€5kжP#MхAAdС§гHг AAС|AAA0ŸFAAЬЇ‘њРЉЧsž9яьб,ˆŠŠВГГ{ЏLzOмд œл„slšw0cˆ"‚`>ї˜ІБDioЫ—/ %•Чy}шШѓ{Ѕ?Њ цгЭ№j. Yи„ Ѕ k‰вотттlх]оMпх6ј цеЬ fЉўM’Н}ˆAўю/q˜X#ђžƒЯЫУ\‡-BЁЯчЃ3‚ ‚Ш‚ыг*#])сp8tppаддєєє|ќј1Y.‰fܘЁЇЇgiiЙuыVЪNввв†n`` ­­=x№рММ<КЖEEEfffХХХвЖEEEХХХRKjjj–,YbfffbbВ}ћvВP,™››ыъъN˜0ЁМММ>ѓ"_•Ўг …Т9sцЭ;W(JлюмЙгжж–Ых’ЇpttдввrwwŽŽ>~ќИ‹‹ йs\\ƒr#ЪZШ0:ЅGи(!!!zzz3gЮ‰DJЧRTOбfYшCZљђхЫюююšššGŽaі#нDTъ„В2Йъќ :Шe| ‚iгІ‘ЭCBB˜еcuЉ›ш|G7G•ФdЖŠ!Ш9e@ВŸf#ЙO%YœœœтууЩ 'Nœ тууœœиЬ…Ю5t.`yСQICAЬЇ›7oоО}ЛИИxдЈQsчЮ% зЌY“ŸŸџтХ‹'OžмИqƒВсАaУѓђђrssлЖmЛdЩКЖ-ZД;vьЁC‡Єm:`bb"-љілoуууccc_Мx‘‘‘AnкДщбЃG111ЙЙЙ:::+VЌЈЯМШuz‚ ШƒЏПў:+++999))щѕызВ?іqяо͘˜‰DBž^Лv-**ЊИИ8 `Ш!—.]К~§zII‰ПППT1J5фF”…atJАQnнКїтХ‹мммЕkз*K›йЦдЉSПћюЛђђђ[ЗnнН{—йtQЉЪЪ,хR;јъШ]ѓввв—/_ЦХХEGG3ЋЧоM”…”sTILfЋ˜†2 йOГ‘мЇ’,Мuыddd,XА€L|ЃЂЂ Ф~.Š*1\ми\pTвA„cІЬ'/ыrWіТТBђИВВ’Чу‘Ч666ЩЩЩфqbbЂД>•••fff mŸ?nggWSSCDMMƒƒУЋWЏd-iеЊUbbЂ\Зііі фqNNŽ………вЩ2ЯKv"жжжIIIR;mllЄu233eћ,((v%w*э™N 9щЄЇ ЃSZЮFy*Ÿ””дЊU+ЅcбЉG7]`Hlmmїюн›žžЎЊe'ЂR'”•YЪЅv№3ш <” лииЄІІ’ЧЩЩЩЬъ)vпбЭQ%1•ZХH”Љв4У}*ЩђыЏПŽ?ž ˆ-[Ƙ™™>|˜ ˆqуЦ]МxQ%qф,ЁsЫ Žк"‚а^Џ0Ÿfyгesї%444jkkЩуššЪNnпОнЛwo===щ?S2ЗMyЊ’˜J­b$ЪРSišс>•d)//ЗГГ#ЂC‡Пџў{Яž= ‚АГГЋЈЈPIЙAщ\Рвƒ*iˆ ‚љtгфгВkmkk›‘‘ЁЦdйdŠжжжВыXжжжѕЩЇщд`XŸVctЅcIћLNN–ЎCгХуё*++Щу‚‚UзЇe+зD"‘ќёЧ–––Ь~Є›ˆJPVf)W}‚Ÿt [[[SЎO+KCхгjˆЩ`›ѕiй€Tišс>UeљєгO#""КvэJDзЎ]/\ИаЏ_?UХ‘”n,•Ў?,5DaюŸnx&NœИdЩ’ќќќМММРР@Ъ:@[[[[[ћХ‹Вл|щкњљљ•••…††ъыы{zzЪѕ6mкД ddd”””,^̘,œ7oомЙsSSSkkkутт&L˜ је.Е?~<Й/3//oбЂEВЋЦЦЦф?:7ршtcЉ|~~ўтХ‹˜ЧъдЉгЖmлЊЊЊ233чЭ›Їдf61qтФgЯž‘џдP[[ЫьG†bп ee–rЉќlъ0(яЁппч6xЯжЛAiШ|:щпЪЏŠDТ”ЏЃoпmWљЏuт9ŽŽH8PS3eо—ѓ|]Г_ЇvыюнБЃ›m њ6Ѓ’%dz*їзTГ~—Йђ”С|ИR4[‚ЗэпЗ'%)Џz˜#‚ ‹ ћ=кДїОyе№е-3= ЇWКZh–r[psГ[r9„–ЧШzъТeлœ2щ3ЄЖЖІД>*еHS0ГAо%ЙЙyžPAipTXŸ600чяязЇ[кИжцЯЪДžg—–ф!фЄˆs_JŠ mлyhhhp5x<-Уњ˜хфтvёЗ?†лонгЃsЗљ ‹ŠŠШ—RSгfЭ™чбЙ›Ћ›ЧˆQў\К,m~КПярvэ;ѕы?№ьЙ В^ПўзАc\н\ЪBбxхЯ?ўёЈ™YЫƒпž5чГююЧ&OП§vУёc‡ыcићя\†Ыe Њ-‚ ђсТ3eў™ї2Єh$еее—ЏнrЉMu=П\lU\\фmNxЗЕќ7НЌИRмЮкЄЅFХKЋ~хЦŽ/3sкЗБ50nСуЉœЌKoNN.nW.џжІ#Y^ZZжЛO쓇ржЁѓk‘цŠm=мЛwOђєж­ш=ЁћЯœ>І~ГvuЛvmЩ— ‡{'њ/№?щѓysEЂRŽKЙЎL6;. 
pс|YЋfЮўL:GЙЅмКy9Ÿ–#лЁџјIББЯŸ;нСн тў5zаl8)**NLJINI524lлжЙЃУЏѓШYюфтіgфяŽŽ­)+зГg:чŽ=nqр‚О}>!_КukЯž}чЮF0ˆџўУ5kПМќ‡У€€Щгз~$ 1X@иЗŸя§Л3ODкCИ6”… ЂЩ9бЩХэц?[ЕВ€ЪЪЪNž]eOЛїьїфQ§ {џKw9R T6r5дЧuAф=mЪ{3њЎ‰(лU’r­Ъ6!3w• ‹ƒElJnJ ЧХ\ПЇєNОжгŠRУs3cAъ­vƒyF-ыc™lZfdd(ШуIу‡3h _ЇN{ішnii!­цхеYzьээ•Hџ?b”?o яЕ№ќyBюнъЏ#нн1!!QЮЊwяучЯ ­‹3y*=PфьљпЁ‹s›У‡ЊГcЧССО‘zІsnrrJЗЎов—Кuэš˜Ш,~зЎ^:КК7nмьпП_zz„bОUOƒ_ПN_ЗaгУ‡1 7†‰Haз†ВA4E'’щ шщщЩ Т1ь§w.нхHUхBAš>џШбBџif5W,ђrl™њЏоƒфМСf%іцї“ѓbŠД­MM ­А,пжwtЯ< HN€F=-ЃЬ!`UаŠУ‡нљчоЭЈ[›6Я™3ѓГЙГ™Л’HˆћwЃŒ бп,•чGНЖ\.З‘znXfЯš~јШбў§ћ;aђЄ‰ n№тЅ_ѕэг{ЫІ -Z˜бжЕ#ћЖ*…kcHЪьФwEяЁsщ.G‚ JП(сpcžФНЮЬщнЃ[э‹ћWПўФ ЇГƒсНЄМЫ™ZF-Zv2jhё_Z}jjnI&грмЮгРШЄёŒvsk?gіŒ=ЛЖ_8џKшОвђ‡cЄЧ<”ЎKЙЛЕПv§eWЎЎэюоЛGyћ$ˆx†vЛvmхЌjМБшpum‰IЩфЉє@‘1Ѓ† 4 ЖЖцТХ?~§эrbRrMM s’СвђњїLч\ggЇћїџSѕўƒвxё˜“ћјЮЛњ6ИСџўџймй-[šrЙмИИйLD CИжЧBYDЋ?jіž;ЗAо‚ ШЧ›OЗmящх3fФ_ ПxыbXнож№Џgљ‘љЦ[[5Эз72Љь>/OФwВЗ|7Яœ§Yєэ; ЊЊъіZЕj%}щ›яж?}W[[ћфЩгoО[?{ж В|бЂљ›Зl§увe@ юќswімЯЩ—>џlЮкoж§sї^MMЭЋWЏ—}D–[X˜пЛї ўwЪй3ЇЫZЕілѕŠuи5cЮBЅuZДhЉЉiв’9Гg@pШі‚‚Т‚‚ТрэŒЭMztя:9`\ЇŽn/_ІŸ ћхЏ›бYY9”•URЉž=г9wњдЩп|ЗўЩ“Їu/}ЛaњєЉJХзаа˜>mЪВх+?љЄЗ††FƒмКЕУЉА‘H”œœђѕšoekвMD CИжЧBYDkT5ь=w. uЁ@A>h”яŸ&џЁ“S]eЬщкпK.zQЉ5оCгZЇ№QЕ­Gпљ\Ў†˜УЉHНiкЖ?Сгil‹ЇN™ДџРЁЧŸhjjz{uйПoЗєЅY3Ї.Yž™™eeeѕхŸ zГ4еГGї§ЁЛї„ю_Д† ˆN:Юž5|ЩЧчгšккM›CRRR-ЬЭПќrYО|щтЅЫWцххСц›CŠ_I$[ фW^^Оpбвœм\ыЯцЮZГі;ЙšьЧRњЕQ\4ЧŽн“крч; xЫЦа}zїёБГЕ7oЮнЛї”|ЬтrьэьэBaJrкю3BБšЊ*еЇg:чŽ9МJ XМєЋЬЬ,ыЯчЭ‘>Ї‚Yќqўcvюк;aМcВeуŠ ЏЗoГА0Ÿ=kЦКѕ›”FЉ†p­…В0ˆжжYіž;—5о‚ Hѓƒэѓ=$‚ВТЋћ*^'з№лК8‹lН&fTъZфУерVVVЄ%>ВuьdllмT3СЏЬ#*qуЏЈK—#З…lF)аЙ‚ Rи>пƒЋchкž‰ Т†Џ]Ыг‰ŽКЋkц0Дg7Ўєєє;tю‹j" yyљлЖяDœ\мtttvямfmm…j sAфнхг$AиыWИZРћімь`Ј ш\AфнЃђƒf9N {їдќ>;‚ ‚ ‚ЈО> ŽmpЇ‚ ‚ ‚ЈБ> ‚ ‚ цг‚ ‚ ‚љ4‚ ‚ ‚4)uћЇoнМ†Z ‚ ‚ ˆ:љ4љ‹т‚ ‚ ‚Ј“Oџzў\3žaqII?mm ?74сч‚ ън- ўКqУЄщ~ЁAAi˜|кЦЦЂЯP,ЉебжврqЩ4šMМOA&ж:кZ††њЭ[|AA"ŸЎ­7уJ$  “™4‡У&мпBo–Ј ‰Єy‹ ‚ ђQфгЭ‰D„„ €рp8d:л4)5AH“jYлAA;ŸnопG$$Рy“Йr8‡CдэЙh8Р!о,QsЙ\B‚_EAљАљ0ž?}љЪѕЫWЎЋзVBH€мѕёBB4 Bж ЉmЭ•э;їnпЙпc‚ ‚МcКѕјф]Їђ~ŠŠЪ”д…EХ555<Яа@ПЋwчїZQЂюћф9офБ6­l 3#]ю”Rn‰мiFњ+ђИ•­=HOЅ5янЛпОНk\\™LЫѕC(Ў=PЗйƒУ!‚|‰DЬ<ЉмМќ№ˆГКК:#GќЯТмЌЄЄєюНd“+^+,*ž0ЮШШ№Ю{в~шЪеЃ>Н-[В Q=+Ч’Р/‹‹K~Пtх—3чё†*і IDAT§ЧŒДГkE&гчЮ_ДБЖš3k*\ќэђЙѓЧŒ.MЉ 33Ћ  РЬЌenn™L7ˆIёЯžїъйЧг`п!ю§AA>f„BбМЯчЗkыВ/t—ЅЅeNNЮ‰ўќ‹GязввRщ6zџю;Э­Uл?”"‹;ИЗ311}OфK555qџ&фх€ЙYЫюэј|>ќqљП_^фѓљэ]]ZйX@VvnbRJU•€|iшр М‘‰ЕЕЕёЯsrѓ%‰i “юЎ::к 6 ]&Ш$С‘ЯЇ †SЪЏ.]~8ztлж#GŽzuщђ№б#ЙjМy6A~ ‘„2ёџўћNMMя€!6жVаВЅщџ†"_*.)--M=]]п§ЄMшЪеƒЎЗъъъПnўœœ* `љв…Аuћi…хK’ЇŠ/™™Е9bЈБ‘‘ДpлŽНвšjУхrMM[єїщ~цЮ?їьэmрŸЛї ‚јєгOєѕѕргОНO†§rїюƒжів†жжV1БO ћФкк*++тKжVV‰IЩюnЎВб+U_П•’š*‹m[Еђѓѕ144hXAфхфЉ0ћЕkV‘ЇvvЖkО ZНідЉˆ™3Ї€wЗоA+—џќsXvNŽ……љьYгџ7tY]Лзэёxp/кЛ[яїЂЩгsч§щчS99Й––гІN9bYюн­їњuпќ|2ьХ‹|ОfЗЎоA+—РЫ—ЏvюоћфI\uuuЧжSЇLђѕэ/gэО§ŸјY|КИИL[ДPlŸ“лЅsG‚€˜иЇР.žЅЏvъаоШШ№Vєнч IЖ­Ќрпјч‰Єgя&Ц,;!§7>!3+Ї{з.|>яялїbŸќлЋ‡70&дЎ††tqZq‰кжЮс­<љm0Š%_ЮџbоМ/>Ÿ7ятoП:tpЦŒYrе›ШnЅЎ› ЁD|rЙдЮЖ•bЕV6жЏг3ŽŸ8emeйКЕCgЯNZZš хъAзлеk7Ÿ=OшбНkЯžн4ИoэТшзПc7ЉСВ–>„П|уЦ­1Ѓ‡ЕlQШЖн№еВEѕ’YYZ@Nnyš››цfфЉ……ЙьЋ$нКvљэїЫоо]“†rўТя ’OwщьѕїэюэeЭЛv§ЏgЯЧћввжўёЇАп/]™4бПau@A”kзџZћuм-xЪЄ‰ы7n™5k:yњгЯЇ6ЎџжХХ91)yѕзпшыыїћДяУћЗНКіzxџЖbbpщRф‰?oмј]ЛЖ. 
‰ЋОўFGGgа@_ВЮ?ž\ЙbYћіэ**+їюнПqsШЖЭА|ХЊqўc6o\ЇЉЉ™””|тЧ“~~фSС/цхцц]ŽќГ.ŸVљŸ™Љсœ—WцццфŠp^^lkk.—ее5d9‡У‹kcџkmeaя`ЋЋЃЃДђ8'7юоD–•””2лO€ЄюС\2ц(>‚:''‹<АДДWўkšŠ%ƒ ВЕЕ|Д_ПOЉoЃ„EќВrхВм CЧ+W,џўа‘AƒќШ*›6Џkэр&šš‹/2t$й*++Л_ПОzњњрцюВ‰*ярЌ]ЛК А№СƒGЊ}бФФИ  АЈЈиЬЬTЅ†ђЩeзЎ““Sѓђ Rг^цхєэгSЅЌi _?й­ LљєO}nШt…Ус|ёХчK–,нБcЛЊk™в „Вя#киXН|љ:==Гuk{Й—ДЕЕњіщнЗOяєŒЬАА_22˜ЫеCоцп%99Й`aYїЋ“ц™™YyyљVV–——––цrЎєіюyхк іё…;{ФФ>QќЌМpСыщ‘””Ь\?55­K—џžœбЅ‹gJJЊєдСўПќЪРР@ Јл‡ŸП5dЫќ<рrи>5ЯЭЕmє{qџ>ытйЩШШЈВЊ*)9Ѕ‹g'А07ЫЪЮЩЯ/ SD s3йn[wѓюR^^ёWTДHTMВщФвТ"#3+-эU[grЭ[iъ рr8џЇјF"їэsЎBREY2yRРфItе›Р!П“ШсHЄ‹ЈЬткч““™^Н1|иK ‹’’’лџмўП!pцм…юнКZYZp-[š’]б•Ћ]o.ЮNёЯž?zлЋgwюл+ёrУЩžІЅН$?BикЖ"Ыy<^mmmee•О~}?сЇЈЈјњѕ(‡ѓIЏdџН{ѕ8§ЫЙЈЈшУ‡@TT4‡УщеГ‡\\ytъшбЉ#У,д€ЫсvpkчЮ]йкФ?{~џўЃOzї’§й€: ‚ ШŠ­mjJš›[ћЗт”4iк@оOeeoйtC Ў†тэ–- ќпСї<И}ћŸ;іL:yЦєЉ”6ьоЙьВRЃО}z&&&п{S]]ЭчѓŒŒ Щцм9ˆ}жж–нeЛU<Оpё2yЊЇЇыюжŽ}'мx<WЏ3Ш:€‘У3и,‘ˆ9РЉ­­ ‡ЫсpИ\љ\W,Ўe8eY"Whna%=ЮЮJ‚‰„|іЕј|M‰DЬ,ОЕЕхŒi“џŽўчьЙ PKSгТТœl"K.ќњ[ee•†††­­Э@ПўЬхъAзлрAј|ЧЗям€U+—RњZюєТЏП€™YЫ~>dyЏžнюо{Аoџ!ЙNд dл.RŸ€‰ўіvu;lуш0~мшшлw|џXZZŒ7кБЕ…Ь…Њхг\Ž––І[{з{їJ;P|}ћџ|2,xЫйТŸO† є Н/?~ќЄGnфqьуЧвеUщІ^Й[ЙSטии^={…1ББЮЮNвоoїв’іэлЕoп2ГВЦŒ8kц4:ГMMM8cІЬ_Лdj3іMLьуџ ! eVІ9\nќD"!ЗxЈЕДЕџэзЮžУ;dуцmА:h^,AЁD L6ЋН[ћй3Ї[YYfgчќpєјѓ„Ф!Ÿ—зЩГЋ­эцMыкЕkћOH\Бrѕќ/?2x l)в’/ц/š29РЃSG‚ .GўyђTјЙ3с –П7NAвЏЭqЙ\.—л$SцrЙф/#ж=лCц9 јуъ‚ ‚аЁЋЋ{ьшсЭš3Џ  аЬЌЅя€ўGј^іKGгІNZЙjMVVЖЅЅХм9Г|д=ЦnбТљЋОў&??Ÿ ˆЇHГŽaџ*ƒV­ЩЪЪЖЖЖš=kЦа!ƒ2В$`тј#GŽ=ћW“ЯямйsзŽ­Ь9 gЬ”љп.›бŒ}ѓрСЃaУG‹ТœКч|4M>Mўт8ёцIирќvёWoя.јўAAQJ‡N]тž‚ ‚ Hђ~&NЭџtчЮЦVжж|>_щ#Ÿ‡SSS“•џЏ—Wg|c ‚ ‚А!ёљгїг0оЧ О—Wч‡bянПџў˜ЄЏЏ‡Щ4‚ ‚ H3 љЏO“xcђŠ ‚ ‚4R>}ы  ‚ ‚ jцг3ц,D!AAD №сяХ 5Nн_у ЌЦKJ_m.О&џ”6ЖЈg}ДRё§ ƒAѓщцЦѕ‡Щ!aQŠз&з'Qf A@у=h„ VМƒЛ2йќ­N(?%ˆD0w.‘|іTWSЎXzzАbЋъ'ŽЂ>”…JƒGЁЅђЈчћ­g“dљЭ~š‚ ˜O7’ГОšиWёяQr†EC~JиИ22 %RRрхKиМ™Кpп>јѓO ­kUSƒrжJхдЇ‰ ‚љt"‘ˆЩ?‡#‘ˆ%’Z‘ЈŠЙIZZЩІMџxx“ЎУЪ-Ш’Ї|>xxРЕkџ•Ў.xx@RSџййрчZZртЗnQз)(˜›я­Њњ/уЌЊЊБАи[P  ќwіЄЄ"OЯcККлƒ‚Ђо,]m’ЋcjКЛeЫ=ЯFхгЇaЫ0333†№pъТyѓ`р@XИ4RuœвRбŒ—ZДи-;Љьь ?П-­m..‡nнJWЕЯГgлЗ?Тчo•э“У ^Е*JWw{чЮЧ““‹e%хѓЗzxЛvэ%ƒIt$•ON.&ЗjU”:ы‚”Ъ7ЉžьЅS ZКТEO Šвенюсq,)Љˆ,IL,ђђ:ЁЉЙеЫыDbb‘Ќ§+WFщыя№є<Ц0w:w(6oŒizxлДщŸДДМ"‚Р˜)ѓ‰’рS7…BP( Ћ@(Ќ*//ЪЭz|ъІbхЬЬђ;єьљ“ЏoјСƒБyy•d9uчЕЕDt4aeEHЋmмH…ФЦ„Џяе›AœШЬ,'A>V>ъ|Z (—ћt‘ё*‘2ŸцёBzіќ)!Ё9u$т?WWBCƒ 8œџЊ•–A”–ККLЭuu €К?isEЎ^}бГчO99\np~~U?]Нњ‚.ЗабйVZ*$ЂЄDЈ4ŸЎЈЈ&“g‹К1E“OE>-[xѓ&1u*‘­оАњњ;Š‹… zn—цВ3b™џљј„uьјУтХзУТžUVVKЋ‘z–– uuЗПё{ŠЋыa `й(MЂЫџ•збйVRR7ЋO2t‰‘ЌШMЊ'{щ(ƒ–.’?=)ч.;#mВ•ЫЪDJпХ юkоЈг|ўМ GŸxММЁ"ђбТћ˜зцХbIyyБX\kllVT˜S-ˆ„•”5_Нњ<"тљŒ—єѕљcЧЖ5ЪХЬL—ВцЬ™pр  "*З ок.ЂЁhk+iеППУмЙ‘Лw?”Hˆ]ЛфцVіяяа šшщё@CƒCn„Vќgq•ПјеК5$$@ЧŽЯžAыжд…Пќ))pьAh(,\и 7548СRmmžRх) ##Ч]Лі2&&їћя‡†ЦDGORЈOМёћхќ†m#‰ wж_yК§*ИƒRљ&еГ‘ЄћpѕdАЭР@Г>юmоHгЬЯЏ:>щЬ™ФЪЪџЖgЮŒФяEфЃхЃо?]QVTœŸS”—ХљйЅХљUЅ”5­­ѕ/іОsgђѓѓООoюйџVMЁЬЬ@$‚ѕып*пПЊЋaџ~шн[Ж[И{ї­jРš5PQЁФrfЭъДuы§€€іССїfЭъШ№Є‘ž=mˆ­Ў‡†ЦШ%ёёЪюї+фўTVyм8XЕ ?‚‚`ТъТ1c (ИмК/&юнЋъ8У‡;-]zЃ @№Жžkжќ]QQ­шаЛw3•ђљмСƒWЎьўэЗНууѓeМ[]-оП?ЖWЏVoќ^kfІ+‰зЏПУl{>љФvїю‡"‘xџўXuмAЉ|“ъЩ^:Ъ Ѕ‹фBOйЙїю]7їюн­їя‰ФЁЁ1нЛ[3ДЅœ;;ъѕ.f=M_пˆќ|СoпžМxБЗЕЕ>оPС|њcЄД(Џ0/Н07уСпПхg–хV–—27iгЦxѕъЯ OСлћ­ИэлaФАЗ[лЗƒ‘„‡П•(nи~~o5?x’“См\љгЉgЬш ЅЅБsgоєщЄ QŠ_4мЛзїдЉgFFЛфюЛ^оо'ђX”_в\НЌЌ Mpr[[ Ђ.фО Eђ‹‰ ЈžЌј‰Хgчяeч~№рРффbsѓНr_YлАЁŸпiЙЙ+’­ДДЖЮ›wхшб!2оэ ОoŸяПћŒqжоў€­­ГI _•#4дїќљ$cу]е\ЎъfЄTОIѕd/eаR~(zЪЮ}яоR‘OŸNазпqіlтО}~ m)чNчŽzСzšЯXНКG›6ЦxEсŒ™2џЬ{?Т™_˜LљhМ.Юж§Нœ12њO СяђсЪ qёbJPPдѓчГQ:дAyљxїOїїrЦМyџPŒj ž‚ цгвx—+ЌMОšлЬІгЬєDAоИ(‚ ‚ ‚`> ‚ ‚ ˜O#‚ ‚ цг‚ ‚ ‚љ4в`(>Iš|@3‡гHУЋё’вW›‹/‚жKYииf žѕбJqФї_( AЬЇ›з&‡„E)ў]˜\ŸD™%СєSУѕ„хC ъyW–ўˆ†’O "Ь FF`dŸ}еед…+V€žЌXСj њ‰ЃЈš?§иј>Њ—;(•G=6МZЯ&Щђп}и ‚43>очх=JЮњjb_ХђА(|.ЕњŸ?dlм’0y2lо п|CQHўЬИŸддŸŠж Jх‘† o 
Aф#пя!‘ˆпўЋ‰Њ˜›ЄЅ•lкє‡Ч1ЪŸж–žђљрсзЎ§WККрсIIL§ggƒŸhi‹ мКE]Ї @`nОЗЊЊFZRUUcaБЗ @@љяьIIEžžЧtuЗEН1’і'šCCcLMwЗlЙ'"тyУЈ|њ4lйff`fССN]HўЬјТ…ЄpшЊу”–ŠfЬИдЂХnйIegWјљEhimsq9tыVКЊ}ž=›иО§>Ћм†ЏZЅЋЛНsчуЩЩХВ’ђљ[=<Ž]Лі’С$К ŠЪ''“Ž[Е*JuAJх›TOів)-]сЛгГоEщъnї№8–”TD–$&yyадмъхu"1БHжў•+Ѓєѕwxzc˜;;›7FиxxлДщŸДДМ"‚Р˜)ѓ‰’рS7…BP( ЋШПђђЂмЌ—СЇn*VЮЬ,пЙѓAЯž?љњ†<›—WI–Pw^[KDGVV„ДкЦ„PHlмHјњўWMБљˆФЩ“„PHDFЮЮДЦwсјё8щщБcOЧџUІл-В• пДщŽPXЛnнmй—фЊ‘%ыжнЎЈЈŽˆxnoПŸ,‘ћSЎЌмЌ45 ‘ˆ ЂwoB$"ДДh ‚(( fЯ&"#еph@РХ3ўКцžgOžŒ k##гœПg˜;eЁЁсЮПџNЏЉЫUлИёŽPXЛqу?ПˆЗ§.‰ŽNЗВ e0‰r,Eх ‚№ѓ‹XЗюЖPXЛiгВОjю Йщєd/eавEђ;в“љ=ЏМнsїѕ ' }|ТШТuыnћј„ЩV^БтfY™hоМ+ sgp‡lѓF ›ММЪƒc яеыч;df–‚ +u>-”Ы}КШx•H™Oѓx!={ў”PЈєоњЧ„Ћ+ЁЁAЮеJK ‚ JK ]]ІцКК@нŸДЙ"WЏОшйѓЇœœ .78?ПЊGŸЎ^}A—[шшl+-QR"TšOWTT“Щ ‡ГE혀ٯ ‚"Ÿ–-Мy“˜:•ШЮVoX}§ХХB=ЗKsйБЬџ||Т:vќaётыaaЯ*+ЋЅеH=KK…ККлпј=ХеѕА†FАь@”&бхŠЪышl+)ЉˆUЊG—ЩŠмЄzВ—Ž2hщ"љщYя|ZqюВ3вбй&[ЙЌLЄє]ЬрЙц6ЯŸєшё‚7TA>Z>ъп‹%ххХbq-!!BR-ˆ„•”5_Нњ<"тљŒ—єѕљcЧЖ5ЪХЬL—ВцЬ™pр  "*З ок.ЂЁhk+iеППУмЙ‘Лw?”Hˆ]ЛфцVіяяа šшщё@CƒѓfГhА‚С*~ёЋukHH€Žž=ƒж­Љ љRRри1 ‚аPXИ6oЎџt448СRmmžRх) ##Ч]Лі2&&їћя‡†ЦDGORЈOМёћхќ†m#‰ wж_yК§*ИƒRљ&еГ‘Є{Gz6 ЖhжЧВЭ)lђѓЋЮŸO:s&БВВЦпПэ™3#ёп{љhљЈїOW”ччхeхgчg—чWU”RжДЖж_МићЮЩ ЬЯјњFМЙgC|ќ[5…B03‘жЏЋ|џ~ЈЎ†§ћЁwoйnсюнЗЊ kж@E…Ы9˜5Ћгж­їкп›5Ћ#УЗЄzіД9p ЖКZ#—pФЧ(ЛпЏћSYхqу`е*((€ќ| ‚ Ј ЧŒ  рrыΘИwЏЊу юДtщ‚Слz:ЌYѓwEEЕЂCяоЭTZШчsv\ЙВћЗпіŽЯ—ёfluЕxџўи^НZНё{­™™ЎH$^ПўГIьљфлнЛŠDт§ћcеqЅђMЊ'{щ(ƒ–.’п‘žѕF:їоНыцоНЛѕў§Б"‘844І{wk†Ж”sЇsGНоХЌУЦз7"?_pрРРлЗ'/^ьmm­7TA0Ÿў)-Ъ+ЬK/ЬЭ(ЪЫ(ЪЯ,-Ъ­,/enвІёъе=?žAž‚Зї[+pлЗУˆ`oЖЖo5,.##+QмАќќоj~№ $'ƒЙЙђЇSߘбAKKcчЮў::Мщг;HЂПhИwЏяЉSЯŒŒvЩнwНМНO4фГ(ПЄЙz5XYA›6рфЖЖD]Ш}Šф,P=Yё‹%ЮЮпЫЮ§рСЩЩХцц{хОВЖaC?ПгrsW,$[iim7яЪбЃCdМ)42кў|п>п7~ї1тЌН§[[f“О *GhЈяљѓIЦЦЛ**ЊЙ\е+AЉ|“ъЩ^:Ъ Ѕ,|wzв}™5вЙян;@*ђщг њњ;ЮžMмЗЯЁ-хмщмQ/X‡ЭуЧ3VЏюбІ1оGA8cІЬ?ѓуоpцз&?JЮR,ятlЯЫCгЊрwЙ+@"!.^L Šzў|6J‡z"‚ я!яўщў^ޘ7#яЪс€ЃЃё‘#ƒQ дAС|Ašяr…ЕЩWs›йtš™ž‚ Ш{%@AAЬЇAAѓiAAС|AAA0ŸFšЕqЋf+AAD=№љ @ƒ<ЪšУaњёaAAѓщfЫЃфЌЏ&іU, ‹ТG\#‚ ‚4opПGУ ‘ˆпўЋ‰Њ˜›ЄЅ•lкє‡Ч1†Ÿ1 ]]№№€ЄЄК’ФD№ђMM№ђ‚ФФКТЄ$№є]нџ~8?LMЁВрњuшвЅЎмУуиІMџЄЅ• ЫAA0Ÿ~ЈЉЃšќЋЊ*/-ЪЇЌ™•UБkзУ^Н~ž7яŠЉЉЮеЋу ЂnЇ‡є@ЁУИq0~]Щ_РшбP^#FР_д~љ%ŒХХ Ћ[WbfН{CDРсУ0wn]љеЋуMMu>ћьJяо'wэz˜•UОCAЉœ1SцŸљq/ QBТЂŽъЂЃc [˜ё*ёфэlХ} |ўжЎ]­ŽвЖm‹З<ЁАšУвR04„В2АВЊ[lже…œ04„вRАВ‚Њ*љBcуК~ЮŸ‡­[стEhл^ОƒЗЌƒ„„Т™3/?x]SГ=ˆ ‚ Ђ6ИКa‹%ххХbq-!!BR-ˆ„•”5_Нњ<"тљŒ—єѕљcЧЖ“хс IDAT5ЪХЬL—§@ьПГјПџСмЙ№еW0jдЩt~~ељѓIgЮ$ўŸН{‹ЊLќў 8r. #ŠКи"bњ•г4RЩZгЭD1АˆE—BTDEAшВЖ™ŠЉxлb]„д•ŸNbЈ@rёBrЕБ5]М  ˆ" œпЧЦёЬ3gЮ\TРЯћхЫ<<ч<їgž9ѓЬ9wюДћњКddМ…Жаі{FsS}CнењыЕѕuЕ uWъю67Rc:8H–.[P№nrђыuu-SІЄБсЄМœ9)‰ДЕ‘Є$2aТƒqуHRim%‰‰dмИžž$9™ДЕ‘ФФ‡ЧŠХdю\Вc z8eJZ]]Krђыљљя.]:жСA‚ЖаЎOFc§ѕЛЭїлл a! 
УТwсйЩЩ&&Ц#&Цƒ§5<œŒKZZЙна@Ќ­‰‹ IOИТ~чђбGdд(’šњ pг&тчGўўwўHЏНFўѓ2~ќУ3g‚аX„§г`ћOм§ћфнwЩџ§љ№C4Ру‚ыгр=ЦЙ оgZ,&cЦoОAћ`= кУгž|ыiЌЇАžРz@=‘h-ћOн_QE№ŒР§=КƒмЪZ$zь7ї`˜Hžu3ћWУ.п&’§пPхЄŸыщn­є|эJџWTУПизoq иябхtvv<њя~kы]ўC.\И[шцЖC$""!„(~P‰HT‘HˆЛ;пЉž>хЬ™ы„гЇЏ=џ|ЪяЧЎ‰жŠХыммvќ№УyЇю9ОСн}‡Йљ—ббyŠ?‰DkOйкnДГKHKЋдЎŽд•ГКšŒCzї&cЦъъ‡‘ЃЃ‰Й9=šœ?ЯsV7ЗББ….мB'ЌЇЛБікиwяоnЌЏЃЦЌ­mоАЁdќјяBBйкšЩdo3ЬƒŠ8Ў\!уЦёЅюу3ьрA9!фрС >>Уи@†‰d˜Ш{ї"ОўzЪ{я§‡чp6&'pб"™KCCИЅeoх№††{П§š”єZdфЪkqеE9{Ю‡gVWЮаPтуCnп&3fаа‡с ih Гg“E‹дž“™ьm[[Г>84aBъ† %ЕЕЭш ‘hVРЂŒnBEt_ьЮ ›љ‚™™Ѕrрх‹еЉљWTїˆХы^|qРіэoИИє}ЄQiћŠE"вдD,-5d Дєъ’%GNœxgќјя^}с…ў„ƒхGkjъ;:‘ˆtvFў~NњцcNИЙљ—WЎ,ВЖ6ijjЕЖо и{нмМЬТBмбСˆХ_(ЮЉMчU)ЇЙ9Йz•XY‘ЦF2`Й{їAДЦFbeEššШ€фЮ'ЎЊК9o^Nqё•ііш“РћЇЛœŽŽЮлЗ::ю3 УtЖнkiНG_^МИ -­2(ш D"ž=лeцЬaRЉ9Я™5.І !/МаџъеfЙќжеЋwиХ4!dоМœффзІMsjmэАВŠWŽЯ0м%ТYXˆ !ЦЦ"vUЌњэF­П)Јёы‰МъъюfeеddTпЙгюыы’‘ёz#h„§]NsS}CнењыЕѕuЕ uWъю67Rc:8H–.[P№nrђыuu-SІЄ§ОN%ххКgр/Кlй‘7пЊЙwяОTjоккёp2№гOџгxТ—_МqcIkkGRвiоЕn$чŸЦѕ8ЗœуЦ‘Є$вкJйз’”DккHR?žч|SІЄееЕ$'ПžŸџювЅc$ш ЎOw9ѕзя67оoo'„!„0 cddLп…g''›˜˜із№p2v,iiбёnr>>.^^ЉЧŽНЃљђЫЩ3fd2 YНz‚rЬ5kМ^{-НЙЙMБіUўК!љ§sbт”9sВут~ZО|Ќ‘‘Ш`5ЅZЮЄ$ђЮ;фЃШЈQ$5ѕaЬ†bmM\\Hz:ЯљЮœ BїmaџtзbћOwMЬО}ПЌZ•WYљЗ'лЧџэИр†ыг]‹їчyŸi‘h­HDўјG›””ЉheРz@;Oѓ„И8 XOƒСнПпБyЯЁ#?•ЕЕпGm`= кйš)Ы9~ ѕ 'м/я%+8ƒJРzt„mXOУSv щCƒШNSxLАКЧ2Ш­Ќ$}8=tСѓ&№œzІЮЮ9ЩxїсГ_ѓttшз[м‹’)+œўЪ˜y%;ВŽ9ыщgEщљк•ўЏЈ†Б;ЏGотZˆёюУCоўsТwџ9[§+Лу%cCфG џњGи\v=нЫиш~G':`= „вййёhгооЦHЛ>^cFМLHmlОKh;.јM=^Z‘}фЇYS<еЅлд|ЗЌњПF?фЇ26фхб#Ъj.65пeЯЦй­zNE4еНs㠘„БЃœƒfz/­PнQ­8„§Aљ ާьњXћyИ…™iеЏ—“vчlлћУЖН?XZ˜-~gZўщЊSrЮy”џ0!ег}јЂЙoєїЪ?]uМДЂОё6Кжгаc1Ь}33KхЫЋЉ1w|ЖИцПЕПнљкMўsŽpЛ%Ѓ§~Чў‹о§Ы+ъЂЪ?§ЮєWJ+фпЦ…Пџч юпШгѓœЌь#?нkkЯ?]ЙržМ›e$Y˜™,јє›ŽЮЮщЏŒ‰z+ќѓmџчьјšчŸvdх645ѓои|7чјЉœуЇйл†ќežїŒEБшfXOCеббyћvCGЧ}І“a˜ЮЖ{-­їюPcХlђzaDxР_ZZлђOWžЉbЏOыьlѕЏ‹п™6cв‹"‘hЦфmЌ,ЮVџjBнkk'„tv2"!Д;~№ЌАыšіЪoПпAЩ>rђщ'Œ~~€Дoќ?їџѕ­Щг_ГяhёЎяsеn-1їp>о}ИЉIяЇ*cЗfЂжг=YsS}лН{їяЗ1 Cцў§іЮŽvjЬњЦлйЙ'ГsOіёzaфšАwЧneЏ §эJ"fЅќв^/dчžœўЪXžЄ†Ю?3wКW^ёЙйЏyІШcЕ‘еS5uUZ]Ÿ.<[эћњјяs‹:™ЮщЏŒљѕђЕќгU УBІН2цЃ„}6—g=Н&ьЇ*“ўsЅћЇыщg@c§ѕЛЭїллй-д УbЮsШ•К†Дџw"эџ`§>їф†ЈљНХНЫжoвEЮї™;Э‹чћˆ,YсYп?пš!{qдА lЄІ~бPн9USзгПф-№›К}Этот^еП^ўrg6ѓћ2Ÿ§bтўЃХ<‡Гя1”‰f,Ъјч&TDЯУџiмSР p}КЧђуќЬоgр‰СѓЦŸQb1оJ`= Кšќт(T€ўp‘ђЕрэ?їъe,+8У>vАžmО—ё‚ЗџМрэ?Ѓ*є§XOCOС>ѕ ЛР~аŒџVжЈРz€Oщљк•ўЏЈ†Б;ыixЦaПвййёшПћ­­wљЙpсVllЁ›лBˆHD‰­-БГ#ii"TW“1cHяоdЬR]§№РUЋˆЙ9qs#55E"E$тюў ФЭmGllс… За:€ѕ4tuэДБџюоНнX_GY[лМaCЩјёп…„ВЕ5“ЩоfУШoП‘Є$љ fh(ёё!Зo“3HhшУ3XZ’†2gYДˆ{ђ+WШИq~–ЩоЖЕ5ћрƒC&ЄnиPR[лŒf€'O4+`QЦ?7Ё"€ЧЛѓТfО`ffЉxљbujўе} bёК_А}ћ..}і3in&ЄЃƒˆХЄГ“BЬЭЩеЋФЪŠ46’ШнЛb66++вдD wю<"ЃF‘дд‡с ФкšИИєtО,9„vЌЇЁxСй!ЛЌ–о„єц„ <ƒђжХЯЯ?ONЂЧ\Л–я XOCwт=Цї™ ТўiЌЇАžРzыiРzžY"бZ§OВeЫKЫx]N%Ё tkЖџКfоКQ‰8) Щ€РLъ_"=эВ=3жгOР‘’ѓ_ьЮS§wЄф|зЬ№пџžќј; )dэ.hйM}55ШKla!ё№и•1LWЙйИjе ЬЇ55–HЯ6xИkѕёЕ‘Р“zE™<™Bžž44tЧїџЙ Oю? 
OTщљк•ўЏЈ†Б;Џkотњъеf7З~нЃrПњŠ,[&(tЎЯ.˜šXсР2}:ЉЌ$R)щгѕO ЎOУ“жййСў‰Dї[[яђrсТ­ииB7Зфї{ХbтцF~јACZ‰‰Їlm7кй%ЄЅU*EЂЕQQyЩWюю;и+Wš_{-ЭФd§Аa[ŽЛЄˆ&­e˜?шRдФDbkKььHZQd]љuьЏббФмœŒMЮ?zёžz яПџ%ЇOzрЄ_?rWЉžяо%ЦЦФШHP Н=Йqƒ^Цѓч‰Л;17'бб3V]MЦŒ!Н{“1cHu5_ ѕpЊ+WШkЏ2l9vŒ/fc# "}ћ>RЅддE"2v,ёї'NNdо<ОњЄю1‰51?NBдbЊ&Єю№ššѕЙjе#љЄцG$"QQD"!ююj;­V5ЯгiuќmЄьпџ&"‰Œ$Ы—“#ШёуЯІš%ъ$BэŠъЪЎZuдsђ4мЕkФнlлЦSCnn;bc /\И…W ЌЇИкh#„ДЗЗнН{ЛБОŽГЖЖyУ†’ёуП 9dkk&“НM~џАїо=ђѕзфНї4ЄеаpяЗпB“’^‹ŒќQeyЖhмИьЯ ўы_G55-нДiЪпў–У2L$ЛЭCёЛАVўЇ8›"&'yђлo$)‰DFЂє9Еђжд@–DBШьйdб"ЭеКq#YИгэьШЄIdЯž‡JO'ООФзWPрЄIФЮŽžюЂEФЧ‡44KЫ‡ЁЁФЧ‡мОMfЬ ЁЁ|дУЉ, §+ij"›6‘П§/fh(‰Huѕ#UJM’”Dў§oВ{7ЩЮцЋOu{ „4БV ЇЎ˜œ„дОp!™3‡44ssЂд;љ2sх 7Ž/!с5ЏUЇ^uъкH™Ÿщш ’іv@zxе,Q'jWф/ЛrеЉ›˜Ј wщёі&‘‘dў|ž9D&{лжжьƒM˜КaCImm3^>КЎY‹€'eэП~liЙЭщ„—/VЏ§зЊ‘{ѕњТгѓлЊЊ›ЪџљѓќѓŒБ1C#ёЅEШчЭЭm УмПп)}ЎоддЊгмќKB>gџ)Чd#ыXTB˜цf†a˜ћїЩ(!єШЊ! У0ŒЙЙ†Д™Aƒ˜І&О@™ŒёєdЎ^eŒŒ˜К:ЦУƒ‘ЩДTЧЬŒЙuыArŠR˜™=Шќ­[Œ™™†@еУЉbnЮђрУK$LC%ŸЊЉТtt<ј_љœдњTЭ•žMLMˆZLu ЉЎ\LемЊf‰SFjBъj^ŸN+МъдЕ‘jЬSЇ˜W_e†qv~X.j–Ј“Е+ђ”SuдsR‹IушШ,вт•ЗВђ†‡ЧЗНz}W€. ћЇсIышшМ}ЛЁЃуОДўцеЖ{-­їюPc^МИ -­2(ш D"ž=лeцЬaRЉљМy$9™L›FZ[‰••†Д,,Ф„ccч‚—Ѕeoх_E--ЫMM5 е]ОЇhaСž]п/oi<<%…јњr/ёrННЩћя“Ig'йА\ЛFМН !Zъ™am+aљмпи˜ДДSSзЁ‘бƒџ•#PыSџ&ц”ˆšКbRžOuTUMˆЇцUK$МЙ…WЕ8Љs~АВ"Лw??z–„O"њˆ47Sпш<ђы›o’хЫЙОЉЉЋyУGЉO§›XЕDд„xŠ)$Ÿžž$9™ДЕ‘ФDz–TK$МгjUujf‡GRg2s&9vŒькE""У<\LЋf‰:‰PЛЂ№цаjbŠŠ";vwо!wю№Фš2%­ЎЎ%9љѕќќw—.ыр СЫ@з…§№„ї{TŸ;yњЇУХЧџS|ќ@ёёEЧі—œ8HняA•’ТєэЫєщУ$$а?BЇnеPї3ыкЕ;3fdš™­gЗ|lПѕч˜ЦЬŒ›uе@B˜•+SSцObЊЋљ>ИOKc|}ЙЉSџї?ЦТ‚ЙvБДdўї?­ЉЉзд0nnŒЉ)У=ЌЈ`мн™^НwwІЂ‚/z8У0лЗ3Щ#i]ЛЦܘё –јw2мКХ266ФЄІЮўUљuUЇја_љœZ5Бj‰Ј Q‹IMˆzxEуъژš2QQЎšyu›+TVWѓЊ%оi…WЕTSome† aюпg|}™м\ YЂN"дЎЈЎьЊUG='ѕХqqЬ{ясE gЭ X”ёЯMx_OЦ‘’ѓЅчkUУ_pvшšїЫ{jD"Ё$<"жгXO`= €ѕ4@—!№YXOCWXј?oQ€ѕ4€.ОњЊxйВБЈРz@ƒ nХЦКЙэP„ќїПЇO_ѓёqa‰Hb"БЕ%vv$-эAœъj2f щн›ŒCЊЋžmе*bnNммHMЭУ@‘ˆDE‰„ИЛ?qsлсТ-д?`= нRmmѓ† %уЧrШжжL&{[ёЇK.mlќp7tCљэ7’”D"#„„†rћ6™1ƒ„†><­Ѕ%ih sцE‹И)^Йђ№ЁЫ2йлЖЖf|phТ„д Jjk›б"№X‰f,Ъјч&TŠXМюХlпў†‹K_х№ІІж‘#ЗUTќЭвВїƒЮ'"ЭЭФТ‚ttБ˜tvBˆЙ9Йz•XY‘ЦF2`Й{їAЬЦFbeEššШ€фЮЂ8CSБДЄdЃЊъцМy9ХХWклW QрёСѓЦСР.^\–VtP"Яžэ2sц0Љдœ’’Rцы;\Б˜fYXBˆБёУ§jћФ_ЮbКЎюnVVMFFѕ;эОО.oЁEрБТ~00ЩвЅc оMN~НЎЎeЪ”4BHG“”tjЩ’17Ž$%‘жV’˜јp!$)‰ДЕ‘Є$2aпсSІЄееЕ$'ПžŸџювЅc$hxЌp}''›˜˜BHffѕшб§­4•”Dоy‡|є5ŠЄІ> oh жжФХ…ЄЇѓ~цLjАž†ž&>О8>о›ЈМЕCёѓѓЯ“SЇ5цкЕ„ч XOCUX€J€ ћЇАžРzыiЌЇыixТD"д`=  ЄА№пbХ €ѕ4штЋЏŠ—-‹zРz4ИpсVllЁ›лEШџлxњє5BˆHєртДтVM qw'ццdеЊ‡nn;bc /\И…ZЌЇЁ‡Ћ­mоАЁdќјяBBйкšЩdo+ўДqcЩТ…ЃE„†y№ˆoХЌ… Щœ9ЄЁ˜›? 
”ЩоЖЕ5ћрƒC&ЄnиPR[лŒz€žG4+`QЦ?7Ё"žqbёК_А}ћ..}•У›šZGŽмVQё7KЫо;ш‘Х4!Фмœ\НJЌЌHc#ББсўЕЊъцМy9ХХWклW Њ ‡щ…*BШХ‹ вв*ƒ‚J$тйГ]fЮ&•šBRRЪ|}‡+/І…ЋЋЛ›•U“‘Q}чNЛЏЏKFЦ[ЈgшyАп!ФСAВtщи‚‚w““_ЏЋk™2%вбС$%ZВd 'В…)/$Фг“$'“Ж6’˜ј0pЪ”ДКК–ффзѓѓп]КtЌƒƒѕ XOCчфdуqцL!$3ГzєшўŽŽVœ8ссdьиGОИiљзПˆЕ5iVк#}цLPLŒ‡““ jz0ьŸЕ<<Ої7ЮU іOƒZ……Ј~ия€ѕ4жгXO`= XOУуЃ| j€gю—зc)9_zОV5ќgя1ЮТWЬ ƒКРzњйSzОvЅџ+Њс_ьЮОž~ияб“uvvАџD"QggGgч§жжЛќ‡\Иp+6ЖаЭm‡Hє`;‡тVM qw'ццdеЊ‡nn;bc /\И…:ЌЇЁчh вооvїюэЦњ:jЬккц JЦџ.$ф­­™Lі6У<ищЁјЕp!™3‡44sѓ‡2йлЖЖf|phТ„д Jjk›Qљ№ŒР~žŒaю›™YВ?›ššB._ЌІЦttL~ёХлЗПств—џœљљ$3“˜˜А0ђёЧЅRѓ>pћрЗЊЊ›ѓцхЌXqДН}ъАž†ю­ЃЃѓіэ†ŽŽћ66вњ›WлюЕДоЛCyёт‚ДДЪ  ƒ‰xіl—™3‡IЅцТЊЋЛ›•U“‘Q}чNЛЏЏKFЦ[Ј|xF`ПGOжмTпPwЕўz-!ЄЁюJcCннцFjLЩвЅc оMN~НЎЎeЪ”46мТ‚”—?гг“$'“Ж6’˜ј0pЪ”ДКК–ффзѓѓп]КtЌƒƒ•Я\ŸюЩыЏпmnМпо^|ќ@}нџ†122&„яТГ““MLŒGLŒћkx8;–ДД<мBНiёѓ#џ; xд™3AЈmx6‰f,Ъјч&TDЯcћO?\ŸюБМЧ8cн №Иaџ4жгXO`= €ѕ4`= €ѕ4h­А№п*~‰P%XOƒ`_}UМlйXджг С… Зbc нмv(BўћпЦгЇЏљјИBDЂЇ?(ˆD$*ŠH$Фн§Aˆ›лŽииТ nЁVАžюсjk›7l(?ўЛCЖЖf2йлŠ?mмXВpсhcc!„aУх8Ў\!уЦ=јY&{лжжьƒM˜КaCImm3ъ€‡hVРЂŒnBEtGbёК_А}ћ..}•У›šZGŽмVQё7KЫо[ZDYL‹DЄЉ‰XZRN^UusоМœтт+ээ+PеъєBt_/.HKЋ Ъ™ŠЧ IDAT :(‘ˆgЯv™9s˜TjNII)ѓѕЎМ˜цСYLзенЭЪЊЩШЈОsЇнзз%#у-д3\Ÿю фђ[џўwхž=UgЮut0..[ŽёwtДRŽ#‘“'ЩШ‘6ПЪEk7ЗООУ§ќžwrВAХh„ыг=““MLŒGLŒ!$3ГzєшўœХ4!$<œŒKZZш[ЈЮœ B}‡я#і4ёёєлф­YCюох.Іљзж ЎOї4……Ј€'зЇАžРzыiЌЇыixDЂЕь?uEРГВ.šАhкЫЮЈ<ИџєМї— .t\OB~Љ)GuhћЇАžРz {щХџчЁУFВ?˜ššккі}ўљсгЇMіЦT‘H„КшЅ1ћ=Хііі7nž9svлŽ]™{ПпœМЉwяоЈ>xЦ ня!‹ ш?uъыiЛПe:;ЗяиЅјгюЇO~ѕЯУGќiђЋNKЯP„6rпўџМљжьџчю6њЅE‹Уыыы=rфш_fЬz~Є›ЧјWжпббЦ€ЛžV066^ЖhџўƒьЏййћ6oNљj§кŸЯ–|ЙnmRђц}ћџЃˆМeKЪ'Чœ=]”ћCŽD"‰љ№яlјёуљ ‰IŸ§уя?Ÿ-ЩиГЛІц— баѓзг„сУ].ўіћѓЮ~ЗzѕGnn‹ХююZ§їvюќЇ"цЦ _О0к],їэлwUдЪљlxRђцЕqk\]Gѕъеk ƒCьgŸfюЭFc@ЗгKЗУ_G<ў——^ЋщХЋkТПўёЯ)~ЖЖЖjiia>W^>cІ/!„љОрЯЪzКВЊjˆЃЃАe7}•мйЩ§tТкк нšжћ=:::6%&џх/ги_‡•(ўZT\ь2ЬYуIўoфˆŽфЂірYYOпПџкЕыџяlю;„ПАс}янOVџуьйВћїяŸ=[іЩпзќѕЏяi<л’%‹т>_їŸƒ9------…?§э§h шv4яї`щbbbТ>ЯхнwчNŸі№y.oНѕцн––ЅЫWўяЕ:, ~ѓїKз<<=Ц%%nLHLŠZѕУ0њ“ыпцџнŽhVРЂi/;Я{ ћмЮU€ѕ4жгXO`= XO<6ю?Э0 ъ@[ЂY‹P OŒ\.—ЫхO ‰Ч ›PЗШЇ–,Y’Р'!!!,,ьiѕЂЇž(Oл=н6эR=ъщіp§“ж8 R:??ПŸўЙkN]ЄuЈГ Еwiех„дЕбWќoG9U7лhЛўуŒwс5пeš†A]vюЂўIШ”а3tЁч#ЦХХ‹Хтррр5kжАсNNNNNNооочЯŸзxEVˆјјјлЗoO›6Эее566жззЗл5›ЯЁC‡Ў]ЛЦэкЕk‡ž9sfw)з’%K6nм($€:F••UYY™––жЗo_C%$•J+** ˜ѓЁC‡ІІІBd2™ЕЕuh CЭ6œёn№šLщOy=mbbтуу“••ХѓіTqнBёVU.—EEEЅЇЇ/[ЖŒчќеееEEEoОљІЏЏЏL&ћѕз_•џzуЦ7>џќѓ„AƒэкЕЋВВ255uа A<ЉBќќќJKK‹‹‹зЁ===уууЏ_ПожжvњєщBˆD"INNЎЈЈШЮЮvppрЩ'5fЏ^НbbbJJJNŸ>=oо<ž,йллЇЇЇWUUЅЇЇ+_W“ЫхССС'Nœ8wю\pp0[Lš4)''Gё+ЕBX999“&Mт?лж­[§§§ !ўўў[ЗnUф'""ЂММ<33Г_П~< Џy~уЦ#„ќєгOъееgfffyyyDDџљЉ1Љ­щххu№рСъъъcЧŽЭž=›Ї‰Љ‡ЫхrеВSSWWŸЊ‡ЋЋdxzŘдN+<Ÿ<нFѕ#еsR›ƒ:“““нннхrydd$OWTqъZS.—‹D"УnŒ™Ьfff~њщЇŽŽŽЪ1WЏ^НfЭSSг lкД‰чœд˜лЗoЗЗЗ—H$+WЎф9|уЦQQQІІІббб6lPŽЖeЫ–Z[[s ЅЊИИXљsUž БББ)..ц?лрСƒe2™™™™L&SьЈ‘Ыхl>###зЏ_Џ1!!5ЯoїюньTЫШI(>>>22’­Oў§д˜дж,..žѕь6дц „Я>ћŒ}{ЬіъЂЂ"ž|ЊŽ8с“€>ІOŸ.WЁеˆ[Зn]ttДЉЉщЪ•+љ{2ŘТ{{rіGŸ>}4:лhЕ CуxW7 LHѕ№шшшааPE„)SІАŸяŸдцP7oЋыЩЊ ):УW_}Ѕm1ЕЭЇЮ Яє+dJЇvХI“&mоМљфЩ“QQQwxtЇѕєЖmлrssCCC—L”пЋО9VЈfffьџеее‡ыО}ћЖoпЎšPYYйіэл9‹`“ђђrўi…§,U,здд(оІійgЇNњќѓЯmmmйЗЫьeKKЫввRžsRcцччsђІю№’’ЉTЪОy())QŽ&ќrKMM‘‘ѕ3N…sЊ*<<<55uЩ’%Ъљaѓ)•Jйu‰ў5Ягm„oГуЉOў jLjkž8qт§їпwvv‹ХŠУЉML=œZvž|rъ“zИž/{ТŽZLjЇе6Ÿ:wjsPТ?ў8dШ9sц”••mйВх—_~сЩЇъˆ> шћБ ‘QAAББёњѕы_~љemG\QQ‘"&OцŸX4і:Й\юээ——Чџ‘џl#Мj;о•чO ЉўвK/энЛзЦЦІІІЦЪЪ*))iюмЙZѕOjsPёєdе„чфМEИžж*Ÿ:O,<гšV;ЇUg†~§њ…††цццВпЄшњz ŒЧ0 У0Ъ!NNN_lZZZиџ{ѕвм„ LLL†>rфHХшRNˆ5qтФшшш!C†kЬ9ћYj{{Л"r]]]LL ;­'&&њћћїэлїјёу"‘ШШШˆSRjLЉTzљђe!5imm}ѓцMBШЭ›7­ЌЌ”џЄq?ДrЁЌЌЌnнКЅБB,--›šš4žpпО}‹/ў№У•й|жзз+.ъYѓЊMЉ ѓ6;хњд!&Е5CCCCBB­ЌЌbccйjЋы6ЊeЇІЎЎ>UчA­UN №†ЃS]Ї˜O=Л Е9ЈЁџў—/_ЮЬЬ ‰DŠOКех“3т„Oz>|ј/ПќвббсююўЩ'Ÿh;тњєщЃˆ)$ѓœbjеыж­[—žž^[[+Є\g­&сѓЇnгoII‰ЃЃуoМAёіііђђтLƒћ'Е9”_ƒ‘Ї'ЋRœSЗэѕТѓ)pбvњ2ЅѓЯ u œц§гѓчЯ 
ДББ9pрР7п|3qтD‡ˆD"ktЮ(ZМxqBBBRRвђхЫy\ЛvэњѕыGŽщъъЊsъьТ:>>~єшб„7nИКК:;;;99 :”'Ÿд˜7nм8p  ijjbП&ekkЋѓkOUUеЈQЃ„TˆЋЋkeeЅЦ†††fgg/\ИГМ`gavjжПце]ŸжИЭŽ'Ё[Зn)ъ“?ujLjk–••…††Ž?>44tХŠїмskзЎMMM]А`Ж oMžšзШЩЩщ№сУ~~~)))NNNаjФххх›ššъy­šувЅKaaaЋWЏ–H$# ™mєЙ8Эп955zј‘#GЄRщюнЛэээ<ЈmЮЉЭЁmOVЅш ЧŽгXL=ѓ)|П‡РщWр”NэŠссс2™ЬЫЫ+..ювЅKњ8€ЇFЯћO/XАрмЙsЪїї jюW:}њєВВ2EрюнЛgܘѢьыыЫ~)„š™9sц”––ž:uъНїоуD ІЮљљеW_ЭЬЬЌЊЊ:{іьж­[џ№‡?BЬЭЭПќђЫŸўљФ‰ГfЭтЩ'5ІX,ўјуKKKЯœ9УЙ;'K ШШШЈЎЎоГgOџў§u›)8ЗнUW!§ћї/--еxџщ]ЛvБ™={іЮ;љ‰ŒŒЌЈЈPў‚Йž5ЏюК”V7(х$dooПwяоŠŠŠˆˆўMд˜джœ6mšL&ЋЉЉ9x№ ЇЇ'OSЇ–š:Е>yЊŽSvсЏ…< Чљ•ZLjЇžOuХT§Є‚zNjsPТ'Ÿ|ЭSќн’gPќТ /ЄЇЇы0СŠХтММб у§ЉыRЭёѓљ˜Іtƒ8€'ДЬB%@З3lиАoП§ЖММ\&“щЖзМ[ŒИUЋV™››Пџўћ_§5§Љы.Эn№јєB@QSSау‹Y__ђфЩ‹/ъАMžйц@З§Љ>яё%д-ђЉƒ%K–$$$№ЧIHHаљћˆOЗц ’(Oл=н6эR=ъщіp§“ж8 R:??ПŸўљYћRVхЅЮ6ПЈgЭS'jƒЬоOЌХоДG]‰КBЯФWn <ј…*NЬ;wњљљБ?Я;—}ЪЈb9{іlJJЪрСƒѕŸ•lllтттŠ‹‹ЋЊЊіьйЃxˆку[”Мў9З SЧооОЄЄDу§ђžиыFBnДФsfсyшІыщЧ‘эюrN‚A2SXX8bФ}jЃ;Ў6јпŽЊо’:лhЛўуŒwс5пeš†A]vюў yžл#t_F]'+qqqСССЦЦЦbБ888xЭš5l8ћрooяѓчЯkМ"+D||ќэлЗЇM›цъъыыылэšЭЧЧчаЁCŸO~экЕУ‡Яœ9ГЛ”KчЇŽУГIu Œ=:++ЋВВ2--MЯчР)“JЅьƒŸ eшаЁь-іe2™n”юj 5лpЦЛСk0Ѕ<хѕД‰‰‰OVVЯлSХu хЫEEEЅЇЇ/[ЖŒчќеееEEEoОљІЏЏЏL&ћѕз_•џzуЦ7ВOя4hаЎ]Л*++SSS Ф“:!ФЯЯЏДДДИИXqкгг3>>ўњѕыmmmЇOŸ !„H$’фффŠŠŠььlž|Rcіъе+&&ІЄЄфєщгŠ'EQГdooŸžž^UU•žžЎ|]M.—Ÿ8qтмЙsœ'ТЈš4iRNNŽтWj…Аrrr&MšФЖ­[ЗњћћBќ§§ЗnнЊШODDDyyЙђєЌy~QЋЎ>й‡DDD№ŸŸ“кš^^^ЌЎЎ>vьиьйГyš˜zИ\.W-;5uuѕЉzИКJˆЇ‡pP‹IэДТѓЩгmT?RP='Е9ЈСбб199911бнн}< OWTqъZS.—‹D"УnŒ™]\јˆ“JЅхххK—.UdIн ІZLсНN!$$фћяПЗААр/”йFј$@­yuѓЇ@Њ‡џ№УЮЮЮŠbБИЈЈШЮЮNxџЄ6Е'ЋлХСvх‡ћ№Мь6ŸТ_пљЇ_S:Е+feeљјј˜˜˜`}=арСƒЃЂЂNž<ЙyѓfvКОпC.—ћћћГџчххЉ;ŠеЏ_ПьььььlKKKN;;ЛШШHv5єшQOOЯоН{~їнw<чd_ЂЬЬЬ|}}>Ьfff~њщЇŽŽŽЪ1WЏ^НfЭSSг pž“Т9'5fDDФіэлэээ%ЩЪ•+yпИqcTT”ЉЉittє† ”ЃmйВeрРжжжœBЉ*..Vў\•ЇBlllŠ‹‹5ЖЏL&333“ЩdŠ5rЙœЭgddЄтбhzжUGœ№I@гЇO—ЋаjФ­[З.::кддtхЪ•ќ=™ZLсНŽ=9{ЃOŸ>‹FmДк†ЁqМЋ›?&ЄzxttthhЈ"Т”)SиЯ „їOjsЈ›Зеѕdе„сЋЏОвƘкцSч‰…gњ2ЅSЛтЄI“6oо|ђфЩЈЈ(;<К“mлЖхцц†††*?ѕ”њz n`›™™БџWWWkЎћіэлО}ЛjBeeeлЗoч,‚MLLЪЫЫљЇіГTБX\SSЃx›ўйgŸœ:uъѓЯ?ЗЕЕeп.Г—,--9ЯŽтœ“3??Ÿ“7u‡—””HЅRіЭCII‰r4с—[jjjŒŒŒЈŸ!p*Фии˜SэTсссЉЉЉK–,QЮ›OЉTЪЎKєЏyžn#|›O}ђПHPcR[ѓФ‰яПџОГГГX,VNmbъсдВѓф“SŸдУѕ|йоpдbR;­ЖљдЙлP›ƒ:~ќёЧ!C†Ь™3ЇЌЌlЫ–-ьC•ехSuФ Ÿє§XаШЈ  Рииx§њѕ/ПќВЖ#ЎЈЈH“П'ѓO,{\.їііЮЫЫуџШŽЖо?ЕяЪѓЇР„TщЅ—іюнkccSSScee•””4wю\­њ'Е9ЈxzВjBŠsrоЂ\Ok•O'žiMЋгЊ3CП~§BCCsssйoRt}BŸчТ0 У0Ъ!NNN_lZZZиџ{ѕвм„ LLL†>rфHХшRNˆ5qтФшшш!C†kЬ9ћYj{{Л"r]]]LL ;­'&&њћћїэлїјёу"‘ШШШˆSRjLЉTzљђe!5imm}ѓцMBШЭ›7­ЌЌ”џЄq?ДrЁЌЌЌnнКЅБB,--›šš4žpпО}‹/ў№У•й|жзз+.ъYѓЊMЉ ѓ6;хњд!&Е5CCCCBB­ЌЌbccйjЋы6ЊeЇІЎЎ>UчA­UN №†ЃS]Ї˜O=Л Е9ЈЁџў—/_ЮЬЬ ‰DŠOКех“3т„Oz>|ј/ПќвббсююўЩ'Ÿh;тњєщЃˆ)$ѓœbjеыж­[—žž^[[+Є\g­&сѓЇnгoII‰ЃЃуoМAёіііђђтLƒћ'Е9”_ƒ‘Ї'ЋRœSЗэѕТѓ)pбvњ2ЅѓЯ u œц§гѓчЯ ДББ9pрР7п|3qтD‡|Ь/У0œQДxёт„„„ЄЄЄхЫ—ѓИvэкѕыз9вееUчдй…u||ќшбЃ !7nмpuuuvvvrr:t(O>Љ1oмИ1pр@!вддФ~MЪжжVчзžЊЊЊQЃF ЉWWзЪЪJ' ЭЮЮ^Иp!gyСЮТьдЌЭЋЛ>­q›OBЗnнRд'ъд˜дж,++ ?~|hhшŠ+xš˜ЇлIП>ѕ\Z N‰T‹ЉCЇUЮЇХTFmъ@`†Н\нййЉУ‹Б№жд‡\.пПџјёухrљ!CЪЪЪІOŸЎеˆkhhАГГSФз6ѓZ5ЧŒ3^zщ%www!E8лPёŒw]QѕE5Pѕ№ŽŽŽcЧŽ………=ztХŠЇNR]nђЃ6љ§›єЪЋRžžLНpУvХлEžbъ“Om ™~NщдЎ8qтФoОљfџў§666ѓчЯЧB zШzšrщвЅИИ8///™LЎqеЂќ ЧуђхЫcЦŒQќъссбЇOŸœœœДД4'''v™KebbR__пЛwяХ‹ыzJJЪиБcMLLњєщГpсBіЫу‡ZО|Й………ƒƒƒbѓ"5Ÿд˜YYYќqП~§,,,8o8YЪЯЯŸ7ož‰‰Щќљѓ tkЖмммЉSЇ ЉЉSЇўјуќg1bФЈQЃVЎ\щъъЪ~у“Хц3((шјёуЉy':\œц$tќјё   Х—чдЁЦЄЖfbbЂ‹‹‹X,–H$­­­г”n№№„–YЈшv† іэЗп–——Ыd2ніšw‹Зjе*ssѓїпџыЏПFЃ?uнЅ9аmŸf гs IDAT^Јш1jjjz|1ыыыOžuАdЩ’„„ў8 :ёщжМAхiЛЇлІ]ЊG=нЎв‚AJчччїѓЯ??k_*аЊМдйFрїѕЌyъDmйћ‰ЕИР›іЈ+QWш™јЪ €П№AХ‰ЙsчN???ічЙsчВOUЬ gЯžMII}ZёЄ(j–ьээгггЋЊЊвгг•ЏЋЩхђррр'Nœ;wŽѓDU“&MЪЩЩQќJ­VNNЮЄI“јЯЖuыVBˆППџж­[љ‰ˆˆ(//W~ €ž5ЯOу#jее'ћ‚ˆˆўѓScR[гЫЫырСƒеееЧŽ›={6OS—ЫхЊeЇІЎЎ>UWWЩёєj1ЉVx>yКъG ЊчЄ6u 8::&'''&&КЛЛГGсщŠЊ#N]kЪхr‘Hdи1“'OЮЭЭ:tшЭ›7йЇ‹ qRЉ4##ЃММ|щвЅŠ,Љ›СT‹)Мз)„„„|џ§їќ…2лŸЈ5ЏnўHѕ№~јСййYA,ййй яŸдц ЂіduЛ8иЮ 
ќpž—]УцSјы;џєЋqJЇvХЌЌ,ЌЯ  шcњєщrZИuыжEGG›ššЎ\Й’П'S‹)МзБ'g/pєщгGcбЈГVл04ŽwuѓЇР„TŽŽ UD˜2e ћЙ№ўImuѓЖКžЌšЂ3|ѕеWкSл|ъ<Б№LПBІtjWœ4iвцЭ›Ož<Ѕq‡'@wВmлЖмммааPхЇžR_д l333іџъъjУuпО}лЗoWMЈЌЌlћіэœEА‰‰Iyy9џДТ~–*‹kjjoг?ћьГ‚‚‚SЇN}ўљчЖЖЖьлeі2€ЅЅ%чйQœsRcцччsђІю№’’ЉTЪОy())QŽ&ќrKMM‘‘ѕ3N…sЊ*<<<55uЩ’%Ъљaѓ)•Jйu‰ў5Ягm„oГуЉOў jLjkž8qт§їпwvv‹ХŠУЉML=œZvž|rъ“zИž/{ТŽZLjЇе6Ÿ:wjsPТ?ў8dШ9sц”••mйВ…}ЈВК|ЊŽ8с“€О Џ_Пўх—_жvФ)bђїdў‰EcЏ“ЫхоооyyyќйёЯ6ТћЇЖу]yў˜ъс/НєвоН{mlljjjЌЌЌ’’’цЮЋUџЄ6OOVMHqNЮ[ыi­ђЉѓФТ3­iЕsZufшзЏ_hhhnn.ћM*€ЎOшѓ\†aF9ФЩЩIр‹MKK ћЏ^’›0a‚‰‰Щ№сУGŽЉ]Ъ Б&Nœ=dШccc9g?KmooWDЎЋЋ‹‰‰aЇ•рррФФDџО}ћ?~\$qJЪA)•J/_О,Є&­­­oоМIЙyѓІ•••ђŸ4ю‡V.”••е­[З4VˆЅЅeSS“ЦюлЗoётХ~јЁr ›ЯњњzХ%@=k^Е)tоfЇ\Ÿ:ФЄЖfhhhHHH`` ••Ull,ћ@mbuнFЕьддееЇъс<ЈЕЪ оpдbЊыДѓЉgЗЁ6u єяпџђхЫ™™™"‘HёIЗК|rFœ№I@OУ‡џх—_:::мнн?љфmG\Ÿ>}1…džSL­zнКuывггkkk…”KрlЃе$ |ўдmњ-))qtt|у7!ооо^^^œiPcџЄ6‡ђЫЂb0ђєdUŠsъЖН^x>Ю!кNПBІtў™сБŽAƒгМzўќљ666јц›o&NœЈёљe†3Š/^œ””Д|љržзЎ]Л~§њ‘#GКККъœ:ЛАŽ=z4!фЦЎЎЎЮЮЮNNNC‡хЩ'5ц7(ЄBšššиЏIйккъќкSUU5jд(!тъъZYYЉё„ЁЁЁййй .ф,/иY˜šѕЏyuзЇ5nГуIшж­[ŠњфO“кšeeeЁЁЁуЧ ]БbOѓt!ЉѓзЇ>ƒKЋ!У)‘j1uшДښ䨘ڣЭA УА—Ћ;;;ux1оšњЫхћїя?~М\.2dHYYйєщгЕq vvvŠјкf^Ћц˜1cЦK/Нфюю.Єhg*žё.А+ЊО ЈЊоббqьиБАААЃGЎXБтдЉSЊЫM~дц П“^yUЪг“ЉnиЮ xЛШSL}ђЉ-!гЏР)к'NœјЭ7пьпПпЦЦ&00pўќљXЈAYOB.]Кчхх%“ЩТУУ5ЎZ”_сx\О|yܘ1Š_=<<њєщ““““––цффФ.sЉLLLъыы{їюНxёbROII;vЌ‰‰IŸ>}.\Ш~yќаЁCЫ—/ЗААpppPl^Єц“3++ыу?юзЏŸ……чЭ'KљљљѓцЭ311™?~AAnЭ–››;uъT!2uъдќ‘џl#FŒ5jдЪ•+]]]йo|Ви|?~м 5яЄB‡‹гœ„Ž?dbbЂјђœ:д˜джLLLtqq‹Х‰ЄЕЕ•Ї‰yКдyъSјрђБЌ№„ЈХдЖгrђЉm19ЈЭAЕЕЕЯ=їмкЕkSSSuxЂВ№жфЉyœœœ>ьчч‘’’тффtрР­F\^^^ppАЉЉi`` ™зЊ9.]КЖzѕj‰DЂ1ВйFŸ‹гќ]‘3QSЉ‡9rD*•юоНлооўрСƒкцœккіdUŠЮpьи1Хд3ŸТї{œ~NщдЎ.“ЩМММттт.]КЄџˆшвxv_-XАрмЙsЪїї jюW:}њєВВ2EрюнЛgܘѢьыыЫ~)„:~цЬ™SZZzъдЉїо{š:ччW_}533ГЊЊъьйГ[Зn§Уў@177џђЫ/ўљч'NЬš5‹'Ÿд˜bБју?.--=sц чюœ, 0 ##ЃККzЯž=§ћїзmІрмvW]…єяпПДДTу§ЇwэкХdіьй;wюTф'22ВЂЂBљ цzжМКыRZн ”“НН§оН{+**"""ј7RcR[sкДi2™ЌІІцрСƒžžžеSѕ“ ъ9ЉЭAŸ|ђItt4OU№wKžI@5ђ /МžžЎУф)‹ѓђђŒ7mкфссЁœ„Р'•Jй›*„……Б{Фy2ЏšsсНNyЮдјCнl#ЄђŒwNLuѓЇъDM ЄneeURRbddЄœс§“кTдžЌ.Ё•+WVTT(пь…Z"ƒфSјzZШє+|Jч™—Tщ<тј„……qnA@НЂѓѓŸи•Зѕа­ИžбуkIy илл˜››?юtSSS•WУПH!„ЕЕ5ч{“OuЖј|DƒŒїЇЎK5ЧSЬчcšв >тžа2 •нЮАaУО§ілђђr™LІл^ѓn1тV­Zennўўћя§ѕзhєЇЎЛ4К Руг U=FMMM@@@/f}}§Щ“'/^МЈУ6qxf›нєЇњxМЧ—PЗШЇ–,Y’Р'!!Aчя#>нš7HЂЂž5OЈ 2{?Бxгu%ъ =_Й0№р>Ј81wюмщччЧўљЕkз>>>YYYНoа AЛvэЊЌЌLMM4hOъ„??ПвввттbХuhOOЯјјјызЏЗЕЕ>}:$$„"‘H’““+**ВГГxђIйЋWЏ˜˜˜’’’гЇO+žEЭ’НН}zzzUUUzzКђu5Й\|тФ‰sчЮqžЃjвЄI999Š_ЉТЪЩЩ™4iџйЖnнъяяOёїїпКuЋ"?хххЪаГцљi|D­КњdRС~jLjkzyyЃтыъ7rIZyDhyFšn@ Дщ™1Аœы’яфЁCј§qОsО=чTWW?TР§Y.”UЕwэНkŸъsЊO9;;gddАЮIaѕ‰Ќ)“ЩN:%‹нннsss1ЭїяпŸŸŸяьь,—ЫїэлgXэиБcžžžЌAqikk3|ЎŠ1ˆP(lkk3щ_ЅRщттЂT*™5zНžж3//9ЭJЫу!<€%hяоНyyyД=ёћ=5‘оlkk[Кt)Ч *..ЦИй9vЄtЄ=1І#ЂЭ*$wr˜Ш %згЪАAК9о{я=њу1е*• Ѓ'wЦ‘'kHHHаs0kЦЫхrggчмм\|$#‡Iutчє ///“CCfГЖa˜œяЦђ'Ё nsЙ\.•J™ Ы—/ЇŸЧ'вЦђЖБHц b‚су?6w˜цъiqbСЄ_’”Ž ХИИИ’’’жжжќќ|“;<`:qђфЩккZЉTjxъ)ђz`lbЛИИа?LNз .œ:uŠ+ЈЋЋыдЉSЌE0ŸЯяээХЇњY*ЧгщtЬЧєїо{ЏЅЅЅНН§ƒ>˜5k§q™О рссС:;Šе'Вfss3K7cЭеjЕH$Ђ?<ЈеjУjфЗ[t:НН=ђЫ ,Г#ЩЮЮ.--нКuЋЁ>Дž"‘ˆ^—XoyLиoГУи‘@жDzГЉЉiгІMссс<iŽt1В9rь=YіD6ЗђВGю8ф0‘AkЎž‡ вШ‰p§њѕЕkзvuu;vŒ>Tй˜žмGžЌ},hoпввтррАgЯž%K–˜;уT*SЩјФb2ъєz}|||}}=ў‘>лЧЇЙѓн0 т6ŠŠЊЈЈ …:N >|x§њѕfХ'вH0‘ЬФєЩњˆBИž6KO‹ &­™Еsš›|}}ЅRimm-§M*˜њžч299999iXJxБљс‡шŸŽŽ&ФНєвK|>Юœ9ѓцЭcf—Ё šииXЙ\трр`RsњYъƒ˜Ъп|ѓЭŽ;шД’žž~шаЁuыжy{{766кйййллГFЪYS$ ‘Xвггsdd„ЂЈ‘‘@`ј_&їCJ Œ™4ˆ‡‡ЧјјИЩ/\И™™ЙsчNУBZЯббQц •–чК’Стmv†іД &в›RЉtѓцЭ)))р§їпЇ/H юи‘вй“лвЊЌBrЧ!‡i,h ѕД2lю@N??ПЁЁЁђђђффd;;;цIЗ1=Y3Ž< XЩœ9sОњъЋ‰‰‰ШШШЗп~лмчххХд$Qž5LГЂЎИИXЁPмЙs‡d\„йЦЌ$@ž?-KПjЕ:88xхЪ•EХЧЧЧФФАв ЩјDКУ№ВШLFL$saњДl{=Йž„9ФмєK’вё™сЁЮAА9ІїOЇЅЅЅЄЄ…ТK—.=z466жdТc~'''YГ(33ѓР‡ЮЩЩС4,**кГgЯМyѓ""",–N/ЌїюнЛ`СŠЂ†‡‡#""ТУУCCCУТТ0z"kd||œўšдЌYГ,ОієїїЯŸ?ŸФ 
}}}&;”JЅUUU[Жla-/ш,LЇfы-oьўДЩmvAcccŒ=ёв‘5‘оьъъ’JЅ‹/–JЅлЖmУИ6$вёіДfr™5eX#тг‚ 5дг‚a‚tr"LNNвЗЋўљg .ЦфоДН^ётХХ‹ыѕњЎЎЎ„„ГfмЗп~ыууУд7WyГмБzѕъЈЈЈШШH’Ёf$˜љNŠм Зл|bbЂЁЁ!++ЋЎЎnлЖmэээмх&Є;Ј|“оpUЉdф:˜‹˜aZЃЇЙЄ_Т”Ž ХиииЃG^МxQ(ІЄЄЄЅЅСB ˜!ыiŠЂ cbb”JevvЖЩU‹сУаааsЯ=ЧќэххuхЪ•Я?џ<44”^ц"сѓљЃЃЃNNN™™™H?qтФѓЯ?ЯчѓНММЖlйByМКК:''ЧЭЭЭппŸйМˆдYГВВВ  РзззЭЭѕa€ЅRssѓЦљ|~ZZZKK‹enЋ­­]Бb‰AVЌXq§њu|osчЮ?~nnnDD§OZЯдддЦЦF›X>”ƒ7ЇY‚SSSљ|>ѓх9c k"НyшаЁgžy†ЧуЙЛЛџєгOcТ†D:Цžф“‹фБ,Й ф0Э Z–žц“вШ‰pчЮЇžzЊЈЈЈДДд‚•ЩН‰БМIBCCkjj$‰L&;qтDhhшЅK—ЬšqѕѕѕщщщЮЮЮ)))(o–;ГВВvэкхююnВ2IЖБцц4>Y‰Yˆl~эк5‘HTVV&‹/_ОlЎцHw˜Щ\˜`hhh09L+ѕ$пяA˜~ S:2ГГГ•JeLLLaaсрр ѕ3І4˜нW===†яї ŒМЏ4!!ЁЋЋ‹),++[Нz5§{RR§ЅфќYЛv­FЃiooпАaЋR:ыїeЫ–•——їїїпМyѓјёуПјХ/(Šruu§шЃКЛЛ›ššжЌYƒбY“Чуh4šЮЮNжл9X*Эž=ћќљѓчЮѓѓѓГ,SА^ЛkЬ ~~~ЦфћЇЯœ9Cфѕз_?}њ4ЃO^^žVЋ5ќ‚Й•–7v_ЪЌ”В‰ХтŠŠ ­V+“Щ№›‘5‘о\Еj•RЉдщt—/_^ДhЦХШцШБ#Ѕ#э‰1kьфзBŒуX"‡‰ Zr= “ћЄй'вШ‰№ілoЫхrŒ)№a‰IмЪ .T($OW__яррp№рСшшhC„3N$б/UШЪЪЂїˆc”чjNu†9гф cй†$>1ѓUгXўф&jd!ВЙ@ PЋеііі†њ“Ч'вH‘lLPnnЎVЋ5|й rD6б“|=M’~ЩS:&/qБxЦрШЪЪbН‚y;ФтѓйТзzX6ИŸ1у­d8ФbБJЅJNNvuu}иrKKK WУ6ПIA‚ЇЇ'ы{“dЖ!<б&ѓ§Б3Ѕмёѕ|H)нц3р-ГРРДущЇŸўєгO{{{•JЅe{ЭЇХŒлО}ЛЋЋыІM›>љфpњcgКИТŽ`f :.99yЦsttДЕЕѕіэлlžXw@ижУ=яс šzZРж­[8€ЏsрР‹Пјx-oЁп=^ŸNЉˆzМnНh“С&Ѓ“H$нннOк— Ь/2л~бJЫ#ЕMВї#ѓ8сK{Œh*D&|хl<љЩ'ЋцщгЇ% §ћњѕыщSF™ rѓцЭ'NYŸ•„Baaaa[[[џЙsч˜CдоЂмціgН&ЬbБX­V›|_о#Л˜„фEK˜žЩu˜Іыщ‡Ёіtщгт‰`eОќђЫЙsчZcщИкРхО™mЬ]џБц;ЙхЇьBгЖ3hЪц.ђ3ф1ЏG€щ‹§дQЅАА0==нССЧуЅЇЇяоН›.ЇўˆПuы–Щ;В$ьнЛїЛяО[ЕjUDDФћяПŸ””4эм–˜˜X]]mђ|ђЛwяжддМікkгe\Ÿ:<™p'Т‚ *++ћњњ>џќs+Я3D$б?йŠАА0њћJЅвВЅЇЖЪ6ЌљnsЫвр1ЏЇљ|~bbbee%цу)sпТ№Хђ*•*??_ЁPМѕж[˜ўT*еЋЏОš””ЄT*џєЇ?ўя№№№ў§ћщгћЯœ9гззWZZˆ‘NQ”D"бh4mmmЬ}шE‹энЛїЏ§ы§ћї;::6oоLQ”ЛЛћ‘#GДZmUU•ПП?FOdMGGЧ;vЈеъŽŽцЄ(ЄJbБXЁPєїї+ УћjzН>==НЉЉЉЇЇ‡u" —ИИИ+WЎ0" BsхЪ•ИИ8|oЧ_ЗnEQыж­;~ќ8ЃL&ыээ5rфШЁC‡"##щуQ0ЁШqЦМЉзыэььlЛ1fщвЅЕЕЕaaa###єщтф3N$?ОЗЗїЗП§-Ѓ’Б Ц&yд1lоМљ‹/ОpssУŠ$л'ЄххOBИЭЏ^НЮTрёx*•ЪЧЧ‡<>‘ю@‚ŒdcЛ8ш`0<мsйЕ­žфзw|њ5™в‘ЁXYY™˜˜Шчѓa}Ь@‚‚‚ђѓѓ[[[KJJшtIОпCЏзЏ[ЗŽўY__oЌЏЏoUUUUU•‡‡ЋŽO^^НšЏЋЋ[Дh‘““SJJЪйГg1}в—(—ЄЄЄššКАММќнwп 6ЌЙkзЎнЛw;;;gddАЮIaѕ‰Ќ)“ЩN:%‹нннsss1ЭїяпŸŸŸяьь,—ЫїэлgXэиБcžžžЌAqikk3|ЎŠ1ˆP(lыј IDATkk3щ_ЅRщттЂT*™5zНžж3//9ЭJЫу!<€%hяоНyyyД=ёћ=5‘оlkk[Кt)Ч *..ЦИй9vЄtЄ=1І#ЂЭ*$wr˜Ш %згЪАAК9о{я=њу1е*• Ѓ'wЦ‘'kHHHаs0kЦЫхrggчмм\|$#‡Iutчє ///“CCfГЖa˜œяЦђ'Ё nsЙ\.•J™ Ы—/ЇŸЧ'вЦђЖБHц b‚су?6w˜цъiqbСЄ_’”Ž ХИИИ’’’жжжќќ|“;<`:qђфЩккZЉTjxъ)ђz`lbЛИИа?LNз .œ:uŠ+ЈЋЋыдЉSЌE0ŸЯяээХЇњY*ЧгщtЬЧєїо{ЏЅЅЅНН§ƒ>˜5k§q™О рссС:;Šе'Вfss3K7cЭеjЕH$Ђ?<ЈеjУjфЗ[t:НН=ђЫ ,Г#ЩЮЮ.--нКuЋЁ>Дž"‘ˆ^—XoyLиoГУи‘@жDzГЉЉiгІMссс<iŽt1В9rь=YіD6ЗђВGю8ф0‘AkЎž‡ вШ‰p§њѕЕkзvuu;vŒ>Tй˜žмGžЌ},hoпввтррАgЯž%K–˜;уT*SЩјФb2ъєz}|||}}=ў‘>лЧЇЙѓн0 т6ŠŠЊЈЈ …:N >|x§њѕfХ'вH0‘ЬФєЩњˆBИž6KO‹ &­™Еsš›|}}ЅRimm-§M*˜њžч299999iXJxБљс‡шŸŽŽ&ФНєвK|>Юœ9ѓцЭcf—Ё šииXЙ\трр`RsњYъƒ˜Ъп|ѓЭŽ;шД’žž~шаЁuыжy{{766кйййллГFЪYS$ ‘Xвггsdd„ЂЈ‘‘@`ј_&їCJ Œ™4ˆ‡‡ЧјјИЩ/\И™™ЙsчNУBZЯббQц •–чК’Стmv†іД &в›RЉtѓцЭ)))р§їпЇ/H юи‘вй“лвЊЌBrЧ!‡i,h ѕД2lю@N??ПЁЁЁђђђффd;;;цIЗ1=Y3Ž< XЩœ9sОњъЋ‰‰‰ШШШЗп~лмчххХд$Qž5LГЂЎИИXЁPмЙs‡d\„йЦЌ$@ž?-KПjЕ:88xхЪ•EХЧЧЧФФАв ЩјDКУ№ВШLFL$saњДl{=Йž„9ФмєK’вё™сЁЮAА9ІїOЇЅЅЅЄЄ…ТK—.=z466жdТc~'''YГ(33ѓР‡ЮЩЩС4,**кГgЯМyѓ""",–N/ЌїюнЛ`СŠЂ†‡‡#""ТУУCCCУТТ0z"kd||œўšдЌYГ,ОієїїЯŸ?ŸФ }}}&;”JЅUUU[Жla-/ш,LЇfы-oьўДЩmvAcccŒ=ёв‘5‘оьъъ’JЅ‹/–JЅлЖmУИ6$вёіДfr™5eX#тг‚ 5дг‚a‚tr"LNNвЗЋўљg .ЦфоДН^ётХХ‹ыѕњЎЎЎ„„ГfмЗп~ыууУд7WyГмБzѕъЈЈЈШШH’Ёf$˜љNŠм Зл|bbЂЁЁ!++ЋЎЎnлЖmэээмх&Є;Ј|“оpUЉdф:˜‹˜aZЃЇЙЄ_Т”Ž ХиииЃG^МxQ(ІЄЄЄЅЅСB ˜!ыiŠЂ cbb”JevvЖЩU‹сУаааsЯ=ЧќэххuхЪ•Я?џ<44”^ц"сѓљЃЃЃNNN™™™H?qтФѓЯ?ЯчѓНММЖlйByМКК:''ЧЭЭЭппŸйМˆдYГВВВ  РзззЭЭѕa€ЅRssѓЦљ|~ZZZKK‹enЋ­­]Бb‰AVЌXq§њu|osчЮ?~nnnDD§OZЯдддЦЦF›X>”ƒ7ЇY‚SSSљ|>ѓх9c k"НyшаЁgžy†ЧуЙЛЛџєгOcТ†D:Цžф“‹фБ,Й ф0Э 
Z–žц“вШ‰pчЮЇžzЊЈЈЈДДд‚•ЩН‰БМIBCCkjj$‰L&;qтDhhшЅK—ЬšqѕѕѕщщщЮЮЮ)))(o–;ГВВvэкхююnВ2IЖБцц4>Y‰Yˆl~эк5‘HTVV&‹/_ОlЎцHw˜Щ\˜`hhh09L+ѕ$пяA˜~ S:2ГГГ•JeLLLaaсрр ѕ3І4˜нW===†яї ŒМЏ4!!ЁЋЋ‹),++[Нz5§{RR§ЅфќYЛv­FЃiooпАaЋR:ыїeЫ–•——їїїпМyѓјёуПјХ/(Šruu§шЃКЛЛ›ššжЌYƒбY“Чуh4šЮЮNжл9X*Эž=ћќљѓчЮѓѓѓГ,SА^ЛkЬ ~~~ЦфћЇЯœ9Cфѕз_?}њ4ЃO^^žVЋ5ќ‚Й•–7v_ЪЌ”В‰ХтŠŠ ­V+“Щ№›‘5‘о\Еj•RЉдщt—/_^ДhЦХШцШБ#Ѕ#э‰1kьфзBŒуX"‡‰ Zr= “ћЄй'вШ‰№ілoЫхrŒ)№a‰IмЪ .T($OW__яррp№рСшшhC„3N$б/UШЪЪЂїˆc”чjNu†9гф cй†$>1ѓUгXўф&jd!ВЙ@ PЋеііі†њ“Ч'вH‘lLPnnЎVЋ5|й rD6б“|=M’~ЩS:&/qБxЦрШЪЪbН‚y;ФтѓйТзzX6ИŸ1у­d8ФbБJЅJNNvuu}иrKKK WУ6ПIA‚ЇЇ'ы{“dЖ!<б&ѓ§Б3Ѕмёѕ|H)нц3р-ГРРДущЇŸўєгO{{{•JЅe{ЭЇХŒлО}ЛЋЋыІM›>љфpњcgКИТŽ`f :.99yЦsttДЕЕѕіэлlžXw@ижУ=яс šzZРж­[8€ЏsрР‹Пјx-oЁп=^ŸNЉˆzМnНh“С&Ѓ“H$нннOк— Ь/2л~бJЫ#ЕMВї#ѓ8сK{Œh*DцУжО_<y–ќхh˜NNŸ>-‘HшпзЏ_OŸ2Ъd›7ož8q"((ШњЌ$ лккњћћЯ;ЧЂі№х6З?ы5aЦ‹ХjЕкфћђйѕР$$/ZТєLЎУ4]O? ЕЇKŸO›(ѓх—_Ю;зkLЧЅўу(ї}”Шlcюњ5пЩ-?ešЖAS6w‘Ÿ!y=т4К,ьЇŽ*………щщщ</==}їюнt9}№G||ќ­[ЗLо‘%aяоНп}їнЊUЋ"""оџ§ЄЄЄiчЖФФФъъj“ч“пН{ЗІІцЕз^›.уВјдqрЩ„;,XPYYйззїљчŸ[yœ!"‘ˆ>јЩV„……бЏиW*•–(=еАUЖaЭw›[€”y=Эчѓ+++1Ÿ™ћ†/–WЉTљљљ …т­ЗоТє?00 RЉ^}ѕеЄЄ$ЅRљЇ?§Щ№‡‡‡їяпOŸоxцЬ™ОООвввРР@ŒtŠЂ$‰FЃikkcюC/ZДhяоН§ы_япПпббБyѓfŠЂмнн9ЂеjЋЊЊќ§§1z"k:::юиБC­Vwtt0'E!U‹Х …ЂПП_ЁPоWгыѕщщщMMM===ЌaИФХХ]Йr…љiš+WЎФХХс{;~ќјКuы(ŠZЗnнёуЧ}d2YooЏсVZЩ#jй“>Є@&“сћGжDz3&&цђхЫ ЏПў:ЦХШцzНž;vЄtcіф67fdB0Т9Ldа’ы‰ ю#nŸHw 'Bpp№‘#G:I‚ EюŒ3цMН^ogggлЧЭK—.­­­ ЁO'Ÿq"‘шќљѓНННП§эo•Œe0ю0ЩЃŽaѓцЭ_|ё…››~P$й†< -o,Тm~ѕъе№№pІЧSЉT>>>фё‰td$лХAƒсс>˜ЫЎmѕ$ПОугЏЩ”Ž ХЪЪЪФФD>Ÿы3`”ŸŸпккZRRBЇKђ§zН~нКuєЯњњzc­h|}}ЋЊЊЊЊЊ<<УjЧŽ №єєd ŠK[[›сsUŒA„Ba[[›Iџ*•JЅRЩьЈбыѕДžyyyЬбhVZс),A{їюЭЫЫЃэ‰пяЌ‰єf[[лвЅKy<^PPPqq1ЦХШцШБ#Ѕ#э‰1љmV!ЙуУD-ЙžV† вШ‰№о{ябщЈVЉT=Й3Ž< XCBB‚žƒY3ЎИИX.—;;;чццт#9LђЈЃ;Їopxyy™2л˜Е Уф|7–? q›ЫхrЉTЪTXО|9§м€<>‘ю0–ЗE2W ќБЙУ4WO‹ &§’Єtd(ЦХХ•””ДЖЖцччГvxТ~`zsђфЩккZЉTjxъ)ђz`lbЛИИа?LЮ .œ:uŠ+ЈЋЋыдЉSЌE0ŸЯяээХЇњY*ЧгщtЬЧєїо{ЏЅЅЅНН§ƒ>˜5k§q™О рссС:;Šе'Вfss3K7cЭеjЕH$Ђ?<ЈеjУjфЗ[t:НН=ђЫ ,Г#ЩЮЮ.--нКuЋЁ>Дž"‘ˆ^—XoyLиoГУи‘@жDzГЉЉiгІMссс<iŽt1В9rь=YіD6ЗђВGю8ф0‘AkЎž‡ вШ‰p§њѕЕkзvuu;vŒ>Tй˜žмGžЌ},hoпввтррАgЯž%K–˜;уT*SЩјФb2ъєz}|||}}=ў‘>лЧЇЙѓн0 т6ŠŠЊЈЈ …:N >|x§њѕfХ'вH0‘ЬФєЩњˆBИž6KO‹ &­™Еsš›|}}ЅRimm-§M*XOSвѓ\&''''' KBCC Ѓќ‡~ ::šївK/ёљќ9sцЬ›7™]†‚hbccхryHHˆƒƒƒIЭщgЉ<`*ѓЭ7;vь гJzzњЁC‡ж­[чээнииhgggooЯ) dM‘H444DbIOOЯ‘‘ŠЂFFFс™мm8(@066fв ууу&;МpсBffцЮ; i=GGG™[€VZžыJ‹Зйкг‚šHoJЅвЭ›7ЇЄЄ‚їпŸО ]l,lИcGJ7fOns HЋВ Щ‡ІБ %дгЪАAК9ќќќ†††ЪЫЫ“““эьь˜'нЦєdЭ8ђ$`%sцЬљъЋЏ&&&"##п~ћmsgœ——S“Dyж0ЭŠКттb…BqчЮ’qfГ’yўД,§Њеъррр•+WRУJƒ&ущУЫ"31‘Ь…щгВэѕфzцsг/IJЧg†‡:Рц˜о?–––’’" /]КtєшбииX“MљœœdЭЂЬЬЬ>|8''гАЈЈhЯž=ѓцЭ‹ˆˆАX:НАоЛwя‚ (ŠŽˆˆ Уш‰Ќ9<<@bёёqњkRГfЭВјкгпп?ў|ƒDDDєѕѕ™ьP*•VUUmйВ…ЕМ Г0š­ЗМБћг&Зйa1іФKGжDzГЋЋK*•.^МX*•nлЖ уbLиHЧлгšЩeж”aˆ;L ‚жPO †iвШ‰099IпЎўљчŸ-И“{гєz§Х‹/^ЌзыCBBКККЬšqп~ћ­Sп\хЭrЧъеЋЃЂЂ"##I†F˜m`ц;a(r/(мBnѓ‰‰‰†††ЌЌЌКККmлЖЕЗЗs—›xю ўёMzУU)&’‘7nш``>.b†ižцB’~ S:2ccc=zётEЁP˜’’’–– 5`†ЌЇ)Š,,,Œ‰‰Q*•ййй&W-†W8 CCCЯ=їѓgttД——з•+W>џќѓааPz™‹„ЯчŽŽ:99effZ §Ф‰Я?џ<ŸЯїђђкВe §хёъъъœœ777fѓ"ROdЭЪЪЪ‚‚___777ж‡–JЭЭЭ7nфѓљiii---–Й­ЖЖvХŠ$YБbХѕызёНЭ;wўќљЙЙЙє7>ih=SSSmbљPмœf jllLMMхѓљЬ—чŒЌ‰єцЁC‡žyцчююўгO?a\Œ щ{’O.’ЧВф‚У47hYzš;LHw 'Т;wžzъЉЂЂЂввR NT&ї&Цђ& ­ЉЉ‘H$2™ьФ‰ЁЁЁ—.]2kЦеззЇЇЇ;;;ЇЄЄX МYюЬЪЪкЕk—ЛЛЛЩЪ$йЦš›гјPd%jd!ВљЕkзD"QYY™X,О|љВЙš#нan$sa‚ЁЁЁСф0­д“|Пaњ%LщШPЬЮЮV*•111………ƒƒƒАJf8˜нW===†яї ŒМЏ4!!ЁЋЋ‹),++[Нz5§{RR§Ѕф”^Лv­FЃiooпАaЋR:ыїeЫ–•——їїїпМyѓјёуПјХ/(Šruu§шЃКЛЛ›ššжЌYƒбY“Чуh4šЮЮNжл9X*Эž=ћќљѓчЮѓѓѓГькЬzэЎ1ƒјљљi4“яŸ>sц =з_§єщгŒ>yyyZ­ж№ цVZои})Г^PЪ$‹+**ДZ­L&Уo DжDzsеЊUJЅRЇг]О|yбЂE#›#ЧŽ”ŽД'ЦtЌБ“_ 1Žc§‰&2hЩѕ46Lю“ dŸHw 'ТлoП-—Ы1ІР‡%& p+/\ИPЁPX>>ООО7‡‹угмљЮ„"Й nѓЈЈЈŠŠ ЁPЈгщСсУ‡зЏ_oV|"нЩ\ALŸЌѕсzк,=-N,˜ДfжЮinf 0’œŒЙH!зг$S™*1ІC^ IF3Ђeтїџч'ŠЂўўї 
{{;ЫФќŸ{?ЬЋЅ(*і_чрkюлІ.?x№РСС)МpсТ‹/ОxсТІD*•>ћьГЇOŸnooЇŸ”aр6gd ­,оfчщщ922BQ§гмšоооЗnнъььdЖБ"Ч.‰†††X}"›#Œ”SЃгщДZ­ГГ3оGЦ 5YhLф0 •7t(Ёžфв‘}bB‘`~~~CCCyyyЩЩЩ‡ЖГГУыЩкйoЬ›6gЮœ9_}ѕеФФDddd{{;SNytt”Й…4——S“Dyж0ЭŠКттт+WЎмЙs‡d\ууу›ХмљN˜[№Эеjupp№Ъ•+)ŠŠ‰‰љУў`V|"нAЁvb˜•T™>- Er= sˆЙщ—$Ѕ›•ЬиD0v‘BB2eЉяnФZ9eр ZOлzU№ѓЯ??<RЉДЊЊjЫ–џw‡ЛЋЋK*•.^МX*•nлЖЭмц4§§§ѓчЯ'Й9MaЗйq ђџ>œŒб_;›5k^ Вц№№pDDDxxxhhhXXfьУУУЌ>‘ЭЩЅэйГgоМyФС`Щ3rAШaŽ3ЪfC=-І!˜Pdифф$}шчŸžœœ4WЙ7­AЏз_МxqётХzН>$$ЄЋЋ+!!Y аыz1dЬtп~ћ­Sп\хЭrЧъеЋЃЂЂ"##I†бззg™Y0ѓ0'''ЙŸыX…мц YYYuuuлЖmkooч.7ё нAЏGi,KЊL0Œ™І5zš=2~ SКЙ™$РŒMc)’ЄŠд™*1}"Џ†жL€ѕ4)ЙџХgЖЛЋ‡гПФPUѓ™‰/Й[ќНЙsчЮŸ??777"їОјі IDAT"т—Пќ%]xшаЁgžy†ЧуЙЛЛџєгOЌЩц4ЕЕЕ+VЌАьО”Б›Uccc†YЉББ155•Яч3п1ВfuuuNNŽ›››ПП?Гi9іЪЪЪ‚‚___77ЗœœLsrщ|>ttдЩЩ)33“ФSЌБ#н,$„fssѓЦљ|~ZZZKK‹Йzš;L˜Pdи;wžzъЉЂЂЂвввŒŒ s‘{cy“„††жддH$™LvтФ‰аааK—.бџE955ЕББcКњњњєєtggч”” ”7ЫƒƒƒYYYЛvэrww7YyХŠзЏ_З, ОѓŠCCCЯ=їЋ>ЋйќкЕk"‘ЈЌЌL,_О|й\Э‘ю07’Й0Сааа`r˜VъIОпƒ0§Іts32РXz"'ц"…LЊ$S™*1}"Џ†жL˜™ыiцЕгжМš…КюЯёЦюќvIBјй=_ШНњF’——wьиБ‰‰‰cЧŽ1ЏDИrхЪЁC‡z{{333YЩ‚Є9“q~ѓ›прПŸnюЭщ'Nќїџ7“C‹ŠŠЂЃЃ;::ўўїП›МЧР­Y\\ьээ}уЦ …Bёх—_bЦОoпОЏПўњЪ•+ЭЭЭЬ1dsrщяПџ~IIIcc#сѓtжиЩ!„faaс /МаееЕ`С‚їпп\=‘вЩпX‚ EV€544ЌYГFЁPDDDlоМ™фЕ!фоЄ(jсТ…Ќ=ж„№xМЙsчЖЗЗ/]К”u)ЕГГышшˆŽŽ.**ҘюУ?\А`FЃЙџ>sž\ysЃnpp№РяМѓОšŸŸпo~ѓ›ŠŠ lBxsŠ{іь9~ќ8+–X…Шц п~ћmCCУииГйƒ<>‘ю dŒ ŽŽŽ_|ёУ?ФŒшaшI>Е‘i<Ѕ›Š„†œ˜‹IREъ‰L•˜>ЙWCkІ &n0?ьѓ=YYYЌW%А |­‡<о“џІ гнJ†&‹U*UrrВЋЋыУ–[ZZm[G˜лФгггВ5§УраЁCŸh“љўи™RюxŒz>Є”Ž Ащ›ЌfЦˆ€™ЩгO?§щЇŸііі*•JЫіš?Ц6ф•ЗoпюъъКiгІO>љœўи™.ю€А` т&€Љ†NЇKNNžёУmmmН}ћЖлФ'ж6№ЈлОНnћv[U€ЉЬ?эŸЖЗГћџ–/џЭЏ~EQTuwїЁЋWё/$ЊлО=ЎА[nXјЮўг[Bнп<Удѕ40ЃжгЏП№BЗї›GRЕуеWŸ{ЎМ­Э&b˜e4Ь$ўщ<—ЅПќхЉ††БяПћўћџjl\6w.EQ†w‘™п™нАmx’љЇћгAГf§yx˜ў§Яп|ѓ‹YГŒ5ЃwtляСх›Soў}тчЊЖлВџнњЗqG’ї Sk=эТу§xџ>§ћИ89Y/€ощсщъTєцѓЩ1avЕљX3и˜ќг~‡йЄqНї/•л–Э оНїУGЛп+яЃ€Qр|DxаыѕzНўБШЕО‰DвннmAWeШS< —Їщиx2БG–†††NгeЧж­[пxу “њгšъЌf“БПјт‹eee$…ЦJ$оьъъrwwgўtwwящщ1w‘œœLџОaУFЈ^Џяьь<{іlttД5~‡…>ЬЈѕєєE$iЕкщЂэж­[їяпORXiЅПќх/^^^ ЇЇG xyyŒŒ˜+єЕз^Ѓљз§WУŸ111ЇOŸў№УЃЂЂ†ђLяѕДс-1Тлc|>?11БВВ’ЂЈРРР3gЮєѕѕ•––тJ$Fгжжo(4==НЉЉЉЇЇ'==Ђ(wwї#GŽhЕкЊЊ*Іš^ЏЗГГГxП W:г•aŸЦ e2YoooyyЙЏЏ/‰И_|‘ЂЈ7n Џ^НЮ”ѓx<•JеййIXшѓsу AіщууƒД'EQєˆ***Фb13L•J•ŸŸЏP(оzы-КP,———їііЪd2ќwьиЁVЋ;::6nмШ4W(§§§ …/o:C*++љ|>SrчЮYГfНќђЫ.../ПќВЗЗ7Нž&;EQ?ўјcXXXXXиН{ї Х_Нzѕ?џѓ?џу?ўУиD@*œ\х˜оыi мmAAAљљљ ЏМђЪ(ŠњєгOKJJ~§ы_здд|№Сј_zщЅ?ќ0//ЯАќљчŸу7/^ќђЫ/SЕmлЖ‘‘‘ TWWoџЧЁ0ЁЁЁД>Ь/&5gUуJGіiLЃЃуТ… U*UnnЎБ$†рoNзжж._Оœ)Нuы–BЁ ,цJDі9<<ŒД'3ЂжжVCwьнЛ7==НВВrѕъеtI~~ОJЅZИpЁ““оьйййЁЁЁЋV­ZВd Гт—ЫхэээЯ>ћlggЇЁtЎ cІуzѓРЏМђJCCC~~~PPѕћгЫ–-ыщщYО|9sкЌБ_Иp!11qЭš5хххмбЉTЊ_џњзЦ&Ryфьр*™qLEXы?ђћг'OžЌ­­•JЅШД|>ПЗЗ/згг“Ђ(Їгщ Ы яRuуЦ КФУУCЃбр•gaЎtŠlџД^Џ‰DE‰D"•JeВЙЩгQQQBЁPЇг ‚У‡Џ_ПžМ9Fc5‘і4Q[[SшттBџ  еj5]гзз!ЭЭЭСССЌBУцjЕ#c:$ОООRЉДЖЖіфЩ“™™™oМёFGGGttєЭ›7пxу z§jжи…Ba}}}uu5ŸЯчnкvpp #3Œ)Я†ЪCR€ЉŒЃЭ{œœœœœœdўŒ•Ых!!!&лвб>>&&fчЮїюн#,DjˆьcOfDє' š~јўщшјƒЧгг“ЎirGВH$b6AгaЂ‘ўх/љЫЊUЋўњзП~љх—п}їнПќЫПєѕѕ™;іБББ[Зn§ЯџќЯO?§Ф•хссёнwп›HхёГлL›ѕєФФ„““г§ћїНННё5гвв‚‚‚о|ѓЭK—.utt|ійgзЏ_/**њня~WWWчффдееeЅ‡‡‡—.]њу?šЌЩН]jйs;;;ют†Uшээ=<<ьххХк\ЫПsš1{CCCVVV]]нЖmлкллGGG)Š"/DКYг˜=™a†366Fзœ5k–IЧ|§ѕз†…уууLѓёёq LЧ%66V"‘DFFVUUЅЄЄ .^МјЅ—^њЏџњ/ŠЂZZZ^{эЕІІ& ЦўяџўяЦ„>џќѓ7oо46Ъ#gWyШS0•1НњіэлЋWЏvvvNKK3YyppААА0&&FЉTfggSХчѓGGGœœ233mЅtuuuNNŽ›››ППџž={05C9X nlllўќљ& 7nмШчѓSSSёkzТзz\ЛvM$•••‰ХтЫ—/›[ˆ”ŽЌiЬžЬˆ0іillLMMхѓљЬW IЏЌЌ,((№ѕѕussЫЩЩЁ ›››iAiii---x_оœЮЮЮV*•111………єzє/љ‹НН}}}=НžЖЗЗЇя@[9vЅK—юмЙГЄЄФиD@*œ\хЭ§аРc†uy^ЖlY[[›FЃ‘H$\ЙзЎ]Ћбhклл™Wі’ШХoкvuu§шЃКЛЛ›ššжЌYc“ЕFbFFFOOОPЏзчххiЕZж!ИН‘ПsZ Јеj{{{FУь!!/DJGжDкSЏзчццjЕZУ7–nfz‹ХZ­V&“a6пSХуё 4Mgg'§ЊŠЂfЯž}ўќљsчЮљљљaYѓfn—ююnњ“bБXЏз/YВФ‚БГ†FoЪПyѓцйГg-Z„QЉ<љь0ЦТ…  d-˜rыiИуeх‡ eeeєs“…РLВвCRОДД”ќ€™Аžfі`Р2lВY"‘tww[аеВњ‘ s кгф™SDљ'p/ОM^Ы ЋіOOС‹йж­[пxу 
“я™цmю0m2vђwчЭ< ‡IшќЫјL‚yЙХоtppxчw:;;;;; ьээћ<выѕŸ|ђ yч„0XL‘5Зeo”ЗxnТА§zz "‰ДZэtб–№`—ЩЬfjjъSO=КaУ†Љ еЏ~ѕЋЇžzЪЖ}Z.Ф'№фЎЇёGЋ сѓљ‰‰‰•••Ež9sІЏЏЏДД400пP"‘h4šЖЖЖјјxCЁщщщMMM===є! юююGŽбjЕUUUўўўL5Н^ogggё§3ЎtІ+У>ЪdВоо^У@№рOПzѕjxx8SЮуёT*Ugg'aЁљ0ХbБBЁшяяW(Ьa4zНщ.ŽŽŽ;vьPЋе†G$bš›<6МВВ211‘Яч›4#2ШЃщMМђ†$$$ьнЛwddddddпО}ЏОњЊYІ#—ŽІ1NŸ>Нiг&“V2&caУMHƒˆХтђђђоо^™LЦ4A:3 Yj[Щ†Щ ŸH•ШуЬ^OхччЗЖЖ–””ФХХQUWWЗhб"''Ї”””ГgЯтЩd2—ЄЄЄššУђcЧŽxzzв=ькЕkїюнЮЮЮ4І­ЙУDJЇШі{шѕњќќ|ggчММ<жшЦTТь"—ЫЅR)SО|љђввRђBГ†Йџ~ZyЙ\Оoп>“a!“ЩN:%‹нннsssIš›<м$..ЎЄЄЄЕЕ5???((Ш˜Э&ъНI8іЎЎ.WWWњw77З›7ošеœ\:&цЙНЙККоИqƒ^PbЌD>euŒ… r=ЭбоН{ѓђђшцLM+gV$s“2>‘*!уlАž>yђdmm­T*Eо хѓљНННxAžžžEёx<NgXЮ:С›Y+xxxh4ќв„pпЇ1щф+0‘HDQ”H$RЉT&››м9UQQ! u:@ 8|ј№њѕыЩ ЭІZ­І•їѕѕUЋе& ТЂЙЙ988˜мžф;S}}}ЅRimmэЩ“'y ШЈ#є&сиoнКeggGџnooыж-Гš“KЧ“л[NNŽ\.gў$Ÿ2ЦњфZоXи згјЈуъ`™уШ#“ЌŒХ'W%V|O,Ž6яqrrrrr’љ366V.—‡„„888˜l{яо=ŠЂ§‡?ќљbЂЙЭIІ’БАБ ъ,K\ЬdVВBЦ'^%ЫŒ ЩЇ!0“YЖlY[[›FЃ‘H$\жЎ]Ћбhклл7lи€onlЃ6З•ЋЋыG}днннддДfЭ›\З0322zzz№…zН>//OЋеVTTАv{Г’ПsZ Јеj{{{FУь!!/DJGsіьйчЯŸ8wюœŸŸŸIƒАўфёxІГГ“~‹БцVОZЉ20Q—аееeв›„хррАkзЎ›7oоМyѓwоЁЗ˜ћJщ˜˜ЧXщаЁC+aТ›d9ˆ dК@ŽH,WTThЕZ™LЦbЧЕї{цF2Щ4$Я`ЦXИpЁBЁ€K ˜ЗдУ€­ЧЬц &ёiHiiitt4˜lЖžfі`Р2lВY"‘tww[аеВšфЙщ V*oђрЬ'0BР `VэŸž‚д­[ЗОёЦ&п3MkNЈџУ[ƒ’П;xHVТМ•м\ЊD`ЬтЕЛЛћдЉSєБэШBŠЂЂЂЂ …VЋmii‘H$„сjю(lhл:–яРЬ\OOAD"‘VЋ.кьšn:ЊDЏ_ЃЂЂЎ_П~рРc…ХХХћїяійgзЏ_Я=’Sw=mюсEёљќФФФЪЪJŠЂЯœ9гззWZZˆo(‘H4M[[[||МЁаєєєІІІžžњИwwї#GŽhЕкЊЊ*Іš^ЏЗГГ#|ZM"щЪАOc…2™ЌЗЗЗММмзз—DўдёЋWЏ†‡‡3х<OЅRuvvњјј“KыЩ:w†[шшшИcЧЕZнббax‚dMБXЌP(њћћ ы€ГlѓцЭ_|ё…››Цt\Ч!#„<‘.6ŸoRYY™˜˜Шчѓ‘џћ§їпійgO?§ДБBЉTњс‡677пПџЯўГL&3+ЖНA “вJHщx+Рt]Ocр> ЪЯЯohhxх•WшћjŸ~њiIIЩЏ§ыššš>јпa``рK/Нєс‡цхх–?џќѓoМёЦтХ‹_~љeŠЂЖmл622В`С‚ъъъэлЗ3Ъањœ+ЮдФKGіiLЃЃуТ… U*UnnЎБѕЂ!ј›гЕЕЕЫ—/gЪcccoнКЅP( ‡‡‡1Ып… ЖЖЖ™[˜КjеЊ%K–`VчЦjЪхђіііgŸ}ЖГГ“ё‘Й$%%­YГцпўэпўіЗПaLЧu2BШCб˜‹Х'W%n€8pр•W^ihhШЯЯ b5wssKKKыээ5VјмsЯ555Yyђdmm­T*Eо хѓљЌхWЇЇ'EQ<OЇг–ГnpоИqƒ.ё№№аh4˜ХЋžƒЙв)В§гzН^$Q%‰T*•Щц&wNGEEUTT…BN'>М~§zђBЬ0=лкк0…ЭЭЭЬЮ]<ШšjЕšюгззW­V›`zН>>>ОООžЙ™jЬtHЧa"Š„.6!ф{‚}}}ЅRimmэЩ“'YёЉбhžyцc…:Ž>ЬœdТrcў‘“ŒY‰+e%˜т8кМЧЩЩЩЩЩIцЯииXЙ\BВИwяEQ<`UО{їЎсŸоооvvvііі†ВȘuOۘtBFFF(Š …&u0ЙsZ­VЏ\Й’ЂЈјјј˜˜˜;wоЛwАDOz1dЌP$ ‘ Yггг“юsddD X`Яттb…BqчЮ“Іу:!f…ЂYbжž`VФвссццЖaУ†;w&''# ЧЧЧ=<<ЦЦЦHD†Н~Фa%Є•№вёѓІІї{LLL899б—d|ЭДДД””ЁPxщвЅЃGЦЦЦRUTTДgЯžyѓцEDDиJщсссˆˆˆ№№№аааАА0LMђћгxьььLвіёђђЂ№;ЇГ744deeеееmлЖ­НН}tt”М#бгpqЦ- tЗцјј8нчЌYГЦЧЧ-АеЋWGEEEFFš4a„`BqrrЙЊCњФ›\bcc=zётEЁP˜’’’––fјПћлпNœ8БpсBc…цЅ—^Вэ”yA&Є•вёV€щКžО}ћіъеЋIЎmƒƒƒ………111JЅ2;;›Ђ(>Ÿ?::ъфф”™™i+ЅЋЋЋsrrмммќ§§їьйƒЉЪСqcccѓчЯ7YИqуF>ŸŸššкииШZгГкОжукЕk"‘ЈЌЌL,_О|ймBcв=0…•••ОООnnn999˜>‘5›››щ>гввZZZ0Эирр`VVжЎ]ЛмннёІ#ŒL( qп˜є;B•ВГГ•JeLLLaaсрр ы]\\$ ы6Пaс‘#Gђђђ/^Ьуё‚‚‚ŠŠŠЌŸ2Щ м$€ДR:оJШЬФ€iРВeЫккк4D"БръЕvэZFгооОaУ|scЕЙ­\]]?њшЃюююІІІ5kжифњŠ‘˜‘‘бггƒ/дыѕyyyZ­–ѕо noфяœjЕкоо^Ѓб0{HШ ‘вѕz}nnЎVЋ5| ВЧуh4šЮЮNњЕ*ЦњDжœ={іљѓчЮ;ччч‡iŽ 0ц—eЫ–1ы?Є•ŽCF&КККLʘ+ШЪз`3MzzzЪЪЪцЮkЌЂЈшшшѓчЯгяŸ^Зnс“ўѓ‘ вJф™С .T(Ђ`†@О (++ЃŸ}›,ІО•РqбJЅЅЅббб`^xтжгРLТLX†MЖ K$’ююn К‚ а\ƒpпиmЅ•Шпю#Ќ€Oo_~љ%ѓ~1“В_јErіВeП;oŠИи&Ѓ&<ЫњбжађмСpўќљsчЮ1:88МѓЮ;іііfШ&ІxxFЖ8>aљЬTfкїE"‘VЋ.кьŸŸ˜˜XYYIQT``р™3gњњњJKKё %‰FЃikk‹7šžžодддггCтюю~фШ­V[UUхяяЯTгыѕvvvŸ+Ю•ЮteиЇБB™Lжллkx$ ќЉуWЏ^ gЪy<žJЅъьь$,єёёA EКC,+Šўў~…BС:Œ†ФJHwћiOcСРХббqЧŽjЕКЃЃcуЦ˜щѕz•J•ŸŸЏP(оzы-wRYY™˜˜ШчѓYхQQQ7oоМyѓц /М@—$$$ьнЛwddddddпО}ЏОњЊѕѕбйp#­„”nЬJ№Dcжz:(((??ПЕЕЕЄЄ$..ŽЂЈКККE‹999ЅЄЄœ={/H&“ЙИИ$%%едд–;v, Ргг“юaзЎ]ЛwяvvvЮШШ8x№ 1mЭ&R:EќшŸплл‹фщщIQЧгщt†хЌ›І7nм K<<<4 ~ХРТ\щфK‘HDQ”H$RЉT&››м9UQQ! 
u:@ 8|ј№њѕыЩ MКеаjЕšVоззW­Vу‡ЩЕЦHПкг˜;X477Г ‘#выѕ...єЯ“ю@тыы+•JkkkOž#UVVјњњКЙЙхфф˜Т›гйййJЅ2&&ІААY&.ZДшнwпЅCынwп]ДhEQ—.]кКuЋЗЗЗЗЗїж­[/]Кєbў!™;‹‘VBJGZЩЌOМ0YЖlY[[›FЃ‘H$\ќжЎ]Ћбhклл7lи€onlЃ6З•ЋЋыG}днннддДfЭ›\ž1322zzz№…zН>//OЋеVTTАv{Г’ПsZ Јеj{{{FУь!!/DJGКcіьйчЯŸ8wюœŸŸŸa§„„„ЎЎ.Уar{FКуwVŸЦьilЌ?y<^AAFЃщььЄ_џblD†Џ”f:БђmпзЏ_ Ѓ Ѓoх:88ькЕ‹~щЧ;яМCo‡0k+?їЯGld’ј$ŸкЦXИpЁBЁ€ €yыxќk=€G Xў1ZЉДД4::Ь €yыi`&a&,У&[%IwwЗ]=Q m;XЬ +№ЏцАЁ 'й›Лус wРc[–YпЩ—_~9wю\BY„я {xKђwчM}?Вuе#XИ[яMn€с—Є6_˜Zа›eF{xюАx"РђxФ8ЮАёˆD"­V;]Д%<и˜ОоФC0Єзы-;iь Sгяї ?єЯч'&&VVVRxцЬ™ОООвввРР@|C‰DЂбhккктуу …ІЇЇ755ѕєєаvИЛЛ9rDЋеVUUљћћ3еєzНс­>щLW†}+”ЩdНННхххООО$т№ЇŽ_Нz5<<œ)чёx*•ЊГГ“АаЧЧט7Йібыѕ„cGZž\вШШ>ЩУЉЇБXB| ttмБc‡Z­юшшиИq#](‹ EПBЁ`Nчбыѕ*•*??_ЁPМѕж[$о4ЄВВ211‘Яч“ЬИGц.о†щiOЄtr{РуЧЌѕtPPP~~~kkkIII\\EQuuu‹-rrrJII9{і,^L&sqqIJJЊЉЉ1,?vьX@@€ЇЇ'нУЎ]Лvяоэььœ‘‘q№рAcкš;LЄtŠј‰v~~ОГГs^^ыtc*сv‘ЫхRЉ”)_О|yii)y!ЙOЭЛЙ–'„ь6 B"“ЩN:%‹нннsssщТ§ћїго”Ыхћіэcњ\Зn§ГООžФ›†ФХХ•””ДЖЖцччMwpџ|”юрІ Є=‘в1і€iМž>yђdmm­T*Eо хѓљНННxAžžžEёx<NgXЮ:СћЦt‰‡‡‡FЃС_ШЩyFJ'_1ˆD"ŠЂD"‘JЅ2ймфЮщЈЈЈŠŠ ЁPЈгщСсУ‡зЏ_O^hюzšpьф–'„щ6 BвммќџЗwїAQ}РЛg—]о\^P#бKbЅžœ&1hyEJАLЉЯc"QIтI”в,9RŠoQыruMЄМx+•XI*Ї…ЏIљPgˆ €a7EУе,ŠРЫvњў‡yлйeй~?PlяЬєtџК{z›avД,бnЗ бŒŽŽЖлэт1ƒ‚‚„ŸnЃЉ*:::##ЃАА№Р§!ЪосЗpш ZѕЉЬ]Е>ќЬїїO3ЦcтЫ„„„Э›73Цd2ЙнЗ­­вйй)лјіэлв—‘‘‘%%%”RŽуЄy)yt[ЊVюнЛwтt:УУУнžƒл;ЇэvћшбЃчЬ™CILLŒпВeK[[›СDOOо`йзМёŒTщQГщM‰ЂЂЂnмИ!K Ђyяо=›Э&ІџўћяТOГйь6š:Єџ„Cк8…IАŸУ!.TыS?wя рзљДЫхВX,‘‘‘њ[ІЅЅХЦЦ.YВфдЉSUUU‡>{іьЮ;ЗnнZTTdБXjjj|rв---3gЮ|№рл-•‹gо§уЅTyй–%FFFЖДДDDDSњwN‹е^\\œ™™YTT”]YYщt: !Ц}EZLšgŒ™L&—Ых“hъ4­ŒTcd$їЇžzъчŸ–&ЖЗЗ б:th{{ЛбTJHHHII™0aТЩ“'SSS›››Ÿ№pЈЊѕЉšЛЏъ їмџ?тѕызчЯŸ˜––цvуцццœœœјјј‚‚‚ЌЌ,Bˆеju:‹eЭš5О:щгЇOЏ]Л6$$dфШ‘В›••Гg/Вkmm?~МлФхЫ—[­жeЫ–•””шЯщ >жуЬ™3QQQ‡Љ‰ЩЯЯї4Q+їо”]ЇцoмИ1qтD_ESЇйЈfЄ##БNœ8БmлЖшшшЕkз ‰чЯŸЂ™––VVVІ@ƒ‹гYYYёёё999^OўY8”У…j}Њцюi}т!z№GJJJЊЈЈp8)))^\“-Zфp8*++—.]ЊПЛжкЪН‚ƒƒssskkkKKK“““}rедЩ1==НЎЎN?БЉЉiУ† Ч—нэ-лбј3Їm6›нnч8ЮсpˆїOTЭ]ѕnrуeзЉљyѓцеддШž‚b$#еcъ4YFZсаЉщЫ€€€mлЖ9Žъъjсщ1„#F=zДББёШ‘#У‡—ю({Аt/оТ!Оєs8Œtуcˆ–ИИИММ< ц§šёЫМўc=``AрD}9к… ^xсƒy|Œ—‘oTіŽёgч И@їнJџљwЂКЄ!V6EŸЬЎњКrjjjBCCХ—ЁЁЁuuuН?№№№œœœŠŠŠ+WЎ9r$11бџ48†xяNИя:‚зЃMпŠб`ћФЈЈЈ†††rЖПиЏCьѕwљм­[З"""l6[]]Эf‹ˆˆОЁН—vяо}џў§ЙsчОјт‹лЗo_Иp!ЦўйРГљДёяbY­ж œ8q‚2jдЈЏПўњђхЫ5j”ўŽ)))‡ЃЂЂBК.еддДbХŠвввКК:с7BCCїяппааpђфЩ‘#GŠ›555QJНўГЃ2wёPВoСPM\Зn]}}§БcЧЂЃЃdЇџ­у?ќ№УиБcХє€€€ђђђъъjƒ‰У† г ЈЌŠT‰‰9vьX}}§КuыЄSЃGжззПїо{т–ЊсP­Oе-Зеšзj6J111yyyWЎ\ЩЫЫ“~чŽВЉг‹оБjеЊoП§6$$ФmмЅNœ8Б`СЋеъ]пь}рŒИyѓцаЁCgܘ4cЦŒШШHa>­uLЁw(ПэHfъдЉЛwяОsчNGGGUUеЊUЋˆЦЊМjcPЭ=>>>??ПБББИИјЭ7пtлРdcˆёfcœџ;‚tLVmŠЊЙoŠ˜O{OЙTЛqуЦтттйГgяйГ‡ђЭ7п|ійg/НєвїпПcЧ§Ž5jкДi}єб† Єщ“&MZМxёЋЏО:cЦ BHvvіН{ї^yх•гЇOoкДIЖtgd OмR?wеcjed6›уттЪЫЫзЏ_Џ5щqЛ2$&Юš5KLOHHИvэZ^^žСФ––ВYумИqcyyy\\œХbзЏ__YY &Њ†CЕ>UЗ4оBДj^ЋйШlоМЙВВђх—_ЎЎЎ–žЇВЉгS .LNN~їнwћэ7И+›тž={fЯž]\\МqуЦииXOѓэ}рŒ|rжЇ“’’ъъъfЭš%ЎOkSш/^дQ]]нІM›FэЖХЊ6емssswэк5nмИwоygђфЩž60уЭЦ8vх˜ЌкUsзiЧфя$†ёѕщfddЈ.аZ­жњњz§ŒТТТ„жЋWЏJгekZ?ўјЃ2dШ‡УЁ3hR№4wbјцХЈЈ(a!АММмэюnяœž§єгЗп~лxЂзaЗлэB‰ЂЃЃХФђђrБ˜bЂj8TыS'pЊ-Ф`ЭkNFZ"ЛнЎгРT‹iМ›ššЯ;Ї\є5~ЛjtttFFFaaсД2э‹РœOЏYГfётХUUUSІLЙtщвтХ‹…I˜VcsЏЈЈа9lTTд‡~XVVVYYЙcЧŽЁC‡zдTs/--]ЙrхиБc ~:’гxГб:šrђ[Gа“ЕšЂ2wІЈJOГЯШcŒI—K7oоўјуМММ›7o‰ЛNWњCgЄумКukюмЙwюмЙpсТ§ћї_{эЕЫ—/ыSЬ]˜ѓiЙ{їюћяП/LЌWЌXБwяоЗоzЫxйUsЯШШXЕjUjjЊЭfлО}ЛЇџ~gМй…„ЇŸ;‚lLVmŠњЙ{з0Ÿ~ЬхrY,–ŽŽŽШШH§-гввbcc—,YrъдЉЊЊЊУ‡Ÿ={vчЮ[Зn-**ВX,555>9щ–––™3g>x№РгЅ&тэŸ))ЅЪ+Š,122ВЅЅ%""BИЊщаПsZЌітттЬЬЬЂЂЂьььЪЪJa&d<б8еЗЖЖ %’ЎўњыЏУ† ЛsчŽtKусPнRЇ…0ЦL&“Ых2ЗкллХЕЗЗыlЉZLzЧќљѓїьй3aТ„ЊЊ*§И+%$$ЄЄЄL˜0сфЩ“ЉЉЉЭЭЭвwƒ‚‚Ф є[рTчггІMћђЫ/ !eeeoМёFiiЉЮ1ХобккjфјwяонН{wuuЕG}S5їšššŒŒ BШєщг?љфOчгЦ›M/GАОшЊcВjSTЭ]П)€Ш§§гзЏ_Ÿ?~```ZZšл›››srrтуу 
ВВВ!VЋещtZ,–5kжјъЄOŸ>Нvэк‘#GюкЕKeHЦ‹ьZ[[Чя6qљђхVЋuйВe%%%њszƒѕ8sцLTTдЁC‡bbbђѓѓ=MдЪн`ˆKJJ–-[fЕZ—/_.&ž;wnХŠЉЉЉ^„CuKrуЦ‰' ‡‘OSчЯŸb”––VVVІГЃj1UЉе;š››333?јрщCх .NgeeФЧЧчффˆ3˜мммgžyЦl6''' +С}8#їOswюм9a>ЭqœАˆЋuLБwыі‹/Θ4i’еjˆˆXНzЕлчlШƒjю{їю}ўљчBCC>|шiї7оlz9‚ѕQGPŽЩЊMQ5wеІоHJJЊЈЈp8)))^мЗhб"‡УQYYЙtщRя• м+88877ЗЖЖЖДД499йгщЃGЙBвггыъъє›šš6lиааа |‚lGуЯœЖйlvЛу8‡У!оCb§&))щиБcWЎ\ЙtщвчŸўєгOХ-Ш:A5їЙsч\Нz5??ъдЉž~ 3оlŒMxщчŽ`dД1>P/,@_]H:$ќБеmbп1›ЭзЎ]ыЭТТТ”џG5јјА˜~ё8№SФї#€џцг§Сœ9s МлwгІMССС+WЎмЗoп шр+ц8џ3Ѓ žРЉПгщЬЮЮіnwЇгyётХызЏЇЇЇтZ|Х|B0јаwЦMMMЪФgŸ}Б№f>­:У№ЮчGмџнъ§п­іеf§™Y6Ч•ОLџЫ>я*лqЩк)oІЧ…†YПкQізм т6˜OР@ЧЩцСТTXќЅїцќѓ‹щџžPrъкŸЧь1pЈqLЬnЗиџнъџкwvўвb<ћц? kЪџзЃ ўќюK„ov]ј§ЗЮЏvœGР`bhС82fШцх_ŸјњТЉS=ЭрйŒ"„ќыW9зОсЏŽЯv;}GT`PЭЇ ŽWu<ь*?{5њЉpO3b!„йgџЗхп§У УжўЧlT: f#§§џBКК\чёѓЊўН3(дRіп?QJ !ŠЁПНЏюл№ƒ>џСцІ_Ѕ/;;\ЈtР|кЈТЃ— !S_{юе9ЯB.ў№?њлуўi@дŸ?-ќт“[/}r1њЉ!ыїНpўoзrџх4*@S_?"ОoР{˜O@џaˆ' 4xОќТDBыI|—•/Dd„1Т%”Rс[Лгэ(M”Н+nЃќEu/ЗoЉœBЧo ?)5iq&B%хzД=•FŒ˜МŽzžЋWv3хЄЇ!љ‰_C)л€ХЁЅБ#ЁДgЛЫb&DЋ 9вIљ™SJhнzT%„ЊзЙ{ЯЧ)тэ>ž˜.ОeтП”§”53Щ[„є,—лvЂеЎ$ч<=ˆ^жLЃ Iš„ДАOƒги0ЂJБ}ЉžЅїЂ„p’З„Gаѓ„№jлЊtІX’LІ‰8wоч‹„1Ÿїrqxщёa•1J Ѕœ~UŒЛъZ•ч)Lp… Д8}јd"!ФC<В0E>и“r1ёёG&ЦcЇе–˜r9вƒFЃ ѓщў=W—­Oћbфъ§`O ž0ae›1Ц„5J)ЯуyђxН‡‰ы>нѓnЦTGjйхAэЊпНЋdвУщ^‡VP\Ћ ЧY,aE:ТњЁK>ˆз`§ЮeќєўШОЩѓяЇ§ТŸ(єў4tр§VћžЏO{\Ў~њ1о‹-йТ“лiЂb1FљaиHCэўєеЧT3l"Ь\Н]!aОUФbђhх›чyaMМїыeЪІ+Ќ’‹йТ /ЛK/:ХЄї№џАГї,‹ IENDЎB`‚Scrapy-0.14.4/docs/topics/_images/firebug2.png0000600000016101777760000020742011754531743021220 0ustar buildbotnogroup‰PNG  IHDR{uяв~psRGBЎЮщ pHYs  šœtIMEи  );уЛfВ IDATxкьyXЧћР7\ їrVT„ЊЈХЋоЈX№.і‹Z­ЖЖVPёЈж­Е*V[[ **x+ hPАѕЈхFQ  „IIШЙП?–пCВ,gQпЯУУГ;3;ѓОяЬЛѓfіЂЬ Z}љєЄ3FЩT›žэ`ЕЌс-B“ВFш }КЎf лЦ Оm`Єѕ—ўМUжг[vЯШ|—М A‹GЋ/c3‹Х4C­!c~kу3ЩјПЕИслˆLŠЮѓxvЉРSW‚'2_‰њ<НјЬгm O”ˆбщŽyžzZишt|tнtGЋsЋЫ@‡Жќƒ€р=‘3999::КsЅŠŠŠJIIG%щw7™—lВ§mћы‹GЋСкЪ‰=UЩ9БЎ5xЧ<юOЅQОі-zWЧLGММВUtt)M2~p“Їœ˜uПf •ѓ_9ёя\OCтpѓ§ёA˜D тќяgSшЇžD"IOOїїїямj§§§гввЄR)X˜ FІкуЬіœw‹=T жh+—~ЉоuЦЕя`]ŠS_кžѓn_эВ'O€єV№J2L[dq+ЖN9%ћ/ўœ/­Гю7('о<[їЩџ,`„IфНј%ж+џH‡ќБЋ2!К†У–йЙшЎБYАкІЌЈqуМ’ИœZoFП ђщР‚ ?;ЎšT„9л0Jц}Оwd(3хЇОN†{р§ыМ•Хљ"Aњ|ЈџХ6ЛЇ›7ŠчоЏ|p3ѕ G!Gf-Г йп[мˆ)OЙФЁPO‚,зъ­­гtY$+ЙЁЂ8OhiЋћХ6;џхV­Ж‚§ІФYт2зБЄЧУ*ЏŸЊ50жђgМП7Э ЂMЖкЂ&9х2T“ѕTШЭЭussГВjjБ   55•С`шщщѕыз/ РддAхЃ"##Б ЁPxётХ‚‚]]н1cЦјљљaщжжж...љљљC† w%Iџ!ЌrIѓ oЃз ђъЉш№fж§]ЪЄyцы#•‡JЫЩO9Б’!љmћыЧwъыыd.§iŸoБ›Ва\эЋчШmЋјы:§ZJ3ађє1 Ж3УT­Rš0БыЂ‡-ГДеUNљb›ђ.Б;Ÿˆюзо\~ї W.C‡Œ5 љЉЗ›Ї>Щ\ќHЌiQЎш№fюƒmЪЄyцЁ‘НЧšфЈTTМAŠŠŠФФD:ЎЏЏ?mкД#F`щИУтЎ ^IяБЦс+‚zЙЁ‰v“й~Пзї‹1…x™zŽ<чя†ˆ8зVЯсЪ.FмбšFЉІ“Я+шb,ёЪoе;?Їух‰g2нŽЩˆфDC<Т |™М›wpQцоН{ёёё0\{qЊ§Уsёш уŸ”њРС(Š>ћW0У%O"nžХŠЮy˜Г)заr>јvкKмЅ1Ў§СžўВеFБ\хјщйП‚бYoЄ•H$ЁЁЁФ'‹7 ‚&ё‚У‡уY555сссрЋd&‰ž,=‘;г5?.’еnЏйіП’3?U)чЦьЋ$qю\JŽЈ$9™ зЭ‹фэPYeŒRb]T(ЬЮpЩ њшљб-ЬЄг5ŒBywhѕDЄ"Цщ§Uл‚ш$sЩАuQЩЙУ,хмиƒUš4Uёж˜˜˜GМЅл‚W’сE–`еЄBlћіљКŸ‚ЫА04щtгPY6њХ“дzМ<ё,CІЃ;8 сОLоЭ;8‰рШdВАА0.— УЕЇб­OЭ\њЦѓГG–ОhDdР0ЧЈ—БёЌЫЧи&:Ѓ?Q]x3гL%хщ?хЋ‚ŒišџXаjЃf7WиЧ‹ж(TМ‘ђЁ>ГИЉpж§хжНЧ=Я лŠ b‘т№FцLзќсКYУ(™cMrъXвійVйМЖЏХ6щХчѓMLL4нšуююŽЇlйВe§њѕ))) …KDQTљЈЪЪJ|лЬЬŒЧуСхт[VА??ћМшн•_lГ ЖiЗзљўшv§ŒХ$ ЛyъџТЌЎh}(`‚Qк&]њвПVфѕe˜ŽхЏМeЃ WŽ/R ЁVOD*bL Дx’RO2—ŒORљОѓЭ•s'/ди*оњђхK>ŸП}ћі7ž:uJ(Жк#р•dшчmPS%cП–b7qgŒ Шр1Fи­œЏщтЊ2ЩаёЦdЮс8­vt›&#ђp_&яцœDšMєф‰ЛЛ;v †ы{z'‚ *їHб Д$MceЩfляб§—[i љђ“{*П;цдj ‚4№фf–кЪ)І–:|ЎœLЃ‚˜Y5[@Іе2E"nŠ„xЕВ™.љЪUQ(ЩVTиПІМЎZігеœњвє Еф2tИnVЧЭK aЛ[l“^ккк*)и5FFF*wƒ)ŠЊЊЊkзЎёљќйГg#тсс‘€н3ž ‹С9лєK|'Жъ5м™eЏ7Ю *Л*(џXhy+$?\t;ИЖ<рƒ|{Wъ€ Чћ›ŽŸmЎЅеf—!ЅmеEG—2zšщшiІ‚H%шЁѕЬ№хЅGўtoЕЁжOD-ФрАe$sIjЊђМ3ёуЯЪо*jkk7oоŒ H||ќЅK—–,Ynе)LYh~ч|нgыzхќн№хN{AМ?6:КЙA[Бu~‹,”<ё,CВЃлtв&ё'№eђnоСI_МИ{їю_|cЏвSžUюkbыЌwі A3XfV:ц˜ЋЗ…МFІкмZЙJрelІнщ›iп­ЌМИјЏЂІ^цl‹rючm oЈ… 
HU™ЄЋ%ьЂпhни˜ЫхЊ$FFFў№УcЦŒ‰‹‹{ГЕьээƒ‚‚žЅNf–iSGw"Ф#œР—ЩЛy'Œœœ›^НzСp…ˆ“ˆЅ›lЯ`бŸ7ЦЌў|‹]ЫпvjёnјwвЋх'ёМ†vКxУ&Ї'pлcbmŠBўЦ%c‰U~-№˜кЎ–L‹-хlNNN Ѓe:F›8qbYY™КHH ϘnhhИlйВ§ћї‡‡‡›šš*_@)--uvv_эDˆНfФdуф oМехіЙц]чў4х‹Х‚$žlNЃќL’Nз’t<„э;HЮJЋ_юИЇ\nџV Б.*\ЧT‘АŽ%5Едщ”‘ŠwЮзљјšЬ%ƒЯ$уЛW8Ъ))—8$НеоОЭ/Џ$IяЈк:”јЈšС7џŠќБQьA–Mы/§ЖЮ2mъшN<Щ“™j |™Œ›wpiВFJŠЏЏ/ Wˆ8[aТs+;нc Э­uќЭIѕй:›#›˜щ \‘@!(вИG6W…іъtёVюАћѕћŠ;8XCORљСг‹ЩшрІїшvНВSŒђ39ИŽЩЋ• љђЫЧииЫ&КTB2-Ж”ГMxzzfggуЛgЯžeБX …ЂОО>))ЩХХK?}њteeЅBЁЈЉЉ‰‹‹4h–~ўќy.—+•JѓѓѓoпО=uъTхпЌžžžрЋБзЌиaѓ#+ёD/ђхёQ5џ6‡˜ѓПЖйїMYСП™eП–FяЎdОjОтЫ0ћ‹ПTŸ=РтАeR šџX:ћ•ІЖblсЭГuеL‰BŽrkdgВ†M0V+pЛ]†XтБ–Œxqџ:O$PH97|ЗАdсЗ6r"zњ#ёDЭщ§ЌлэHц’tџшˆЊ1ЕX%зOеЊќ0 №VŸ„„ЁP( уууЩИx%yІZDэz=dlsФ9dЌQм!жє ‹vЬ2mъшN<ЩЗrваьЫфнМƒ“‚ zzzЎЎЎ0\{&|ЇкO|я—–ВxCЏн+пьqавІlqИЏЩжуЮбU[щ‚|рЅџ}”ГІнм<ѕпшstЫын+rъ5мd\Лf_яНЋЪXхm2ХжуЮ{ОdLwЪзеЃŒ0лqвхоЕœ.•L‹-хlƒ JHHЈЉЉСоІцххSUUe``0`Р€ЅK—bХxцЬ™ЊЊ*cccooящгЇcщ...‡‰DЎЎЎЫ—/ЗБišцйl6N_Мx1јj'Bь5Ž}ЈGoЛоРќёлr=šжФ9fлЂœ“/6­ЃЬYi%“(Ж~FЏ,•XкъЮњмrыяN‰'jА\['НшПћћО"f_Ÿ+wѓдџќ;[MьЋ]іŽВ„”ѓЙrнёf{ЮЙЊИн.CЌ‹ 'єЛUГџлВъ )U_ы/кќolf}nй)'Ђ­ЧЌe eJХЈїЃ_юИ;іЁ’Ь%ƒS_к‘[}"7Tь]UІЃK™8зlѓ/Žiз8dМuФˆ'""B*•zxx,\И/‰п<‡m`ЗаWЖ‰) Э­gzm ƒЧЫхЊ‹‘œeкдбэ8ЩЗœФБ’Ф#œР—ЩЛy'lsтФ‰*еТpэ9P:ёЛъР{NJJ ƒСXО|y'жэььмђ* МEЎК‹>fѓwУОеeчrtКЗ‚Wі(ˆ;&Ўя`ФљЖ|=шргЃ='”#р­р˜0,žŒ€Г6аеёG7ЌqоНТ9ЕUœ'двІ|рЅП№[2ьо:Ч„ŽоRрЊ:аЕh ˆ8ˆ8 "Юa”Ь–xиКewkœ§љШšОњCž;чыІиц)ЛpЇЫ МУШЄh@ŸЇRЩ/[gОЃd–4*'JФшd›мюљReљ&С!єєЎЋјOшф7РЗњ€^НyЎ#$''—••uю РЂЂЂ\\\р`@ї€ћTOžq Є\P/_АкІнFnЈјщЊлРQFяŒ‰КТЭС§3Ћ.хЃIЦnђЦ˜с‰YїhZ9ёна№ФПop=} -ltРh]9РDпшІЋъ=і.‰$==нппПsЋѕїїOKK“JЅ0Т€юФШT{|€йžѓnБ‡Њ;RЛBђсHЃwц„аEnю0m‘Х­и:х”ьПјsОДЮКп œxѓlМл&њїх—Xwў@С–СёХplжFЩќG:ф]• б5ЖЬЮE/pђђLV:?rCEqžавVї‹mvўЫ­АєzŽќиЖŠПЎѓиЏЅ4-OУР`›13L‰ГTШЭЭussУОЉ… ˆP(МxёbAAЎЎю˜1cќќќ№’‰‰‰t:]__кДi#FŒРвёЏРaпУАЖЖvqqЩЯЯ2d 2 ›щ?Ф€U.С]я>п;2”™r‰S_'УCНћзy'"*Б”їљPџ‹mvO7ХAф#­LхаАsнPyХ‚X‚“CЛн\“лjrsMЇ…чЯŸпН{ЗДДTWWзУУcіьйFFFрў†їXу№ AНмаDЛi>ќ~Џяc ё2ѕyЮп qЭ_}$v‡ћѕƒЎcІ^с(фШЌe–!ћ{‹б!х)—8 ђIхњCНЕu(Ф>лъœлVџ"‰&yЉcI‡U^?Uk`Ќх;Я>žЬDщвЅффd•“xz›шфˆSхdФ‰; Ц?)ѕƒ АэMѓ_]8ТТГмт­ђmєУu3Х"ЙZIВT cГ›ЧшЦЖ->ŒmЧФФŸП}ћі7ž:uJ(ЖZГ™™ЧƒEt {юZСўќьѓЂwW~БЭ.0Иљъ’Вa<§G€_ЊУ;г4џБ@ухА.sУV%!89ДлЭ5ЁЩЭ5”),,tqqї”щчmPS%cП–"’§Ш8cA1Тnх|MW•I†Ž7&яf7Лa/ZЃPёFЪ‡њЬтжЇNbЗjЧ4кrъoШ#)odЮtЭЎ›5Œ’9ж$ЇŽEЊQ2ZŸž2яo:~ЖЙ–V+Y-бжжЦЗ=<<Аћ‹Фb1~GmmэцЭ›‰Пtщв’%K`є=т_w*ў… HOnfЉ­œbjЉУчЪ5еаunиЊ$'‡vЛЙ&4ЙЙІгNEEХЕkзV­ZCPaЪBѓ;чы>[з+чя†/wк#т§БббЭ‚мŠ­ѓ[dЁ§Gpz?+-žЛыŒ+q–j§ЦЦ\.звВщЇв‚ .\ИАcЧіёЧcЯ ЂЇЇ7kж,AТТТZ•œЫх’YV€ЎІх$adЊЭ­•+П…W+36гю~7lЋ$$›&vsMhrsMЇŒтттиииЅK—Z[[ƒћ*ј-ВјnaЩЬЯ­ђІшаФBG[‡RW-Л[Зя’[gЙyŸ% yџъ <Љ—9W‹МЬ­›ДЎ*“t~ Ёю ““cccгЋWЏVЯёёёѓчЯЇ(Чьрщэ›ŒКЛ=mŠBŽ’/?l‚qz—h’ RњвŸГвъ—;ю)—Й$ГšЪЩ‰С`рЛ†††Ы–-лПxxИЉЉЉЛЛ;–nooпVMKKKa„=Џс†'Нq%шя$žзpУюwУЖJвІІ5ЙЙ&4ЙЙІг‚ йййgЮœYЖl™ŠГƒћН? 
jыPтЃjмќCe№ЧFБYz4­Мє;зШћl'њWх‘ˆQ]Нц`юFLmЧ#2g€””•Wij:TVVўќѓЯ!!!иCюЪКƒЇїшˆгСMябэz”єШYЙУюзя+ю\рˆ ‘@ё$•<НЫZ1Ж№цйКjІD!GЙ5ВГYУ&ЗšЅ‚ЇЇgvv6О{ўќy.—+•JѓѓѓoпО=uъT,нЧЧ'!!A( …ТјјxOOЯV%ЯЩЩ!S КŸЯжййФLOрbn•žР=ВЙ"(ДWїЛa[%QІнnЎ MnЎщД–––˜˜јѕз_;::‚ћš˜hЕыѕБЭчБFq‡Xгƒ,:бШћ,IШћWххgrp“W+ђх—ББ7Cu0rhѕ PPP ЇЇчъъJц Љђц+еРглDw_U_ГЏїоUeЌr Š’z Д›Їўс}ŽnyН{C.CН†тюїе.ћ GйBЪљ\ЙƒюјГ=ч\[ЭRaа A 555и›К\\\:$‰\]]—/_ncгєјХˆ#8NDD„T*ѕ№№XИp!^ўsлРЦ"›ЭІгщ‹/†є@†ћšl=юQЕ%Ž Ш^њпG9L']ч†m•D™vЛЙ&ЗефцšN ёёё‚ьнЛohпО}T*мPfЪBѓCы™оc›Gѕр1Цr925аЂнМЯ’„МuPž­Чї|٘ю”ЏЋG`ЖуЄЫНk9ŒZ=ЄЄЄLœ8QЅСDЏ№єЖB™Дњђщ#яГ RRR Fч~ќ*::кйй>~яА›ƒћLєрщџAФ‰ 'гc?Ж яАп›р˜œшc€s+р˜аЅh ˆ8ˆ8"Nр­8 žЮырƒ{]WsбuњМзgЯфоКТPЩЩЩбббн#TTTJJ є#@Ф МGH$’єєtџюiЮпп?--M*•‚хр@Їћ›ЬJчGnЈ(ЮZкъ~БЭЮЙžUЧ’ЋМ~ЊжРXЫwžy№ўо4ƒц˜[ЗУWя№žЩeшЛ*Ђk8l™‹^р›Ћm№Ѓ*’пЖП~|ЇООNцвŸіљЛ) Э‰›FЩМЯїŽ eІ\тдзЩ№†$'`%ѓщ _=оРЬКп ЃK™4Я|}ЄЃђБ-пы†'жsфЧЖUќuЧ~-ЅhyњлŒ™aЊжP-•Z:тХль>žnЊ\yQЎ(xњЫЅjыP”гsssнмм№O„!RQQ‘˜˜HЇгѕѕѕЇM›6bФA RSS †žž^П~§LMMaГй—/_Ігщ‚ИККЮ›7ЯккAEoмИё№сC …2jдЈщгЇS(AЌ­­]\\ђѓѓ‡ ^ o=sƒVЃФP$ЃеЌТlЏuЮнЋqЃЂ(Wрžџw/цч{§TX$g1%лўWђуЗe*‡Зlb(’ЖЌ4:ЂВКB"•(ђ7Ьюћєъq6–[Щћ9фžљЉЊ–%7*r6Ќ(&гмŽ%єДxŽИQ&–œРФ–7NБЭНіЛ'kрЩЎќVНyС+}•П™Rєѓ&&‹)‘ЫМZщ§ымoІJEЉ?уjWљЉлЕМ4*ќuЫFOž<љяПџтЛеее;wюЬЭЭ‹ХuuuqqqXњбЃGsss…Baccу;w:„ЅџјуЗnн …BЁ0))щЧФвШЗwЅјШpМПщјйцZZdХ@dЮJЋ9§ž­оы`fЅЃЃ—~eяЛфІщpmmm|[ джжnоМAјјјK—.-YВЯ AФШШл@dЮœ9‘‘‘ЗnнBФммO‹ХT*лІRЉbБ| ть(ЦfкW‹МLЬЕ[fЅ^ц\-ђ2ЗnЉЊLвёцЬЌtjY2нЖ6з2n#МуZл7KXЫ’слzTŠXЄ ъ7KS[еМЂщфNМбG"FK_4>§Gpz?+-žЛыŒЋІЖZ*efЅ3qŽйепйЫЖкЅ%№,mu=} еwœБ1—ЫЕДlZегг›5k–‚ aaaЪ…###гггуттжЌYƒ HllЌЯјёуЙwя^llьз_G™4M%њD„Ых*ЏЊ№івнoG6С8=Ћ6K"Fuѕš— oФдЊW›ЂЃф›хg’tКЖнЭ‘”М#Œ˜lœ|ЁN9хіЙц]чўДќЧo\ЛO<Љ*Ї•вwўœ•VПмqOЙЬmЋЁƒm.cЫЄш…#е ПЕбTЬЩЩ‰С`рЛіііФевhД‰'–••aЛ%%%“'OжзззззŸ>>x999xЕМеtїUu7O§У7њнђzї †\†z 7 э…em=юМчKЦtЇ|]=ЪјГ']ю]ЫQ9|ЭОо{W•БЪ%(ŠД|QeKlєЂџюьћŠ˜}U|ЎмЭSџѓяlЩ7GRђŽри‡zєЖћс ЬП-зЃiMœcЖ-Ъ9љ"ЫГвJ&Ql§Œ^Y*БДеѕЙхжпOд`Й_эВПp”} ЄœЯ•л8шŽ0лsЮЕ†њ4ИзКYХЫПЗS^єUaа A 555и+9GŒСсp"""ЄRЉ‡‡ЧТ… Бb^^^111UUU XКt)–ОxётЫ—/ЇІІ"тттдїU[[ћУ?`лиK=aГйt:}ётХрЂ№@™Дњђщ#`ˆїЉgš_ьЅі†Wœ””ƒБ|љђn)::кйййззz тlОxў.Af§€$vUb”З‘a”LšжШЉ&;OЙšhƒAшб'№6ПшДРDœDœ'''@Ф @Ф @Ф qqDœDœDœ'''ќзgHHHWHйEеo_Ф @|  ‚Щр‰JЅ:;;Я™3ЧжжЖƒбXdddgIЯbБ’’’Š‹‹ХbБНН§ЄI“мm­kЂšxw"N<~jllМџў™3g6lиаCDgГйПќђЫдЉS.\HЅR+**юоНKq=1тФ бh'NМ}ћ6ЖЋP(nоМљјёcБXьххѕщЇŸRЉTхђЕЕЕWЏ^-..–ЫхюююŸ}і™‘‘ЖbЧЧYMѕШdВ‹/цффPЉд &ЈщЯ?џœ4iвшбЃБ]ggчЯ?џМSZ—JЅ/^ЬЭЭЅRЉуЧП~§:V^*•^Йr%''Aooя9sцшъъbuЄЅЅёxМC‡с+ЉšъўќyBB›Э611™2eЪШ‘#1БЛg  ‡FœщщщиnrrryyyhhЈООў•+WчЯŸЏ\ўјёуѓцЭ[КtЉ\.ПuыжЕkз‚‚‚"##U‚*MѕмМyГЁЁaћіэ(ŠЦХХЉЉЈЈШпп_mVЧ[ollмО};‚ чЮУЫпМyГООўћяПGQ466іцЭ›И #44дШШHY MѕŸ={vС‚žžž|>џіэлxФ №Nвњ“C!!!!!!›7o~№рСвЅKБФЧЯ;зЬЬŒJЅЮœ93//OхЈяОћЮнн]WW—FЃMŸ>НААPmхšъЩЬЬœ3gŽ‘‘‘ББёœ9sд+T"МЮj=+++ РШШШШШhіьйxљЌЌЌйГgcRЭ;7++ ЯТЪ“Ќ_WWЗОООЁЁСммќгO?ХЫУ'я$ЄюуDQДЖЖімЙsL&гТТAЗgЯМ …BQ9ŠNЇ_П~ЩdJ$Е04еУчѓБ†БДДT{ЌЁЁaCCƒ‰‰IЫЌŽЗnnnŽmуb`щИ0–––|>Я255%_џВeЫnпО§чŸвhДйГg{yyС@рНŽ8БPЩЪЪ*((шрСƒ§њѕЃRЉ&&&kзЎUfaœ:u* РУУƒJЅŠХтЭ›7Ћ-ІЉccуКК:+++AjkkелЗoпœœœБcЧvEы .ыъъ4IellLl7Mѕ;99­XБEбчЯŸŸ?"Nоmк№>N333WWзЬЬLAF}сТ…šš…BQYYЃRX*•ъшшшшшдее]ИpOзззЏЎЎЦw5е3dШkзЎ544444\ЛvM­<~~~)))> …2™ŒС`œ§ђхЫссс‚ Ѓ?olŸ0=й[\Eцт|бшiІ]ч=а8ЙЙЙnnnVVVиnAAAjj*ƒСаггызЏ_@@€ЉЉ)‚ ЯŸ?П{їniiЉЎЎЎ‡‡ЧьйГŒŒАCBBBšтѕШHМZkkk—ќќќ!C†РФ @;шк5Ю th:є_Ха+…^Ўhч•є|‹.ˆ‹ЌЎЋ–НУ:~ЗАD&EПлїQЃїеB/ПEQс•яF”Н9ЁШЧзфZ‘gzНїІЃNЗЯsоv/mы˜Єh§uƒ‡яІ'pi†ящ§3’F…Ž.х]u Е<{іLљ3iїюн7n\xxјŽ;ьээOž<‰ЅЇЅЅMœ8qїюн[Жlбзз‰‰С‰ŒŒTŽ5qМННŸ>} Г&=1тФ P льJž‰№e •U lуС-оЏ‚zY3]ђуЃjTr‡Q2ЏќЦžщ’?B/k‘їѓЂ!–ЎЃGПЋ˜l“;к {ЫЇ%BОœ 6Е‰ЪЌ=рј"K8ЇягuГŠя^сH%шЛЇcЦ=ўЗћzлЛRЕД)цж:“˜OыKма0Jfм!ж'ŽљУ(™ОжЙѕ9^[}lrЏмzŽзB&EЎcњZчN0Я9{€екн#ЧwО^ВЩvо*kSK=*ХkИс—н№рcї Ц8гœqІ9+’FБЭ‰{Š`w%SЙC‰[‹;–”~l˜=Х6яєUjЏвЖuL.йhГЏ пйWЕt“-ОЋЉ#$b4ьѓџ—d?ЋЅŽjЛЕЂDМnVёXуьQДЌ5г^‡Хš 
k2Ž&‘ZяхюаЄ8IзPЋ>nЧQЋZчњШ?КЪЪ\\\№нoОљfрРњњњT*uќјёXњЊUЋ<<<ЈTЊ‘‘бЌYГшtzЋVuqq)++ƒY€žq"ТaЫNьЉђaD\lЧтвЏТьюѓНџИп/џБ eЬ4~єƒўї8ƒ'Ь6лНВщмwbOеѓLal–G2k U_ычMЕЕк„Я$у]Ї]n2N˜c~с(лЯ>oп7eOџМK:mБ’‘їH )TВ45„ ШгБYшаIѓЬЏgущWзј-В01зЦS~пљКф™(.л#‘ў!‹)i‡ эю‘'Љќ) е_Cџѕћзьзвk/НЎyV•IŽm{MlѓVSЏ C‘џ_і&йГЧЖНnрЩЏ—~x!@Юп jЋmы˜œ8зМЎZšїАk”[+›0ЧМе.>Ж­‚У–%в?<Ÿы‘qЗОeЕjЛ5dfq`HЏфъAwXƒœћб­+'АAa ЦiEЄ6uСи&уjе'у8jUы\Q†Яч›˜˜ЈYю•Hвггннн[f*Љš033уёx0kаNц­FЛ†ЁH†ђпxѓьWO…x–JIlуЧМ GXЌrБкмЁHЏVŠm‹rLl{†s^щ Ж][%™м+— 6Е‰dнчOыЇ"№лЎc=Gvd3sОчГ‘ДЌщNy?—ёъdФ E2Њ+$и6§ЙhКSžLЊ@QT&UЬpЩЋdˆ•Е˜ж;Q(RiДM*ДЛG|t21СZтgŸЫ(jФЖ…"?‡\b›їTЫŽkЕ€кVќr™ЏšЄ*{йЈIЏ6IEЏќЮ^;ЋEбеS‹Ўg+KЂЉ#ќrЫ^6лЇЅFjЛU‘@юkЃж­жdЕ"єAwhRœЄkЈUПUЋjR­ы|$44T&“Љ$oнК•ЭfЋd1™ЬАААъъъ–‡ЈЄШdВааP ]tэwеё•žzŽќТ‘ъ§kЪЅі%(ПџŠ[Txхя;+LЕзG:ŽЉzЫП‰E“Р4-ЙЌщкbu…tnџgЪИ jkЕ aƒтюЮ˜к—yЂЩ Ьg,Б|—t46г^Нзaѕ^E…gА6/(љ5й !AЌэu›.ЎѕЇЙyъЇ^сNYhžz™3h”‘­“žr§5•R7еЧЇкЄBЛ{ФдR‡У–YйщЖЬЊeЩzЛ5ЩщрF­cЩˆmNR0ђЈmЅЖJfчм$•Н‹СсфЧ$‚ 3–XпљњіЙК—yЂ }ШtDm•ЬСЕй>-ыTл­y~оTQ˜- *І%…5‡XЄ6A0ЖЩИ†ZѕЩTЎVЕЎ№‘&ŒЙ\ЎЅхУ#22ВББ1===..nЭš5xzqqqllьвЅK­­­[5 —ЫUЛz №пЏq*яŠ#iYјB”H ЧЖЙ5R•’ њwwŠm.ЩеЃOѓXL‰&1Tj#HФј'Ѕ~ћbњ8гь/S.е‰яžŽ-W›Fшe7Є"иЃлМЅ#ŸЃ(КtФѓY2‹aэPЁ=ВuQЩЙУ,MkœЪ f~іЙФ6W+˜Іn%ГЦЉ6нЯОѕ5ЮіЩ“{+}t2O§PIr,НБ XдHrгЯ!їіљ:>WІP  <™&}лTИй8Djп'ёlе5ˆз8I:Nљˆ2'OžЬЬЬT›%‘HжЏ_пМXž•Е}ћіВВ2Е…[ЎqfddФФФР: эЃ›юуlрЩЯўTхмЏщ7}пAњg~b5 ев=_5пŠО%^Rа(“Ђ(Šр‹­Э_YGЌd0_‰х2Д8_Дхг‚кZmтањђ~ƒѕЏyКоgвЕ8ДžЩ­‘qkdšn‚lп˜\Кйіщ%JЯ w„_ ХЁuхЖЌЎZv Є\эКiЫn‹P*ЂGгzMGЌd‹дІТdDjлЏkc›Р5дЊпІЪЛЮG”ёєєЬЮЮЦwЯž=ЫbБ E}}}RR~ПfZZZbbтз_эшшHв€999žžžАL@ћшкЋъјƒœњ†Zƒ?6њсBгSУпџсМk9#*МвЦAwЩ&лдЫMяЏяoКaЮ+ц+Бs?ZјYW’­,нlГЏjеЄ"іkЉs?кђяэjkЕ‰s9оyWlЗЛxДzЯW AНТЪNwФ“}нˆjЩЂ›=_2~ОЅцA„•;ьo`~:ш9ŠЂ+ЖйЕC…vїˆ“;ѕXjп_ЗVйФl*м,йи Ыњ&Т~пъђ€>Oёoўѕn{тЊд ІЉ[q‚B{ {.lP|SуЊpћ=_2І;хkїzјg}ЧЧdћЦвЊpћˆ/Ыf8чk…к>ИЩ#г­лЃЎcVЮ-Бqа эuчбЛЈкT˜ŒH*>ˆЁЩђdЦ6kЈUПM•w(3hа „„„ššь•œ^^^111UUU XКt)V,>>AН{їтюлЗJЅ"JяуФ6А7%Бйl:Оxёb˜5h”ЙAЋ/Ÿ>†€џœвk>)N,ёS!%%…С`,_ОМыŒŽŽvvvіѕѕѓаГ"Nјр!@бУ?а’ЎКЊ'D ЩЙУебЛ+%ŠБГЬЖќюl`Є6 т 3 Ж Ж;я0Аš@Ф @Ф qя}Фйm/T‚77‘БF;Ќ”œœн=bGEEЅЄЄ@їDœ№НGH$’єєtџюiЮпп?--M*•‚х чг…oGFЩФпЪYœ/Z=ѕхџжїњпњ^=№UjƒрћJQeУ'vМZђфццКЙЙaжУРП•‡}%ЏеєŠŠŠФФD:ЎЏЏ?mкД#F ‚Ђш7>|HЁPF5}њt …‚ ˆЕЕЕ‹‹K~~ў!CРр§8qђ BgПZwАЗп"‹ži<въxмж>Ь\jхЗШТТцm}CъГgЯЈœ‚”x|IœЮfГЃЂЂfЯžН|љr@№чŸbчЃGŠ‹‹7oоŒ Ш‰'?~:™ЉW8-гБ ?‡мВ—и6ЃP„Їт˜wс‹U.&гŠH їЕЮСkцеJёtLт†дЪЌМ;У9Џє…лЎ­’Lю•KœЎI€6i„Ђhж}ўДоyšфŠdЈ§#–РhjuзЄ‹&BCCe2YЫєрр`ЕхUвзЎ]{юм9@ bccO:…Ѕ‡„„( l[ЁPЌ]Л?D&“…††ЂєxКіЎС­П;э^С0Гв2жЈenm•ЬСUлvpЃтщћЏИE…WўОГвШT{}Єуи™І*ц=lјySEaЖP$P BЁ4g™X4iD3а’ЫPт†ZЅКB:Зџ3|oHSК&ZеCи И{…s#ІіežhђѓK,5 ІіЩ!bйŒІ ЕКhТии˜ЫхZZZЖoЈшщщЭš5ЫРРA€€€АА0,JЅŠХb† ˆX,ІR›ЛЫхvdU€nЃk#ЮYЫЌL­tжя<х2n–™JЎЅ­N]ти‡Š H]‚ЇјШ№`bEотэZЮ[9PхРM JжpхgbhЂ-фЫЧ™ц‹ЁЉЁVБВг=љЈПƒ.ЩtMДЊб“T~вщкєЎїЃљ_[™iІGЅДЯцšdkЋбкŠ““ƒСhwФiooЏ6нжжЖММмннAђђr[[[<ЋДДдйй|z>]ўфаИYfњьZЦИ~ЊV%Ы/атаКr[VW-;в|[с–@zIAЃLŠЂ(ЂviM,BЉ4ŠMы5]Б’бЊ šj•Й_YGЌd0_‰х2Д8_ДхгтtMДЊбЁѕх§ы_-ђ:tНЯЄyцэ7 dkеhЦfкŒТЦvЗыщщ™нюУ}||„BЁP(ŒїєєФв?њшЃЄЄ$Чуё’’’|||№Crrr№bєdКу]<оcŒ~ЛлwЭД—Жlё†^xњЊpћˆ/Ыf8чk…к>ИйєШxг s^1_‰ћбТЯКЖЌp{ДѓСuЬЪЙ%6КAЁНю\р  ЉЁVYКй6f_еЊIEьзRч~ДхплЇkЂUЮх ш,kk’­UЃ…і і\и hпћЁ ”PSSƒП’џЖП}Smњˆ#8NDD„T*ѕ№№XИp!VfдЈQЕЕЕ?ќ№ЖН2 A6›MЇг/^ > =Ъм е—OC'%%…С`,_ОМкŠŽŽvvvіѕѕГ@ЯЇЋж8сS–я$ФЫŸнџuO\ @Ž8{ь'"€nF L@Ф @Ф qqq@лщрЋВШžœœнОњЃЂЂRRR ›"Юњ=гў‰$==нппП}‡ћћћЇЅЅIЅRш ш"К№+—xxa`Єх5м0єg'ЗДюq”п к§o U€ БgˆЎhŸzRўџ3я(Š,№|FоиJ)\Ž IDAT>љЛHымм\777ќыšЯŸ?П{їniiЉЎЎЎ‡‡ЧьйГŒŒАЌŠŠŠФФD:ЎЏЏ?mк4ь›™жжж...љљљC† 3t]ЛЦ™Э@‡оd:ојћЯшя†ЩФEVзUЫосiZнhў }z—fиужХŸ={6pр@|7--mтФ‰ЛwяоВe‹ОО~LL –ЮfГЃЂЂFН{їю:Нy@z{{?}њN№VFœFІкAlщ"lW!G~W1й&wДAі–OK„|9–.“Ђз1}­s'˜чœ=РТ‡Q2уБ>qЬџH+“риŠёКYХcГGбВжL{‰ETи"ы0J&ОкŠoHЛW0Ц™цŒ3Э‰XЩ4*№W~cЯtЩЁ—ЕШћyQŽАЅ:k8ОШЮщћtнЌтЛW8R кnЫЈ\eV–ѓђ1і,ЗЇ#ЉY М rўnИ~ЊvvﯘTХљЂVЕP6ZG„_Вб6f_ОГЏjщ&[|WSwHФhичЅfOБЭ;НŸеRAЕ}­ЖЩPVVцтт‚яЎZЕЪУУƒJЅЭš5 
,oнК5uъдъщщ™››т‡ИИИ”••СщотˆГ'?§cUпСию‰=UЯ3…БYЩЌT}­Ÿ7U`щПя|]ђL—э‘HџХ”р‡?§G›хёЏb(СБ!3‹Cz%WКУфмvh]9ђџзpБuV‘~§ў5ћЕєкKЏkEžUe’cл^уY™iќш§яqO˜mЖ{Ѕš(Фg’ёЎг.7™'Ь1Пp”эgŸЗя›ВЇџ:зhORъџHя{3иo‘ХšOŠмф§–к7;иwОyФJFЋZ(­#ТOœk^W-Э{и€Y†[+›0ЧЯедЧЖUpиВDњ‡чs=2южЗЌVm_ЋэD2№љ|ЕY………x0њђхK>ŸП}ћі7ž:uJ(lў9affЦуёрt]Хм еhз0ЩРџІкхVаХXњ чМв"lЛЖJ2ЙW.Ж=­wЃPдВ’ъ ОЋщXeDЙЏu~ИJmи†Ÿ}.ЃЈлfŠќrёМZ)^NfЋjfнчOыЇвZ#(џ‹7Щрж4‹ЁВ‹KE …Вб:"<ŠЂW~gЏUŒЂшъЉEWГ•хдд~Йe/›kЉЏкО&й‰- •Щd-г™LfXXXuu5ЖЛvэкsчЮ @{ъд)МЄL& EшtК4œЭ@‡Ђ(RQ"оЕœQ˜-ДwбCЄКB:Зџ3М ўTJMЅдСкВk{]|[гБy~оTQ˜- ЪщšЈeЩzЛщaлnд:Vѓ\‹&›а Дф2… ŠЛW87bj_ц‰&/0ŸБФ’Р*)dй6ЕlCe—Š@ eЃuDxAf,Б<Оѓѕэsu/ѓDњ(giъŽк*™ƒkГ`-ыTлзmэDccc.—kiљ†ХХХБББK—.ЕЖЖЦRєєєfЭše``€ H@@@XX^˜ЫхjZ% ушtu вћъюГ.AНюkl`ЌmeЇ{ђQеШЪNЗЂDьд—шyvMЧnZPВі€у(?Cm!_>Ю4‡X*Ы^:t‰c*‚ %b ›6исI*?щtmzз{ŒбќЏ­ЧЬ4гЃRкgmJЃPХ”Мк6?Šд-к!М•ђщ›э‹KПоmЏRXSwXк* F—ЈэЧ–}нжNФqrrb0Ъgvvv||ќ_|сшшˆ'кллkЊЁДДдййNаEtгsЧ6Нѕlєg\‚ sПВŽXЩ`ОЫehqОhЫЇ%X™K,ќЖМš)сsхжЊП‡OгБbJЅQєhZЏщbќ6GAŒЭД…-ы™Мат@Hy]ЕЌЎZіSpљ”O-Шыrh}yПСњW‹М]я3ižyЛУMAњв?ѓЋQЈЈЎюљЊЭOЎДC‹і ПtГэ?в!K”ž"юП@‹CыЪ9lY]Еь@ˆšЎTлзš:БU<==ГГГёнДДДФФФЏПўZ9мDФЧЧ'!!A( …ТјјxOOO<+''Gy€З2тDdі ЋЋЧkА№e№ЧFЋ&6ШоКˆ>~vг“(+wиЙzа>є|–[О“žІаGэБлЃЎc~l˜§х„ЂAЃ№ђAЁН‚†=oyћ›{‹^К}žєyjeЏїѕn{ђŠœЫАhmЏ6-‹jтћ?œгтЙуLs–~ёбDуЖо-:Qx‚юXnolЎ3У9с‡Я†ŽWЃ—кОжд‰­2hа :^SSƒэЦЧЧs8œН{ї†ќ?bБA#F˜™™EDDьмЙS,Я›7+ЯfГщtКђћ•ш\(sƒV_>} МеЄЄЄ0ŒхЫ—Зуишшhggg___0#t]u'|Oш\ˆПWд‘xБ}q*џ}ФљV|ХшДРDœDœ'''аѕР‹Ѕ€w`ИТ0юN’““ЃЃЃ‰ЫDEEЅЄЄ€­€ˆѓ}™ЂКGM˜яп#COAw‰D’žžюяяO\Ьпп?--M*•B?№ЎEœ <љђ™ЎљУuГ&XфЌxѕ$•OPО‡ПТГ­ъtЎšя˜15QњЂqумW-sGRГ‚>zžz™г“ЅmŸ‘UŽюя)ЕOЧУ іе=оqrssнммЌЌЌАн‚‚‚#GŽ„††nйВ%&&†Чуaщжжж...љљљ0э№ЎEœп-,‘Iбпяі}дш}ЕаЫo‘ETxeR>ppA\du]ЕЌчЋгѓйq —Н9ЁШЧзфZ‘gzНїІЃNЗЯsРE;нЮ=шёNсйГgФwянЛ7nмИ№№№;vиллŸЎ›9У9якьZxыЂ’иCЌ6Yx(’qщзъ™Ўљ#є2ч{>Ыў‹ŸxВ&Р=SъežА­Х”Хw5й / жhjћw(’џЉTB ЃкжлmgЕЌеC<\еъеЊиауэюqeТТТиlЖк,‰DŠяжд䄇‡Ѓџ]ИЦ9hДQФJFо#XЄPЩњ}чы’gЂИlDњ‡,ІD%їФžЊч™Ти,dж@ЊОжЯ›*№ЌЬ4~єƒўї8ƒ'Ь6лНВŒ 6‚Jp|&я:эr“9pТѓ Gй~іyћО){њ ­ъќњ§kіkщЕ—^зŠ<ЋЪ$ЧЖН&ЃfШЬтР^ЩеƒюА9їЃZWўжsЧтвЏТьюѓНџИп/џБ ƒ~’ЪŸВа\m–& #ђ$ЅўєОї8ƒ§YЌљЄјСMоoЉ}гИƒ}ч›GЌdДŘ&дкŠИ ді/vE5кђв*ŽФ­ЗеЮФhъkтсЊVЏVХ†ow+УчѓMLLZІcїwКЛЛу)fffјEv€wgГž#;В™9пѓйHZжtЇМŸ‚ЫxuMы%гzч1 EšVYf8ч•ОhЪ­­’Lю•‹ре6­ЌˆrL‚к4UB@ж}ўДоyjW€ˆеёГЯe5bлŒB‘ŸC.5• фОж9zІ1?qЬЛp„Х*“ФібЩ”Iе/ЮiВ№P$ƒ[гЌ…Ъ.ЎљbšVМдк / жh$ћп%аQmыэЖГђЊ[Ы82ŽЃV–zЕ*6єxЇєxhhЈLІz '88888xыж­ЪЫŸ2™LyЩ ;бщКXжиL{ѕ^‡е{Pa6ž=РкМ фзdwAj*ЅnTMVWHчі†яR(ЭY&Mг Дф2лV[A%*wЏpnФдОЬM^`>c‰e[еЉeЩzЛщaХмЈu,`8y~оTQ˜- ФіXcюПт^љћЮJ#Sэѕ‘ŽcgšvФТІ–:ЖЬЪNЗe–& cGсZЈьтJ‘/І ЕЖТQkД6ѕ/БŽФ­ЗеЮˆК‡W№Ї[4ѕu[е!#6єx{Мщф`lЬхr--пшюШШШЦЦЦєєєИИИ5kж`‰\.Wэj(@7 г mP(ˆKZшaЧ ц9XŠ•nE‰иЉ/Mmy+;н“њл8ш’Ќ_mmd*y’ЪO:]›žРѕc4џkы13ЭєЈ”vЈcйKЇ‚.qьCEЄЂDlaЃCFЭM JжpхgbhЂ-фЫЧ™цМuЦ№‘сСФ>(Š<МХлЕœ1Жr`G,ь3Щ8хчг56-Г4YИsбжЁ4 X€ТЋmУS8jжжўэˆŽэЩm0эЎФ@wŠŽNNN C%тD„FЃMœ8199O)--uvv†i€џ„.МsхјЂф‹œ:–T!GYх’#›+МFbY3–Xўјmy5SТчЪЌUНqюWж+џЧо™Ч7Qmќf_šЄMЗto)”ZВЃ,ВƒX‘В‚ђP ТEdG@6}O…Ї € TDe­€ O@i e+”ЖiКЅkšДIГЮяс !™™Lв…RЮїгO?wюНsя9чngю,)КgДZАмlУв)yє‘–ЦЄпVХvЅнIјјЧC'Шщiu†OінœЊЊ.ЗT—[6ЭW˜тЫDMЃY|!Л$пшђСВжiЬЅ)љy7,f УщNŒ{ў фЋѕeiŸWjЋ-&#vуr§?'фб[ИiщиUєѕ&uƒоV^l^ћz!ѓIFеОRŽ2ЇСЙЦшш–щЁjk—н•J/hёцhq{туу333‰УoОљF­Vлl6­VћѓЯ?GEEIYYYёёёАьаж<Юзо>§mѕјN7њ 2_y:ЧPoлp(њўJГ"И]'с”ЎЗ^ˆЮŽр;œ8cIPЗў’7†оyFœЙljўГуф.ж-Вв˜r Ћѓд †[ 4ъЬ]тЋрНисњ‹Ўћ‡№чЌaЂцћЛ#З,,ъя•љСwК>#yљьXяХЩїњ{enЇxе7эiсˆСП~щјчЩкБэЏ’en˜Ї9ENoсІeљ‘чjygЭzцvЏ!R7<'2ЃQЕяДEŠi=o9НВ1:КegzЈккewЅв ZМ9ZмžЎ]ЛцччWVVт‡ {іьYДhбЦѕz§Œ3№јŠŠŠќќ|ћя(Д$ЌёгцоЛ №˜’žžЎT*gЭšE“gїюн‘‘‘У† s№HhЎч8сЗћš њ_*bтGвћЃЋЧљ˜ўЪ"афАСxœxœ''gпNђЬ>иэЬ™3ЛwяnБwэк•žžЭ@Ћ№8СЃmL&гЏПў:vьи–ЉnьиБчЯŸ7›Э`yьс6_бuЕжЯV”œџAS^dKйнJ'Пи{ЈДДjљЏіd]qЎ”4ВёХ2чъеЋбббўўўјajjЊ}ЊP(\П~=BшцЭ›Пќђ‹RЉфѓљБББ/ОјЂЗЗ7BHЏз:tшцЭ›<oР€ЃF"Ю%ŠкКu+•нН{wZД„ЧљюфМАі‚ЯЮv ŠрзV[џ>ЇлЕЊДe<Ю&!ЅлЭЄўЃІњ6ЩЯU?nмИaџ3Ъіосљѓчkkkё№Йsч УfГћэЗ/Пќw(Пћю;>ŸџС 
[binary data omitted: remainder of the preceding PNG image entry]
[tar entry: Scrapy-0.14.4/docs/topics/_images/firebug1.png (buildbot/nogroup) — binary PNG image data omitted]
[tar entry: Scrapy-0.14.4/docs/topics/_images/scrapy_architecture.odg (buildbot/nogroup) — binary OpenDocument Graphics archive omitted; zip members include mimetype (application/vnd.oasis.opendocument.graphics), Pictures/*.png, and content.xml]
л;лk4šzv=xАЛaлУcpnnžЁhВProZКёњBуЂЏЏїбЃпˆHњў/'Nєџър!љърaŸЋAn^ўрAЎyбіљѓ†xzŠC<=ѕsЗцw™щарє™3№TЙіwvщзGѕТа‡Wovю\Ўза!еыЭœ]DžyІЇ~ЃSЇN&EцnmёдØЙб&=OЬO‹šУЅK?„Ь˜ЅvђМГ[ПƒJJJ2hыќАРе§ –BЁhв+8w.WD\]]j,zzxМ5бO?нШЩ9§aЬšqП_\ќS^^žЇ‡Gc5hY••КoПљКsgћ–К“€ЗTЯfт>oС№a>яG­шкѕINЇrэџиƒЖщ>,hRџЎ+…BЁгщj+ŠHVжƒoŸ33Г\\T5іcў;vpuUХФЎф>№ЉЇ\]TБЦ ЈюиБCc50pQ9wќ„ЁhќэyŸ>Яћm–ЁјmfІaAЄ™]f:4xС­яў_жmWW—oŽЋ^oцьѕTŸ‡шЭŸЅЖ§Hч›‰УщгgBg„<ѕ”CЛvэrrNз9јa@›ЪYЛwњиБLCЎ`R‘%K—Ÿ:•S^^~ђфЉ%K—‡L2ь2^ؘі№CмiеžщіёіNоОcјp_>м7yћ_ŸFl`Șрр?-‰\žsњLyyyЮщ3‘K<Ѕ4эЕ)K–.?yђTеЧ‰\1mкkuю2гЁСœ9a+п_Н{OЊFЃбh4G3О ™1ЋњЕЭ }§н%Ы2О9Іеj /НЕ`QgЏЇДz.іУИј…‹ыtКњ‡LŸV§кFŒxI[^Еrе… пwњщйГgжyіњЈ1ре™?K7Кž=з'ЋояэEяМ§AїюO‡LZЖ<ЪќPlр‡H1qjи+О}‚gЬi”Шдѓє1БыянЛџі‚y5[ж™3gчЮ[Жїџ,ЖУ†kxРkЛбu+-і’@=ЕkйгЇюнgМОаЄиќцЭ_xБААЂЂтмЙмˆХ‘-­У&ЋшЙ-]ЈЇўэжДд/Ь›Ÿї‹^гCfўјух=КL˜ќ'KыАIуп*znK—ъЉЉжЅ!9+@Ю €6­ъЌzўŒаќЇ†XВџi]4Ы\э’CIENDЎB`‚Scrapy-0.14.4/docs/topics/_images/scrapy_architecture.odg0000600000016101777760000004630511754531743023546 0ustar buildbotnogroupPKBДВ>Ÿ.Ф++mimetypeapplication/vnd.oasis.opendocument.graphicsPKBДВ>ЯЃ€я[[-Pictures/10000000000000200000002000309F1C.png‰PNG  IHDR DЄŠЦPLTE€€€€€€€€€€€€РРРџџџџџџџџџџџџ3f™Ьџ333f3™3Ь3џ3f3fff™fЬfџf™3™f™™™Ь™џ™Ь3ЬfЬ™ЬЬЬџЬџ3џfџ™џЬџџџ333f3™3Ь3џ333333f33™33Ь33џ33f33f3ff3™f3Ьf3џf3™33™3f™3™™3Ь™3џ™3Ь33Ь3fЬ3™Ь3ЬЬ3џЬ3џ33џ3fџ3™џ3Ьџ3џџ3f3fff™fЬfџf3f33ff3f™3fЬ3fџ3fff3fffff™ffЬffџff™f3™ff™f™™fЬ™fџ™fЬf3ЬffЬf™ЬfЬЬfџЬfџf3џffџf™џfЬџfџџf™3™f™™™Ь™џ™3™33™f3™™3™Ь3™џ3™f™3f™ff™™f™Ьf™џf™™™3™™f™™™™™Ь™™џ™™Ь™3Ь™fЬ™™Ь™ЬЬ™џЬ™џ™3џ™fџ™™џ™Ьџ™џџ™Ь3ЬfЬ™ЬЬЬџЬ3Ь33Ьf3Ь™3ЬЬ3Ьџ3ЬfЬ3fЬffЬ™fЬЬfЬџfЬ™Ь3™Ьf™Ь™™ЬЬ™Ьџ™ЬЬЬ3ЬЬfЬЬ™ЬЬЬЬЬџЬЬџЬ3џЬfџЬ™џЬЬџЬџџЬџ3џfџ™џЬџџџ3џ33џf3џ™3џЬ3џџ3џfџ3fџffџ™fџЬfџџfџ™џ3™џf™џ™™џЬ™џџ™џЬџ3ЬџfЬџ™ЬџЬЬџџЬџџџ3џџfџџ™џџЬџџџџџИџИвOIDATxœcр'FŒ*U0RЫU<4р№IENDЎB`‚PKBДВ> content.xmlэ]щ’у8rўяЇ`h=Ž™u"РЛмGxclяFt;&Ж{cџщ`‘ФŠЄIЊŽy‰§Йяч'q&РI‰UЊCUХю(QB&ЎЬ™ €п}МйЦЪЫ‹(Mо/ˆІ/–i%ыї‹ŸПўЛъ.>~ј‡wщjь"Lƒн–%ЅЄI Wr'Х… О_ьђф"ѕ‹ЈИHќ-+.Ър"ЭXRчКЙ/x]"Ѕ(oуЩй9ГœЛd7хдЬШлЩы_NЏ™3ЫЙУмПžšyAЈrіU:5ѓMЋЋЄОЭќ2кkХM%ПН_lЪ2ЛX.ЏЏЏЕkCKѓѕ’xžЗфдІСAУ—эђ˜s…С’Х ++–D#ЫšwЫJjћWnRВл^В|Вhќвяi5ЫY,а]цД‚ф<|]­'Ѓыj="ц`уч“qЦ™ЛP1ТщP1B9яж/7#њu—ŸШ?>jq•oЇж…МQy”MюІр–ѓЇiк43ˆСЮ›Kuн\ŠпїѕAіы<*Y.Бй?‰Їл!ЁY‡ЪЎђЭ BA#шRц"-њ—ЯŸОЖѕ[цш8Г%Eщ'­dŠmOж№Ž€жOЂЩP@ооаЩ ЃЗ–9ЫвМlДšю кШhSnуq†дšu‡с +4ЧX‚9cЂ^Eьњ‹Žw: Lo˜мдЫТ™šAБИс o%v“Б<Тоћ1‚Xн pvš]HЙЛО!поL+Сœ†Ћ§їvPF9$ЗЏ]"MEзЦПЊI @K\шeю'–[хЊ,аlЁqЋиT^Ј(ќ ".—а>š‡+ ~,>дб„АХВI№weŠ…*wХ‡wТ-№OE|GМ_„YT +р IТСЊ™Пf‹хxжuоЫКЮ§lurцчу№ЊШ„ƒ3єѓpQ—[eQ38ЫЫˆ 6ъЫгп C’& №&U ŒЯ8јџ[юUЧ5o“аАЎјП…ВJ/ЖQЂnXДо”уVДlл€X сЂЎѓєКЭЪ vЩ'HІb‰ЃЂЫ№‰LD‰њ8ісѓеПб^ЎЃНОš> Ѓ­ŸџЦrhP›в4ъyXbŸ#`h>jvРxя— IЫDњ^‰R'вЬЂЄMt§ЛEG$­В_гTЇЛ$\LйjeлuA &'  [ЕУm p0ЕЙA9a„кjћъ'ЊТЉј–ЂdЕK@ОђрU7~lЊўŒп1;щs№ЖwЅЫЉрrз5pў•[?cr C’ЊвЊL§Тdфьгъ"`Х1 œљ$Ў§,9[ЉhЄTС5dИ=Т№~!0Qђ(F"VЋ‚(6ь;ЅѓOk4іЉ•Л +U бяl€“тtз"эЖг•y•&ыЈ2я{I™т$ЪЭpŒjю.гВФwˆГUЩGІщэQђЊ-щŒ Iсг“TХпѕXкј!аA‚,щ$Vу а нjаж%пŽыaщъј/kє|ЮоЇЕЛьІЊa”Г@аБѓ=К0reGaЋМOšaЉ‹оo‹}hђO_u§ЯКўYзП ”ЯВBhј.Ъœ]дьЂf5ЛЈйEЭ.ъ<]”5ЛЈйEЭ.jvQГ‹š]дyК({vQГ‹š]дьЂf5ЛЈѓtQЮьЂf5ЛЈйEЭ.jvQчщЂмйEЭ.jvQГ‹š]дьЂЮгEyЯщЂ:їrЮ^ы>^ыиЭАГгzЫNыЮоэзS§uРэг?ДOhн5{Ё—р…ˆў8Змј<јџ;ш*LЫ2ыƒє#г fуџ'C†œcЙ­r†NЦЦ\E[wAФSн4{Гй›ЭоlіfГ7›НйƒxГS„z9‹Ь~зŒїgИV|Ъs/GƒН­E3˜З NЙџі…Њ>Єиы ЋўїЕЅ—П‚ЛŽЪMК+QЈxgyZь †я,xПјз~§;Є[ТгЎ?Ю}w9Іё‘nщL'ђ`Ъc*БгШP+ѓнhф,hН§|У­д^€{!пу>Œ—?юvухыeїИЭтy`Xє^ѓ=­ŽŸѓ6zЪНgpМЏЎйЦƒzЪ†лK@ВйCђSMіžЪ?ѕ<Јбчjj”жЄШjуыDљ0U5ЈY<т‰yw­ЅЂFIШ’ђŽЮєЇž%}ВŠТ9Ѕ[ю*EœѓU6bfeгР:Yѕ‹ШOЦˆxˆ~Ьn*ђtAєLЮtA№>tб:НооИŸX/_Jb ž_ЏŠwu№їНШk“еSЅŠгMkЦJ•ŒсG{=_Ž–Р кIщ1™/‰B–ŠњqЖё“ЗK‚r'–Ъp LF„ЪmЗd -—` ~C[˜GA|чŽкmŠ+wqЎ–—4Kџ’/ь­Ž†<P‘К+аž&И8ЩЁWPєJ!жРVЖЛ -‰ЗОм@ЗоГој$ЉтДˆФ"Іўвˆ›WYухŸ|`њ—O €‹Jљт'…HьŒСЏЎYP 
0|мњI‡#‹Ъlѕ•ŸGтЕЈoV–шpё•рЛbAђ“ѕЮ_CГq%јIMњљKлЬІgM>оБz!ЙзcˆiYЮeг‘I]мuНКМСœBэИ%ЛtWtUr5…ЋЃ1ЮJ8ры.Y§ВЬ+ƒ%”ђ#ћеџЏн4-е9‹лЂdлОВjzЋВЊa•jjњя775ЉRRM‘хпъЋЅЪR–p€ЁъAc‡ябы&яhПŽбž7}oh#Н—шc§aсиa[0bТѕр]@ЬТjЫ)›Г8b+ э›лlцНllЧt—влэ~Ÿў0Nу•y‡гзлиyББР+SыsњН|^h ppвєvЃ„БщтD4ОЈЗE§x &Žглнл_}юvпзЃLяro_ё<ЛќlK;§'Й_l\ѕ–жX^_,хЮБдЋŠЅм9–ЖЯC‚yУБTџрˆ3^‘y.цD…оwлlŽ(žMнћ{іs1s1Џ&Ш8с†'^и3Хwшу}я–ИоcyЪ:=ДЮ1Hl!NяЖЮё4їRЕњЖщŒю.Ш'*AєAпq\Іq8.ЩоŠ‘l№ъ"G оpйƒ‹УCcМќ†…з0Y—_{3”этм–жОітрƒЭЉѓ6к•НјЈlоF;n)_ET6нітЏmсєš{Qб1g1Я…gЋћ­ю<~#sсщЖЏ7ч™jћцЩрŒ„ЏPЗ‡u„ЭtЮ)Б‹'„Т%нсlцj}QЉжа Ч­)h_лeиЭ‰V8"šeзПёt@Њ>а_ЕŒW|™оTаЭ*€Щ-i&4ЛЄOџ є/YВМR-ВеjЮN+ї3?БркЯY1\јrЏЫVмUAi A\…Ъ5@G5`е„йЮОъs-р;ešU)@tЂЦ K6~АP]ГtЫЪќ–чИŠиѕŸRtEЄёФ5Э:A4ЂJсŸU ЇŠЬ$ЫЃ€Z ŠRСVж|ЗЎ/Цq”5MlD’љ(юŸы:zŸP*Дђ”џфіЇЇїѓ…ѕЬ 3а,{ž1Ca†BСіш … "X#3f( toFТг"aй™Ф­'BУ…†yДsК/ Ks Ўц<ДЛЕпЧF‹›7‹ŒI@0–эЭH8$XjЛ3Ю іc#Сд<{і/ Юу#СЖэ чїё‘`ижŒ„чGТш>7 ћ(ŒњМщƒћ@†цЩnСг<ЯЛг6udЛ†.>ќ˜^'qъп}+hBйЇmugrЧ$бGеQU|S]’ЃЪБ4НЗGghЎЌЗ~сCЅG3‰ДCзŽ’ЖfЗЩЌRMЗфмреЛFѓ{M~ЊQc$їЈЖœ#к2ОЙŸноЪ§Зd%lИмЕaw<ойˆumƒTЁH0LZ#ўAlZv SљлДy™Хžaњм˜ЁO]c(5`п`ъЖШјцЕз%ЧM˜ЅyЄчХЈцКЖlТlЭudЈSсцN@Y|ј >ž›Ађ4LЙЎЉXд~< e'iЂfyДЪшjФ}VЯT У•s7izpЕСХ%T1C\=гMj+Žc+&а№Іk)Жg(–n(ЖсСеSlнуП-ЇКZ–bzŽb„ѓ[ЄJз |я6аРїAїгЄт7qкƒќДѓXПGpXМ\У0Х•шœкџMЉЈРŸi;0рЗi*Ф:+єzјJјКlъа“їžš\ Ш‚Š­PDƒ4 Н†тСИ …ПўесWл2†jGq\уМ@˜ˆC‹Š9@&Д{fAgАq Е&RФш)$ЅPЯ•.†n\@6в…RЬn€-Ћхb™†KБ.eТ…n:B .C{ј‚œ6ˆZьй рu TEeSќnлЪаіxЏhіŸBЯ]\нюИљъv S\1XуWЛ{ѕn+.|'ІЋИь ],JGѕђп 3ЄSO№ ЕzœhЫЦзѓŽ-Г&„c­woЃ1ЋГИ/э<ЗLrд’§Ѕdлi^єЇ(cqы'ŸЭ7 [†яѕ“uјіъ+PФјj5™rWтШW$<šЄЋ Ыt[@rь~AГОqd‚јКUЛ7УM“ЊLѓA/Bl^K=AПRщ‘№eSZZ"ЈZ№ЪЊ)А8ZЛqK9mпs%^Х;!ЯЈEr;ЙтЯ%TЂ [v*%ЪЬ†Ј|С–аBХУXBCˆAхсОьJыЉЅЇk5eщеsџVzШтЪт3ъYанФg‰Я•ФЇ{Э™ЭЋЬPЯK|К#‹Џq­]љ™Вќб( 9*@}H€dD€vG€0 4 [еpм)"œЭърб\оH3f$=ьЩШнa\й ё]:ОЁ…‹ЕИЦЃXийЊh9 (ІоЈл(‰ЖЛmkTїщў ‹‘ж˜Щžс8%Ьпvk&…Т†ПЃoФ†кКе™~8КМ;iљђ ŠrЯ&t6ЁГ Mш›а' фЛ; C“HK6Ае$’иwŸYЩ‘‚x9Qf&mяBљRЅžЉЋч4‡є4з В№ърМuYИ8‰5*=:*НС)="=yщA§Š ЭКR г ЧЅ‡ž`Ppу7Jу7Ы ›6їнДЁЇГЭhjђTЧ”–ѓ;юŠВƒ8NлOHtХp=ZS‰й~ыї{yьЮ:~/9ОA5‡іVуtЭеїzьщђТЈ-nZxИG„­Х‡П‚љdEyячxЧAЧwЁOF„(,jжЃšв/™ђН Ёeиžgк6Е-ы…3ХœHp7ЧДLКžeсДY!$NtгезqШі‡‰€RL`чЮ T у8Ј,9ќOАИегЎ Ћ"K“тўЯ‡тj|G№бpehКс“Ид6=‡tqхhЖiX5Љmшžk"Ў№f_м’nЙФФeїЩИЂ.Л‡+Ѓ§іИ"šу <еХtй3VИзzoH‹чРfуЄaчє&™ Wždкђ2ŸМi97[ŽЬ |ЦЗЬСoxРH­NI ­u(тHHГ‡sl\‹љЕt> &ё“G–sIrьЖŒ‘’”p§№џPK.oчЙvцPKBДВ> styles.xmlэ\ЭrуИОч)TмкTr€ј#yl)уйЪfj“УLеVМ›Ћ "A 3$СAЫš—Ш1я—'Iу%RІМžБзЁІj\D7Шю§ƒnA|ћУ}žMюЏ(+ЎНpxRФ,ЁХњкћѕ—Ÿа•їУЛ?МeiJcВLX\чЄЈЛŒT˜\TKMМіj^,ЎhЕ,pNЊЅˆ—Ќ$…ДtЙ—ъQzDнlшtХьЮф^ ,y[sёjј“Г;;сx;tВфLнщ):љОЪPЪPЬђ z Х}F‹ЯзоFˆrщћлэvКM_ћсbБ№Е8nјЪšgŠ+‰}’љАЪЇЁoys"№Pљ$Џ+RQч+ТCƒ>Zе’“ X@]i—УnфЮiйзнzАuн­{`Ž7˜Ж3Хм6•Y2мTf‰;7ЧbгГОWўG Њџ>~илЯ‡>KђЖ Š9-ЋЉЙнљŒБFT9A;Л7 ‚ЙЏЏюэIі-Ї‚p‡=>Щу,ngyhРњРШ4љFяœfƒЕо#С Нф=2U.Сяе№ТчЄd\4€ЄУƒ.<%jBЦFфYШTЫКцIвЩ тЬ|рМшŽ’эw^+œ6„Х!ЈањаХдИcUЭD—\Пќг—4$C=3“mœ ((Ѕ–‚уЂ’`‚ŒЙ WMЂHJЁnЊяё1ђ! 
Ÿ%iФ“t о;›uN|їV†zР '№ŸЈ+Й4зопЭиmм^zš’аЊЬ№Е9&–lR#q‚y3С 0cbП дЧ`нBЊ>­9ДЄЈЈиA№Н3Љ›‚‹Е|КНћŠёD†u ћFЭ ёЦеёr@*v*ЈШ“А­]EхsЬXЇ^p ЙЧІAчf˜3› ‚FЂ”fЯZWЌЉШqIњ3 R4•2—NРЛ~ІБЈ!­јaр~"чя,XќўmZk;UьJЅU^*{UcеF†:y1БCnpВтУIЃAŽљgТ]щџЪ9лzHNKщj?В{аxLЂ`2 є8d—<„1ЪБM|iюW Ю>”рЊЕRясZтuт!Љ“ЈНNœФТ23Q…`+ю%ЪHБ–ЉЊЕF’Е9ЃЮУE IЄ ЗбеэцЗбтЖW’Lў<~œJстђћ#1УшRy‚оМ$$ХufЖЦ=–bШрS Ъ =Ы[bŽе*9.(lІ5IЦ-„kСЊЫ‡а„0ЭŠГrƒ=УVж…2А~Дђоо4]F ДтУж№ЃRMM‘9х,‘‘…#БВЄ”2H$D&ЙЭWw‘‚Јr ХYE ƒЎЌЌП_­†]ъuЄm]ћт„m‘zИё~Сkа$eZ"‹тqЩЊП| АЗTŠOnЇЉmЉ ' 5)€Ж œхИhq”таЕAuVБЯЊш@%š—Bekˆ%рК…ˆY]ТќzгѕHHИА’О'Ÿ№ПъЩ Є™…Д3Ћ]%H~,ЋЅя%v8Єа–A‹nŒРHoi_юя-Щшa)+H—tr›“‘ћGiдЬэеЉсшзЊaщдЋЁvhжа”n{#mљЉЕNзiuЬNž`žxИВЙvMл Zp–бФ›ВVќ•маЖ4QQFEgИ3+ъDtnЗЇГ& Жш1јГLок‰[ rа?]э):&ЫDлVOЅ^+щbЧЭіCQь–ѕя†ŸкS žэФ>НЃоЬю№Иpв6„Ў7ТСЕKœШЖ‚˜&Ѓ‹ТŠ !ЋŠ.ZFRyЋщ›‡ŠЖƒ NTхC“DЦgСfВ"нЋЕчXђЎ‡lбП ф?0RеёШ eЁ&юВePeš­ъ,#bЂ‰r\ЅGuЉIHКзоџѓяЦœ›ь]ТяЫ‡!ўlѕf—ЏXц“!tдž_ЈtьŸRц UЃGЉЊчЈфVЪ ЕŸy#JQrZ  Џ`Nуroє^чї‡аьЩ‚ЪъU"4B„Ў^%BO†P4ПJ„о<BГW‰ЯхгсѓJуєе“!4Ѕqzё„НЮ8OбХя+P;dS3њ}ЅпЩў<JЊЕTX ІR0ƒМ]˜Q]ŒДЧš:Ф Ћ.­ftћFrTѕq4k/Іps&ВZШ6Х CR‚ˆ gѕzƒL‡Mwnх i)<Ђ“^™NŽГ2ёg{”l=цvAЁјИn‚uiГ5˜лћ? ŠeЕsЁкf.‚/ЋWЄСq‰Ъ№›žЋ•Ѓ№ †з6ъQбiѕ)йУЂь‚фхk;ъœq’QљеСAлщv[}"БиRaU7ћ;ЛNЖћ‹ys ѓg5І:zOг№тміSѓХ‰ЦдхщЦ”v““}ЉG Љ]ћkТibЧ­шЊљЋЋ™sВ•ѓ@#чšCœ–ЭЎЏnI*ъbТJfЯV{№<'Оѕ ,_Льo’rХ’нsJzzsdRп}gkѕЉЎРnHoпћлiзЗ‰впet 'ЅЅщю|5ѕЦшй”<н˜=ђЋЗgћ{–/i>6nЧЦэиИ}1лБq;6nЧЦэѓ"46nЧЦэЫjмЖћЎ”е N7ы…ж•ѓљYuЅT)|z&КЇrfџ&:еП9"їo†ЕК#6t9Ђѓ—#њ:ЫбеJtŽ(Еж MуxБјІk№@Э­BѕXQѕXQПD„ЦŠzЌЈЧŠzЌЈЧŠњџЂЂжvxjі‡jХдy'ЂкиЬЮњ"uCp"Пў2ўш šѓЈЮ(ŽбyUЈХшYњЯRћ0š==›œнWЊс_*tAЧ1>*РЧуп hNpUѓoсЎnCъмУSћцЦўpTчМжЁЉ§,g§lВB]PaђСPŽКќм?x‚Й”?c–oWˆ‘%X0зRнŽеЂЕ? Мžце“ †u’ ;“Œ-Ÿ F—гХмЗ‡kЃpzБˆ›3‰%Ї уtџІš lЂŠqщž'rTщ7УIy ЭKƒМAŒ;xdj+^sVƒшѕвяlш3‚ƒ–œѓТˆGќ†Вз*ŽЭРr\ eъЛ§‹3'9$wћkїgџШцхGє=CxєyЙЊŸ5лч,;Сaˆ ЪŠЫ?дCƒb”•ЫжZјїњїШћ@ГЗћНW8/-@ŽНј{АєЛпиѕюPKЃЅPš• ёKPKBДВ>aŠН  meta.xml OpenOffice.org/3.2$Unix OpenOffice.org_project/320m19$Build-9505Pablo Hoffman2009-01-06T16:13:282011-05-18T19:34:0428PT02H52M18SPablo HoffmanPKBДВ>Thumbnails/thumbnail.pngЭxgPгбЗэ/!T•&в•&‚t) НH•аˆšв{ MЄIG:šH‘"=Ё(ˆ@Кє*Ёw~Йѓџ№цЭ{я‡ћaэ9sЮЬк3kŸй{іŠжеQЛEСDР- u}]N2№uЌэ†З*ІО9›Йф†іижэdV%Ž0 ІЊЂ—єp|ТЌnт™ѓІЈsзЊhа~NŸtаўDfžЗ?T,wЙfБZvoBчЌ&ЖЖЂ?ftДЖм‹Ш(I•Ю–7ајщ=ћT0‚:91B@ђфчб<ѓHђјЭеv№еvгq№{e=:рSћю1%€’€џSP4C ъћз; •ьЙУ]$фCl{ђІЪq~‹ё’N}!‡cAЭМlч  F|џБыСаѓGЩž@b+Rщтм П‰ нh‚ЅUР…ўеxуqМ‚8Hд+ВfФиD‡rgРч= э:цй‚Чі № фcщ­N<;ЙР-]:”œШЧ)ўп4ЈЇ^В_<сYвƒxХJЮкШП’e ‚eWЉМћЧИž†аq#ЧЧ—К‡Eцч Гeќ7ї2E"мJ­fЛ†[„>ю–Щ•ЁgїiМЎ*O$x•РЫЫ—ŸЉўЩa3ЭшR€– •#•Wd/ТE>‹іjХ е‡ч(FЮjƒсЇ^іицЉBИVВфzЁ›ДОК˜МбlIo{jm“ќ=6п_•Wс$јфі/oХY„&Ђ…jxфмћ›3­л…HЛюyШј…XфT{’˜lz…ељупTВ{#њ3“ЦLV|Š”'ЙЋDРŒуТ™ќ…Ї*{’XtТˆe§ГѓяADЌujršП—е^лд цX„n~аzьн;UЃ‡ H‚ЄA„S#ЉјІМ3Ќ4NQзъ7пgлЭ‹А‡7јHДе…Оw$‹E^аu&ѕсoч=”Џ…wЗеЬбгЫгЪ†G˜$чto!m5Ў!,tйVЖТˆк?% w№И#xс=ЖЦбVTЬъЅ-ж­rmy8IUеє™фPѓ/mЙг(=~fx:oz2ZYнЮTљыZЧž_{о H\.Ш<.кfD(– јлфА”s ,–MЕ?>lt‚цoџюќ+а‚nHRW_œь[aК%œxиoЧЫ;…П ЎЬЛLВ­IЮžJїЦќМСТ›JЖг­е!„Е4ТфзlFЎLŽTМгћ–c85ŒЫЯZпяœЊory—з`ёєчНђ*оЌайЎбжю7WЗmb@оБgИжЭ4чъЪ&™ЪшTЯs#W’З }фš^Y4УЁ?ејFЭžЦь–%mёызяnіїee1№a†}<ЃЂ 0Gnh<Оƒз­ЌzљДчГOЖwFКІ6~Љћ8ЗЩ\уjг}tАfBЧяsoЦ?Бє+Т”6сjДИяМ<‡†А“ аxиІ# RlЛ[Бze y нп Б fЮ?Ÿ™јЎ;tа|Зїќ­%\Вті+Ъёkht,)ZU5*ЪЗАРd­‚s1 з<Ф*И„џ(:NЦЅ$GArКфє  Љ9fРо Њі!:!сјЯћ0Ђˆ+…Чи"ж EфкŠA‹КffюЫš˜Ь";;;Ќ_э’’1ФТJА)ЪЭлю)D­ўœYёюeхіЂяЏк,_Г [EmйЙR Зн$Ce;$ШЏДk<Щ‘!уЕ~їЯN- pД>`сКxg…†Шaюх!ŒэёЗЏLРХ€оzрbьp0иd~‘qO§@L˜йіщєУїЧ +'ЂВ(WицWвъЧЃwˆ{п§&нь§uP0™+іЊГыН•€œc€Џ qa–ЃYЗ­ђбМп†ˆsю›™˜ŠуЪиP;ЗžбўЯ&­ШшЪRя ш~мФЇиЈuіБ3˜"ўкj7Alшн|~IœHdё‰ˆбГTШЧбї<рЌ‰ЪЁЁž"vˆВFCuкЙ1NЊ№ У7щиўУѓКЦ8= vXі џ2…—эшщRЭ‰'љQ†0ЗWГ!ў€ѕUя–V(ч”№НЮ‚ЊщЮ`ДЬД-№ яЋ%Ѓ™їŸ5ОЇЬ,єKё{9Х уьPў•IАWš-RЭuflЫ‡^ј*aкЉЎл1аЏФMX*ЈШЬ—ЅЪoyЁMЩ~>‚ы5WФШфЫзhxМЉ’цЫиаОгž(y)šїв,v6жИњцћ…№‰| Ѓ\whа}OФ_̘WŠЎкХ],eVŸЎњUэ+Юъџ 
дkЈѕЄ[(–P0v];€oПƒЭёРї8Xv4љпbpуŠ2ППБ1/жЉАѕчГQЖДг&LТOk}(C.‹JФ|ИПp[ЌЛг`vSЮA=ы‰е}MЏ•‰Gvюoљ›F вихП;$™Пw ьщnЦЯSЪzЄћ–јёюИйjˆJPы"x.EŒ6XLœ-*~Пф /@V%д{tРаvЛLvъ;7м…%S[ы.SІ‹ЬЩ”ЖП‰ ЮБ 1м^ќ'pП8Лp#н.І1™YиКwаŸГКZNЋЁ‰Z§ЄЦѓz“i\+)…-VїBЄлЇfлp„ђёЁ˜еЗјbэЬ№ ЏMkГŒЭq“F{і_„ƒŠxБo›ыж:C1љЭЙкќe^ЄЈџрЊeœђRкЮŽЏ˜ќO”Бќ'\П5шЫчs.о˜eT\XŽ3Э7CBl2єлЙзnввvPЁЫнŸЎЫќbHХ$x.БњКЄbгсkYŸ3Šж05 ЂfХMВ‘WHr^™жЖŸ6!h7qЧн ж\#b”tŒёЎьъ'Hх‰3‰ьШИКЬ?~ЖЃ\ДтгЅЗУєcЛЭТAЊєщb‘ъЙ]œлзГЮІŠФњnѕ;p—лW2ƒкWќЄRZy+З^&XЋѕ§%SGgп!DVIш’Йљ48№в'ЧМjR*~Ѕф™ѓRЏЦы‰гЇ/Т[ђžlqяЄЄu_4zРЋБй#ы?нJЩ’[ ІzE!Й_жH$œ‡Ыђух.lœъJФЬe„Ч#ѕZјТ–§ёЋсLњ:toцi†>UшИ›~˜6ю~&8@iї„+/KѓVЇФTУ ]ž%Й’л8ЫКs`nЎceЮwЗlьkСЛ•‡y@d[ЭžЋВ,#щю^ЪUпО=ЃчПsD`Xв"–„ зFtЎ}.ПЃњ,ХIšJЪу\ާyцС…:ДIb‰r>5'гGv:–5žZ†рR“е†ЛyUНІяpнД@SХПыеxъ<‹ЧЩs›К№ўА+Œ\э’7[а> ’м„Aзmѓ.ЃpžKП{‰ jw( №Eв…cю„ЋњŠe+хаЏzЉвўфpŠЇЕ3Ђ=‘ЃЮvvќ‚gѕОJ@јжд-SЩh;[u%ІTЉЈg‰ї“'_@/r во)ЦpИх,ѕщЄ'ЦБх–НLњІuЂ єf*Œ=ї#ј‘ёu?|K§Н/Ѓх4уџбaэƒцRY:s]Gy_^*а‚§1ь‰ТЏЦaCѓO›bЯtЮхx9(џprЅ(Aф ж5Z.~UмžЩП>hъ˜вЇž[ќЩиЌ­pyЌQŽиuJŽЬпu‹P> ЇчdкМжЙЏuk<)јтр§­ЪЗmЬрЌP‰ёrњ”eйKW(ш:!ВŠ?Ѕ§]иf:jj0œ UКђŠ”6 ЦџMјЈ_ьЇэ'ѕјhЕ@u! оУкђS~@Бс№чј ЋЉЅZs75Eщн(цќј&ї9пjу|)ŒЏВžјEЖѓјЯо~–}?­ЉzXVwXoЪпWЦœщ):36.ЅоCф{яn#эЬ„ЋHZ†m’{]–нУжС3e-"‹еГBrr3бjьб;wЕxWчœ6< єЭі›98тŸ7‹BЕинвZЩƒ9gn1™ЎАдБ5Г2y№nъ=?]™ьт•Ÿ:ђ:ьVQ?~*œЈЉeзЄž‡о`ПdМоƒЄГЏfћ7'5]bй’Ѕ{+–ў%H l“€iмЩОB”9;_лЋЋ‚ѕ=‚^Lѕу§ауgћТЫ‡Я‹Л\ƒžЇљk>|џ+„ ч>f'7вљЬq}bsюЄ*=lімћТщxЉJM$уqЬ]%oнфЬal|эйoВkшrђосюёэЈижžќcU>‡ FИ~Уn˜^ІH[Хуё^~=ˆyXHЊ‘%ТЃ:’dsщDьощs"cфф*ѕэS5љ„‡ыe~ aзvуhЗ%рmФ§‘ЭnБ}И ‚+Ѓ‘­0Žсh{љЦtйЫФЬ@l‰ГАБ v†VЎпикHдŒcШм=sў"НžїЬЋљ‘ /'й=№Џ$э2Кbgm/C:ыaІНџl˜{cW№ьLb‚/СMЈ‡Ј[цšQ2ПoGЌ-Р*ŒeVђеђO'71l™™y’F_˜ЪЇ)Y.wз+{шў=ѓ UИа кЙєД7S_ѕ’эŒ[Хчъ33нѕ52эа@ЙНаŽЅž^г-НгЧёWЪп68юњчУЊСл №АmИаOyФ=Б›?ЅвдDTЁЛЧЅ“кQ2^ u Щ#UЅСыХ”њаШзшiŠ­ьGањ•&ЌŽђїƒDAma>ч=КО+6.іnиЎ3яHbыXM †œЏх ,tAНлмЏкAMЬЊ†”G3љСХІ‚С‰>ц 3V§-ђгЂЌ{J э9jˆЁ+;цЎВ-јЏŒHЋ„з•I—!ƒ 8d–џ?ХЁ*YБ‘ЁГВP>дХqщ6gеЪV’ДcЯšЗ]p\zЖїCеП“ PBtKЫЎыеjз,ˆ2ššŒѓВЏчМ@9б/0^­Є­і•_)‘§ЄТ )'КV )1ЩтТ9,uЎы&ъЭАdŽiГьЛЃkиЈШAо 0™€мфiРp€ф/О@. Д|йЃ1~ЏqWЗа>AOT ігˆ”ЉцўЎ<УЗiѓ(BAРп в8оыС@ЏЧy№i!_*є№ї)Љгї>h'Ђ€§,"х‹wу>€Є*юлrы žй_хXІхўŸ†…пjЋDcЃЬdъ‰GОщHj€йЛ•љYIK5š(5Ё™oРќВFГ!ЙзМJАis—Ѓ“АšrЇџSУР3ПJЕPliB‘њТ ЇАНUќТгužQ;"\9-ЈвŠT†‘=Н4ЄчЛX ЬяD юiШ:}мroН  ”urњѓяn&{gѓ‘г?щецЁК“Ÿ[лЋcџУ IЦ’yg'yЄ P|DŸЅЄ@љјоRm–ћ†гJkŽ@8ћZ&52t|ліC‹Ћ43C}{žњТ ЉьЉч$ІЈХёћ%r*ёЙu?фšя']ъ1”ѓЧЩє$cлIЯnжSЌCtЗœЄdў)—iэ›ћ#№ГŠжЉњЁŒ9ЙФёЊ.ЖюIэš~йїž h§Ф=`јFў!эиѓ—KqAlЬЊЌї“’z#aОg˜Эізш+:Ж…sЮMЖ67юŒ6в Ј‰‰ѓ5ђЬО.kвˆmлић[э“ЮвЈЮ? 
Ю7ь }:Ђ+S#С:/ЏШXŠ5-Љƒ.мхЫѕж/щD|."oфADœn+LЭёFѓЋЮЪLЃдИЅŽ‹gЦЄЄSœЪцUљщeхhrtBSgЖ†Ќ<ПJLнkEZTўНyСџєі.НYš„vяІ3E~ ,б]8•ЯзЧъ4БМ2АoЋЮRќКЂ~КИ/Q‡mыAkm(Œ>Ipz=ъB/$}ПJyКfšЕЌС”!W4эЙ%;ИХжWЊ З9дvoЎ^.ўю2M\дœ‘ŸўгЉс#Чb%‹ZіŽіЕŠ@f&љї/1ё’єšFжТ\ ™0ŠyЁю&<злуЧЅД‚нŒКЊ7іЬГ6YygžЮEvьpTЏ ЏЙіВ‚щpŽ9О=чЩ”Ž;уя Ѓиsšё ЪЈ8еƒЙЏв,Ћў4ЬгzРDЅ6 1ШŸˆ№щ№ЫwЛ,МЅj1§žтb!MтЃІnхx„“ЂX’;}_mД'г‘э\ї№чФJ ъVЮ лš4Ат)шњ.у ѓјН] §:)ЉЃ|Йh”xЕЌpg‰HcЂ“„Кџ,ЫNJЩм!d§„СљюœЮcлГlэZ„`щjŒTЩP$Хет-Їž4(d›GиJШžЋ—JЇ{ŒЁКЊожL˜УЫО ИiJN$e\љnЃ!ІTЭqьвшэcџ2_ђ+ГЯНљ*Jѓ’7SЖЫ"њ€k ЂuъЖ‹uvщ.Їqo-мА!;%юлоA„Јdк‹d\UxNЃ‰9 fђZ˜Ыmg"ДŠ]МЏйєw}ЏšЯ(яПPu[pkЗЈu/§ЁAL~qђКxйЁчч„!%мЉ/oTыЇQhu‡В”Ц=Sy^йHТ­Ы™ V‡ФлЈ†иЬЁPэпXќДђЂgЇeбT‹—aчUєђЏІјpЁйЛуr`,б#Pя˜5о-­ˆ[ы ppсщ€$.пœ.мР”vЃaŸ е`ыY™"ъ7€cбuўПаџ\QЙє>ЏŠ”џo‡^CUGЅ\ і_PKuEЮєСPKBДВ>'Configurations2/accelerator/current.xmlPKPKBДВ>Configurations2/progressbar/PKBДВ>Configurations2/floater/PKBДВ>Configurations2/popupmenu/PKBДВ>Configurations2/menubar/PKBДВ>Configurations2/toolbar/PKBДВ>Configurations2/images/Bitmaps/PKBДВ>Configurations2/statusbar/PKBДВ> settings.xmlнZ[sт6~яЏШxња>Шu&А$4Ь’„ЦАэіMиЃF–<’ТП_I˜”;qdдщ4ЙивїstnфъѓKLž ЬhЫ;:lx@bЕМЩИ_Л№>ЗКbГ В Ъš)еq ЖSб\Нny)ЇM†MŠbM4YtН­ЙЙКiШVO^ІO-o.eвЌз‹ХстфёЈ~tyyY7oзKBA!i.GИЙg“6`t†ЃВ(Ће›ћcЏBы ЋƒСгњъяѕjcR–KЏ­,N”ЬSђƒЎХqYНvчд+Љо–§rKv /В,Љ^ЛтЫ Нс^Ч^{эKkj_eŠ]§Јa БіЏƒьБІjyJмц3†ХЋчyyћ~мѓ ­М4f‰З~)—‰z‰ЉєкЕГЦЇЋњ.Ю‡А‡0“EрЇ•бџРЁœчСŸ\œW‡ПЭsХз|RПЃЄ†i/nsС"пXfrО,#1,с–˜Brх ^[ћХ‘&~у8ˆL[№SЦ ъЕgˆЈ‚пчŒnыxшсS”Œ™f)‚—<­ˆ>Bм!a*м‘шяCLЁbjz˜ў ьsѕа9ЫˆЉKфц0#’Š[DCЂChща}ЕыЊ_Ў9Š|uеHБyь=р†рS$aФШвЈmЈШŽІЫ‹KGg†эivЙ\˜(ГЧ-UтлЦGЮOЛJОєкѕв_-;YF*DJє_‘fШ‚'пЅў>д=ыHџЇ. xЯzŒА]Ћ;ћcJ€П›pьёuЌўЂђ№vІœ3ЎЎŽхНёЈˆЁп+№@WИcЁU DGYœtЏYЊlи#8xЋ’ї&Ф…9ОЪ) AoŽhlеУ88ŽOpbќ‘-ђmlYkjЕhћR™ёmЯ4!нв}жаїLю˜xИкyXš[[–›џџŽфЬqGb)ОЎ{ q…вВє—Р‹eЏ†оW•qЁрч'–J_#K] Z—УFш?яг8’Œ;фИЪLm—ЯRЁ?|хјі/œу››s „ЉЇ5Э ех`aПX%‡(teћ_ŒХ4Їј(#sЮУЭЙCбыl”‘П@€,?ДZ=HЙIм™^u’„,'ј5’hџ…EKuаБі…ЩуА`Вђѓ/ЉрзŒ .В~(идŽUе1ЉZ’їXЌЇВz.8V”{-+Mёn}ВРŽє‰Й5ghЩ 3”Љh}™_N|$эЯ№њŒOq}5_ѕ‰оPДiNC‘щ Ј­эSЭ_€:ё-ЫФ1ксeЈ%šе.D˜ъё–5Т пм_5ЮНŸЄPˆ˜Е‹GvšПE2АПWsлlљ8эeѓQJ™КъКТЬgКŒ=p3!о$шЂрЩE‹КЩёцЌЛ2‰Ъ…nцœНЅe™Q’ржDQЭсd”n84њНЋЗacnѕ4ЦNфзTRыpbKЫPј(d”l'І}œъH9LшЮќ)+Ѓ, Ц‘вx \хџх6ˆ–эјH•GЩмOуиUp5žє{Š–лjЏ22АР*sъЯОUNЂхю˜hэM„-jЊаe$еЧЌD™sЋЫ†2нЎ?:zїб3|]}Іџ@{„ wе7ЭА~Г—Зž$ЁЪO*ЦcˆђFЎ*Žђ… g}чџ&ъEџ•гўPK*ACFз#PKBДВ>META-INF/manifest.xmlЕ•ЭnТ0 Чя{Š*ї6Р.[EAчи˜р–HљRт xћЕh@З!mEM/љћћ;qlЯ—G­Вњ ­ЉиĘА А;išŠ}lжљ [.žцŒЌ1Py™dэ&\—‹о”‚ ЅЁ$QZ‡fgEдhЈќn_vJ‹ЇьЎЅТМ5єЇь&†; 9V œSRЕ~ђƒйg­Ђ/Q4м^ŠРn„оЩfНэ:*•; }Х8уƒќфЮ4їqяRPєјtвџfНёyђКžЎŠ1LњХ4Т#ё6*їЉТъюД3“шЄ0ŒŽеH№є>lГzk@ЊРщ2-\=ј(М•5ЕlЂ?gF˜qЖKыЙˆо?ФсZIœЗMћДУ дЪa*я­‹Ў­L1 О#'ЛВV%ƒŸkhрo’4ИDy№Ѕ‘„(>ђ(6БM—œE”…шkќЯq 3ЕMџZšчќWЯ_|PKfт n.PKBДВ>Ÿ.Ф++mimetypePKBДВ>ЯЃ€я[[-QPictures/10000000000000200000002000309F1C.pngPKBДВ>.oчЙvц їcontent.xmlPKBДВ>ЃЅPš• ёK Пstyles.xmlPKBДВ>aŠН  Œ"meta.xmlPKBДВ>uEЮєСR(Thumbnails/thumbnail.pngPKBДВ>'Œ?Configurations2/accelerator/current.xmlPKBДВ>у?Configurations2/progressbar/PKBДВ>@Configurations2/floater/PKBДВ>S@Configurations2/popupmenu/PKBДВ>‹@Configurations2/menubar/PKBДВ>С@Configurations2/toolbar/PKBДВ>ї@Configurations2/images/Bitmaps/PKBДВ>4AConfigurations2/statusbar/PKBДВ>*ACFз# lAsettings.xmlPKBДВ>fт n.ЕFMETA-INF/manifest.xmlPKIfHScrapy-0.14.4/docs/topics/selectors.rst0000600000016101777760000003207111754531743020134 0ustar buildbotnogroup.. _topics-selectors: =============== XPath Selectors =============== When you're scraping web pages, the most common task you need to perform is to extract data from the HTML source. 
There are several libraries available to achieve this: * `BeautifulSoup`_ is a very popular screen scraping library among Python programmers which constructs a Python object based on the structure of the HTML code and also deals with bad markup reasonably well, but it has one drawback: it's slow. * `lxml`_ is a XML parsing library (which also parses HTML) with a pythonic API based on `ElementTree`_ (which is not part of the Python standard library). Scrapy comes with its own mechanism for extracting data. They're called XPath selectors (or just "selectors", for short) because they "select" certain parts of the HTML document specified by `XPath`_ expressions. `XPath`_ is a language for selecting nodes in XML documents, which can also be used with HTML. Both `lxml`_ and Scrapy Selectors are built over the `libxml2`_ library, which means they're very similar in speed and parsing accuracy. This page explains how selectors work and describes their API which is very small and simple, unlike the `lxml`_ API which is much bigger because the `lxml`_ library can be used for many other tasks, besides selecting markup documents. For a complete reference of the selectors API see the :ref:`XPath selector reference `. .. _BeautifulSoup: http://www.crummy.com/software/BeautifulSoup/ .. _lxml: http://codespeak.net/lxml/ .. _ElementTree: http://docs.python.org/library/xml.etree.elementtree.html .. _libxml2: http://xmlsoft.org/ .. _XPath: http://www.w3.org/TR/xpath Using selectors =============== Constructing selectors ---------------------- There are two types of selectors bundled with Scrapy. Those are: * :class:`~scrapy.selector.HtmlXPathSelector` - for working with HTML documents * :class:`~scrapy.selector.XmlXPathSelector` - for working with XML documents .. highlight:: python Both share the same selector API, and are constructed with a Response object as their first parameter. This is the Response they're going to be "selecting". Example:: hxs = HtmlXPathSelector(response) # a HTML selector xxs = XmlXPathSelector(response) # a XML selector Using selectors with XPaths --------------------------- To explain how to use the selectors we'll use the `Scrapy shell` (which provides interactive testing) and an example page located in the Scrapy documentation server: http://doc.scrapy.org/_static/selectors-sample1.html .. _topics-selectors-htmlcode: Here's its HTML code: .. literalinclude:: ../_static/selectors-sample1.html :language: html .. highlight:: sh First, let's open the shell:: scrapy shell http://doc.scrapy.org/_static/selectors-sample1.html Then, after the shell loads, you'll have some selectors already instantiated and ready to use. Since we're dealing with HTML, we'll be using the :class:`~scrapy.selector.HtmlXPathSelector` object which is found, by default, in the ``hxs`` shell variable. .. highlight:: python So, by looking at the :ref:`HTML code ` of that page, let's construct an XPath (using an HTML selector) for selecting the text inside the title tag:: >>> hxs.select('//title/text()') [] As you can see, the select() method returns an XPathSelectorList, which is a list of new selectors. This API can be used quickly for extracting nested data. 
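For instance, each selector returned by ``select()`` can itself be queried again with a relative XPath before calling ``extract()``. The snippet below is only an illustrative sketch against the same example page used in this section, so the output shown is indicative rather than a verbatim shell transcript::

    >>> # take the first link selector and query it again, relative to that link
    >>> link = hxs.select('//a[contains(@href, "image")]')[0]
    >>> link.select('img/@src').extract()
    [u'image1_thumb.jpg']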
To actually extract the textual data, you must call the selector ``extract()`` method, as follows:: >>> hxs.select('//title/text()').extract() [u'Example website'] Now we're going to get the base URL and some image links:: >>> hxs.select('//base/@href').extract() [u'http://example.com/'] >>> hxs.select('//a[contains(@href, "image")]/@href').extract() [u'image1.html', u'image2.html', u'image3.html', u'image4.html', u'image5.html'] >>> hxs.select('//a[contains(@href, "image")]/img/@src').extract() [u'image1_thumb.jpg', u'image2_thumb.jpg', u'image3_thumb.jpg', u'image4_thumb.jpg', u'image5_thumb.jpg'] Using selectors with regular expressions ---------------------------------------- Selectors also have a ``re()`` method for extracting data using regular expressions. However, unlike using the ``select()`` method, the ``re()`` method does not return a list of :class:`~scrapy.selector.XPathSelector` objects, so you can't construct nested ``.re()`` calls. Here's an example used to extract images names from the :ref:`HTML code ` above:: >>> hxs.select('//a[contains(@href, "image")]/text()').re(r'Name:\s*(.*)') [u'My image 1', u'My image 2', u'My image 3', u'My image 4', u'My image 5'] .. _topics-selectors-nesting-selectors: Nesting selectors ----------------- The ``select()`` selector method returns a list of selectors, so you can call the ``select()`` for those selectors too. Here's an example:: >>> links = hxs.select('//a[contains(@href, "image")]') >>> links.extract() [u'Name: My image 1
 <br><img src="image1_thumb.jpg"></a>',
 u'<a href="image2.html">Name: My image 2 <br><img src="image2_thumb.jpg"></a>',
 u'<a href="image3.html">Name: My image 3 <br><img src="image3_thumb.jpg"></a>',
 u'<a href="image4.html">Name: My image 4 <br><img src="image4_thumb.jpg"></a>',
 u'<a href="image5.html">Name: My image 5 <br><img src="image5_thumb.jpg"></a>
    '] >>> for index, link in enumerate(links): args = (index, link.select('@href').extract(), link.select('img/@src').extract()) print 'Link number %d points to url %s and image %s' % args Link number 0 points to url [u'image1.html'] and image [u'image1_thumb.jpg'] Link number 1 points to url [u'image2.html'] and image [u'image2_thumb.jpg'] Link number 2 points to url [u'image3.html'] and image [u'image3_thumb.jpg'] Link number 3 points to url [u'image4.html'] and image [u'image4_thumb.jpg'] Link number 4 points to url [u'image5.html'] and image [u'image5_thumb.jpg'] .. _topics-selectors-relative-xpaths: Working with relative XPaths ---------------------------- Keep in mind that if you are nesting XPathSelectors and use an XPath that starts with ``/``, that XPath will be absolute to the document and not relative to the ``XPathSelector`` you're calling it from. For example, suppose you want to extract all ``
<p>`` elements inside ``<div>`` elements. First, you would get all ``<div>`` elements::
>>> divs = hxs.select('//div')
At first, you may be tempted to use the following approach, which is wrong, as it actually extracts all ``<p>`` elements from the document, not only those inside ``<div>`` elements::
>>> for p in divs.select('//p') # this is wrong - gets all <p> from the whole document
>>> print p.extract()
This is the proper way to do it (note the dot prefixing the ``.//p`` XPath)::
>>> for p in divs.select('.//p') # extracts all <p> inside
>>> print p.extract()
Another common case would be to extract all direct ``<p>
    `` children:: >>> for p in divs.select('p') >>> print p.extract() For more details about relative XPaths see the `Location Paths`_ section in the XPath specification. .. _Location Paths: http://www.w3.org/TR/xpath#location-paths .. _topics-selectors-ref: Built-in XPath Selectors reference ================================== .. module:: scrapy.selector :synopsis: XPath selectors classes There are two types of selectors bundled with Scrapy: :class:`HtmlXPathSelector` and :class:`XmlXPathSelector`. Both of them implement the same :class:`XPathSelector` interface. The only different is that one is used to process HTML data and the other XML data. XPathSelector objects --------------------- .. class:: XPathSelector(response) A :class:`XPathSelector` object is a wrapper over response to select certain parts of its content. ``response`` is a :class:`~scrapy.http.Response` object that will be used for selecting and extracting data .. method:: select(xpath) Apply the given XPath relative to this XPathSelector and return a list of :class:`XPathSelector` objects (ie. a :class:`XPathSelectorList`) with the result. ``xpath`` is a string containing the XPath to apply .. method:: re(regex) Apply the given regex and return a list of unicode strings with the matches. ``regex`` can be either a compiled regular expression or a string which will be compiled to a regular expression using ``re.compile(regex)`` .. method:: extract() Return a unicode string with the content of this :class:`XPathSelector` object. .. method:: register_namespace(prefix, uri) Register the given namespace to be used in this :class:`XPathSelector`. Without registering namespaces you can't select or extract data from non-standard namespaces. See examples below. .. method:: __nonzero__() Returns ``True`` if there is any real content selected by this :class:`XPathSelector` or ``False`` otherwise. In other words, the boolean value of an XPathSelector is given by the contents it selects. XPathSelectorList objects ------------------------- .. class:: XPathSelectorList The :class:`XPathSelectorList` class is subclass of the builtin ``list`` class, which provides a few additional methods. .. method:: select(xpath) Call the :meth:`XPathSelector.select` method for all :class:`XPathSelector` objects in this list and return their results flattened, as a new :class:`XPathSelectorList`. ``xpath`` is the same argument as the one in :meth:`XPathSelector.select` .. method:: re(regex) Call the :meth:`XPathSelector.re` method for all :class:`XPathSelector` objects in this list and return their results flattened, as a list of unicode strings. ``regex`` is the same argument as the one in :meth:`XPathSelector.re` .. method:: extract() Call the :meth:`XPathSelector.extract` method for all :class:`XPathSelector` objects in this list and return their results flattened, as a list of unicode strings. .. method:: extract_unquoted() Call the :meth:`XPathSelector.extract_unoquoted` method for all :class:`XPathSelector` objects in this list and return their results flattened, as a list of unicode strings. This method should not be applied to all kinds of XPathSelectors. For more info see :meth:`XPathSelector.extract_unoquoted`. HtmlXPathSelector objects ------------------------- .. class:: HtmlXPathSelector(response) A subclass of :class:`XPathSelector` for working with HTML content. It uses the `libxml2`_ HTML parser. See the :class:`XPathSelector` API for more info. .. 
_libxml2: http://xmlsoft.org/ HtmlXPathSelector examples ~~~~~~~~~~~~~~~~~~~~~~~~~~ Here's a couple of :class:`HtmlXPathSelector` examples to illustrate several concepts. In all cases, we assume there is already an :class:`HtmlXPathSelector` instantiated with a :class:`~scrapy.http.Response` object like this:: x = HtmlXPathSelector(html_response)
1. Select all ``<h1>`` elements from an HTML response body, returning a list of :class:`XPathSelector` objects (ie. a :class:`XPathSelectorList` object):: x.select("//h1")
2. Extract the text of all ``<h1>`` elements from an HTML response body, returning a list of unicode strings:: x.select("//h1").extract() # this includes the h1 tag x.select("//h1/text()").extract() # this excludes the h1 tag
3. Iterate over all ``<p>`` tags and print their class attribute:: for node in x.select("//p"): ... print node.select("@class")
4. Extract textual data from all ``<p>
    `` tags without entities, as a list of unicode strings:: x.select("//p/text()").extract_unquoted() # the following line is wrong. extract_unquoted() should only be used # with textual XPathSelectors x.select("//p").extract_unquoted() # it may work but output is unpredictable XmlXPathSelector objects ------------------------ .. class:: XmlXPathSelector(response) A subclass of :class:`XPathSelector` for working with XML content. It uses the `libxml2`_ XML parser. See the :class:`XPathSelector` API for more info. XmlXPathSelector examples ~~~~~~~~~~~~~~~~~~~~~~~~~ Here's a couple of :class:`XmlXPathSelector` examples to illustrate several concepts. In all cases we assume there is already a :class:`XmlPathSelector` instantiated with a :class:`~scrapy.http.Response` object like this:: x = HtmlXPathSelector(xml_response) 1. Select all ```` elements from a XML response body, returning a list of :class:`XPathSelector` objects (ie. a :class:`XPathSelectorList` object):: x.select("//h1") 2. Extract all prices from a `Google Base XML feed`_ which requires registering a namespace:: x.register_namespace("g", "http://base.google.com/ns/1.0") x.select("//g:price").extract() .. _Google Base XML feed: http://base.google.com/support/bin/answer.py?hl=en&answer=59461 Scrapy-0.14.4/docs/topics/webservice.rst0000600000016101777760000001543511754531743020274 0ustar buildbotnogroup.. _topics-webservice: =========== Web Service =========== Scrapy comes with a built-in web service for monitoring and controlling a running crawler. The service exposes most resources using the `JSON-RPC 2.0`_ protocol, but there are also other (read-only) resources which just output JSON data. Provides an extensible web service for managing a Scrapy process. It's enabled by the :setting:`WEBSERVICE_ENABLED` setting. The web server will listen in the port specified in :setting:`WEBSERVICE_PORT`, and will log to the file specified in :setting:`WEBSERVICE_LOGFILE`. The web service is a :ref:`built-in Scrapy extension ` which comes enabled by default, but you can also disable it if you're running tight on memory. .. _topics-webservice-resources: Web service resources ===================== The web service contains several resources, defined in the :setting:`WEBSERVICE_RESOURCES` setting. Each resource provides a different functionality. See :ref:`topics-webservice-resources-ref` for a list of resources available by default. Althought you can implement your own resources using any protocol, there are two kinds of resources bundled with Scrapy: * Simple JSON resources - which are read-only and just output JSON data * JSON-RPC resources - which provide direct access to certain Scrapy objects using the `JSON-RPC 2.0`_ protocol .. module:: scrapy.contrib.webservice :synopsis: Built-in web service resources .. _topics-webservice-resources-ref: Available JSON-RPC resources ---------------------------- These are the JSON-RPC resources available by default in Scrapy: .. _topics-webservice-crawler: Crawler JSON-RPC resource ~~~~~~~~~~~~~~~~~~~~~~~~~ .. module:: scrapy.contrib.webservice.crawler :synopsis: Crawler JSON-RPC resource .. class:: CrawlerResource Provides access to the main Crawler object that controls the Scrapy process. Available by default at: http://localhost:6080/crawler Stats Collector JSON-RPC resource ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. module:: scrapy.contrib.webservice.stats :synopsis: Stats JSON-RPC resource .. class:: StatsResource Provides access to the Stats Collector used by the crawler. 
Available by default at: http://localhost:6080/stats Spider Manager JSON-RPC resource ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ You can access the spider manager JSON-RPC resource through the :ref:`topics-webservice-crawler` at: http://localhost:6080/crawler/spiders Extension Manager JSON-RPC resource ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ You can access the extension manager JSON-RPC resource through the :ref:`topics-webservice-crawler` at: http://localhost:6080/crawler/spiders Available JSON resources ------------------------ These are the JSON resources available by default: Engine status JSON resource ~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. module:: scrapy.contrib.webservice.enginestatus :synopsis: Engine Status JSON resource .. class:: EngineStatusResource Provides access to engine status metrics. Available by default at: http://localhost:6080/enginestatus Web service settings ==================== These are the settings that control the web service behaviour: .. setting:: WEBSERVICE_ENABLED WEBSERVICE_ENABLED ------------------ Default: ``True`` A boolean which specifies if the web service will be enabled (provided its extension is also enabled). .. setting:: WEBSERVICE_LOGFILE WEBSERVICE_LOGFILE ------------------ Default: ``None`` A file to use for logging HTTP requests made to the web service. If unset web the log is sent to standard scrapy log. .. setting:: WEBSERVICE_PORT WEBSERVICE_PORT --------------- Default: ``[6080, 7030]`` The port range to use for the web service. If set to ``None`` or ``0``, a dynamically assigned port is used. .. setting:: WEBSERVICE_HOST WEBSERVICE_HOST --------------- Default: ``'0.0.0.0'`` The interface the web service should listen on WEBSERVICE_RESOURCES -------------------- Default: ``{}`` The list of web service resources enabled for your project. See :ref:`topics-webservice-resources`. These are added to the ones available by default in Scrapy, defined in the :setting:`WEBSERVICE_RESOURCES_BASE` setting. WEBSERVICE_RESOURCES_BASE ------------------------- Default:: { 'scrapy.contrib.webservice.crawler.CrawlerResource': 1, 'scrapy.contrib.webservice.enginestatus.EngineStatusResource': 1, 'scrapy.contrib.webservice.stats.StatsResource': 1, } The list of web service resources available by default in Scrapy. You shouldn't change this setting in your project, change :setting:`WEBSERVICE_RESOURCES` instead. If you want to disable some resource set its value to ``None`` in :setting:`WEBSERVICE_RESOURCES`. Writing a web service resource ============================== Web service resources are implemented using the Twisted Web API. See this `Twisted Web guide`_ for more information on Twisted web and Twisted web resources. To write a web service resource you should subclass the :class:`JsonResource` or :class:`JsonRpcResource` classes and implement the :class:`renderGET` method. .. class:: scrapy.webservice.JsonResource A subclass of `twisted.web.resource.Resource`_ that implements a JSON web service resource. See .. attribute:: ws_name The name by which the Scrapy web service will known this resource, and also the path wehere this resource will listen. For example, assuming Scrapy web service is listening on http://localhost:6080/ and the ``ws_name`` is ``'resource1'`` the URL for that resource will be: http://localhost:6080/resource1/ .. class:: scrapy.webservice.JsonRpcResource(crawler, target=None) This is a subclass of :class:`JsonResource` for implementing JSON-RPC resources. JSON-RPC resources wrap Python (Scrapy) objects around a JSON-RPC API. 
The resource wrapped must be returned by the :meth:`get_target` method, which returns the target passed in the constructor by default .. method:: get_target() Return the object wrapped by this JSON-RPC resource. By default, it returns the object passed on the constructor. Examples of web service resources ================================= StatsResource (JSON-RPC resource) --------------------------------- .. literalinclude:: ../../scrapy/contrib/webservice/stats.py EngineStatusResource (JSON resource) ------------------------------------- .. literalinclude:: ../../scrapy/contrib/webservice/enginestatus.py Example of web service client ============================= scrapy-ws.py script ------------------- .. literalinclude:: ../../extras/scrapy-ws.py .. _Twisted Web guide: http://jcalderone.livejournal.com/50562.html .. _JSON-RPC 2.0: http://www.jsonrpc.org/ .. _twisted.web.resource.Resource: http://twistedmatrix.com/documents/10.0.0/api/twisted.web.resource.Resource.html Scrapy-0.14.4/docs/topics/firefox.rst0000600000016101777760000000540711754531743017576 0ustar buildbotnogroup.. _topics-firefox: ========================== Using Firefox for scraping ========================== Here is a list of tips and advice on using Firefox for scraping, along with a list of useful Firefox add-ons to ease the scraping process. .. _topics-firefox-livedom: Caveats with inspecting the live browser DOM ============================================ Since Firefox add-ons operate on a live browser DOM, what you'll actually see when inspecting the page source is not the original HTML, but a modified one after applying some browser clean up and executing Javascript code. Firefox, in particular, is known for adding ``
<tbody>
    `` elements to tables. Scrapy, on the other hand, does not modify the original page HTML, so you won't be able to extract any data if you use ```` elements in your XPath expressions unless you really know what you're doing .. _topics-firefox-addons: Useful Firefox add-ons for scraping =================================== Firebug ------- `Firebug`_ is a widely known tool among web developers and it's also very useful for scraping. In particular, its `Inspect Element`_ feature comes very handy when you need to construct the XPaths for extracting data because it allows you to view the HTML code of each page element while moving your mouse over it. See :ref:`topics-firebug` for a detailed guide on how to use Firebug with Scrapy. XPather ------- `XPather`_ allows you to test XPath expressions directly on the pages. XPath Checker ------------- `XPath Checker`_ is another Firefox add-on for testing XPaths on your pages. Tamper Data ----------- `Tamper Data`_ is a Firefox add-on which allows you to view and modify the HTTP request headers sent by Firefox. Firebug also allows to view HTTP headers, but not to modify them. Firecookie ---------- `Firecookie`_ makes it easier to view and manage cookies. You can use this extension to create a new cookie, delete existing cookies, see a list of cookies for the current site, manage cookies permissions and a lot more. .. _Firebug: http://getfirebug.com .. _Inspect Element: http://www.youtube.com/watch?v=-pT_pDe54aA .. _XPather: https://addons.mozilla.org/firefox/addon/1192 .. _XPath Checker: https://addons.mozilla.org/firefox/addon/1095 .. _Tamper Data: http://addons.mozilla.org/firefox/addon/966 .. _Firecookie: https://addons.mozilla.org/firefox/addon/6683 Scrapy-0.14.4/docs/topics/email.rst0000600000016101777760000001065511754531743017224 0ustar buildbotnogroup.. _topics-email: ============== Sending e-mail ============== .. module:: scrapy.mail :synopsis: Email sending facility Although Python makes sending e-mails relatively easy via the `smtplib`_ library, Scrapy provides its own facility for sending e-mails which is very easy to use and it's implemented using `Twisted non-blocking IO`_, to avoid interfering with the non-blocking IO of the crawler. It also provides a simple API for sending attachments and it's very easy to configure, with a few :ref:`settings ` for downloading images attached to a particular item, for example, when you scrape products and also want to download their images locally. This pipeline, called the Images Pipeline and implemented in the :class:`ImagesPipeline` class, provides a convenient way for downloading and storing images locally with some additional features: * Convert all downloaded images to a common format (JPG) and mode (RGB) * Avoid re-downloading images which were downloaded recently * Thumbnail generation * Check images width/height to make sure they meet a minimum constraint This pipeline also keeps an internal queue of those images which are currently being scheduled for download, and connects those items that arrive containing the same image, to that queue. This avoids downloading the same image more than once when it's shared by several items. The `Python Imaging Library`_ is used for thumbnailing and normalizing images to JPEG/RGB format, so you need to install that library in order to use the images pipeline. .. _Python Imaging Library: http://www.pythonware.com/products/pil/ Using the Images Pipeline ========================= The typical workflow, when using the :class:`ImagesPipeline` goes like this: 1. 
In a Spider, you scrape an item and put the URLs of its images into a ``image_urls`` field. 2. The item is returned from the spider and goes to the item pipeline. 3. When the item reaches the :class:`ImagesPipeline`, the URLs in the ``image_urls`` field are scheduled for download using the standard Scrapy scheduler and downloader (which means the scheduler and downloader middlewares are reused), but with a higher priority, processing them before other pages are scraped. The item remains "locked" at that particular pipeline stage until the images have finish downloading (or fail for some reason). 4. When the images are downloaded another field (``images``) will be populated with the results. This field will contain a list of dicts with information about the images downloaded, such as the downloaded path, the original scraped url (taken from the ``image_urls`` field) , and the image checksum. The images in the list of the ``images`` field will retain the same order of the original ``image_urls`` field. If some image failed downloading, an error will be logged and the image won't be present in the ``images`` field. Usage example ============= In order to use the image pipeline you just need to :ref:`enable it ` and define an item with the ``image_urls`` and ``images`` fields:: from scrapy.item import Item class MyItem(Item): # ... other item fields ... image_urls = Field() images = Field() If you need something more complex and want to override the custom images pipeline behaviour, see :ref:`topics-images-override`. .. _topics-images-enabling: Enabling your Images Pipeline ============================= .. setting:: IMAGES_STORE To enable your images pipeline you must first add it to your project :setting:`ITEM_PIPELINES` setting:: ITEM_PIPELINES = ['scrapy.contrib.pipeline.images.ImagesPipeline'] And set the :setting:`IMAGES_STORE` setting to a valid directory that will be used for storing the downloaded images. Otherwise the pipeline will remain disabled, even if you include it in the :setting:`ITEM_PIPELINES` setting. For example:: IMAGES_STORE = '/path/to/valid/dir' Images Storage ============== File system is currently the only officially supported storage, but there is also (undocumented) support for `Amazon S3`_. .. _Amazon S3: https://s3.amazonaws.com/ File system storage ------------------- The images are stored in files (one per image), using a `SHA1 hash`_ of their URLs for the file names. For example, the following image URL:: http://www.example.com/image.jpg Whose `SHA1 hash` is:: 3afec3b4765f8f0a07b78f98c07b83f013567a0a Will be downloaded and stored in the following file:: /full/3afec3b4765f8f0a07b78f98c07b83f013567a0a.jpg Where: * ```` is the directory defined in :setting:`IMAGES_STORE` setting * ``full`` is a sub-directory to separate full images from thumbnails (if used). For more info see :ref:`topics-images-thumbnails`. Additional features =================== Image expiration ---------------- .. setting:: IMAGES_EXPIRES The Image Pipeline avoids downloading images that were downloaded recently. To adjust this retention delay use the :setting:`IMAGES_EXPIRES` setting, which specifies the delay in number of days:: # 90 days of delay for image expiration IMAGES_EXPIRES = 90 .. _topics-images-thumbnails: Thumbnail generation -------------------- The Images Pipeline can automatically create thumbnails of the downloaded images. .. 
setting:: IMAGES_THUMBS In order use this feature, you must set :setting:`IMAGES_THUMBS` to a dictionary where the keys are the thumbnail names and the values are their dimensions. For example:: IMAGES_THUMBS = { 'small': (50, 50), 'big': (270, 270), } When you use this feature, the Images Pipeline will create thumbnails of the each specified size with this format:: /thumbs//.jpg Where: * ```` is the one specified in the :setting:`IMAGES_THUMBS` dictionary keys (``small``, ``big``, etc) * ```` is the `SHA1 hash`_ of the image url .. _SHA1 hash: http://en.wikipedia.org/wiki/SHA_hash_functions Example of image files stored using ``small`` and ``big`` thumbnail names:: /full/63bbfea82b8880ed33cdb762aa11fab722a90a24.jpg /thumbs/small/63bbfea82b8880ed33cdb762aa11fab722a90a24.jpg /thumbs/big/63bbfea82b8880ed33cdb762aa11fab722a90a24.jpg The first one is the full image, as downloaded from the site. Filtering out small images -------------------------- .. setting:: IMAGES_MIN_HEIGHT .. setting:: IMAGES_MIN_WIDTH You can drop images which are too small, by specifying the minimum allowed size in the :setting:`IMAGES_MIN_HEIGHT` and :setting:`IMAGES_MIN_WIDTH` settings. For example:: IMAGES_MIN_HEIGHT = 110 IMAGES_MIN_WIDTH = 110 Note: these size constraints don't affect thumbnail generation at all. By default, there are no size constraints, so all images are processed. .. _topics-images-override: Implementing your custom Images Pipeline ======================================== .. module:: scrapy.contrib.pipeline.images :synopsis: Images Pipeline Here are the methods that you should override in your custom Images Pipeline: .. class:: ImagesPipeline .. method:: get_media_requests(item, info) As seen on the workflow, the pipeline will get the URLs of the images to download from the item. In order to do this, you must override the :meth:`~get_media_requests` method and return a Request for each image URL:: def get_media_requests(self, item, info): for image_url in item['image_urls']: yield Request(image_url) Those requests will be processed by the pipeline and, when they have finished downloading, the results will be sent to the :meth:`~item_completed` method, as a list of 2-element tuples. Each tuple will contain ``(success, image_info_or_failure)`` where: * ``success`` is a boolean which is ``True`` if the image was downloaded successfully or ``False`` if it failed for some reason * ``image_info_or_error`` is a dict containing the following keys (if success is ``True``) or a `Twisted Failure`_ if there was a problem. * ``url`` - the url where the image was downloaded from. This is the url of the request returned from the :meth:`~get_media_requests` method. * ``path`` - the path (relative to :setting:`IMAGES_STORE`) where the image was stored * ``checksum`` - a `MD5 hash`_ of the image contents .. _Twisted Failure: http://twistedmatrix.com/documents/8.2.0/api/twisted.python.failure.Failure.html .. _MD5 hash: http://en.wikipedia.org/wiki/MD5 The list of tuples received by :meth:`~item_completed` is guaranteed to retain the same order of the requests returned from the :meth:`~get_media_requests` method. 
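Because of that ordering guarantee, an overridden ``item_completed`` can safely pair each result with the URL that produced it. The following is only a sketch (``MyImagesPipeline`` is a hypothetical subclass, not part of Scrapy), and it assumes the default one-request-per-URL ``get_media_requests`` shown above::

    from scrapy import log
    from scrapy.contrib.pipeline.images import ImagesPipeline

    class MyImagesPipeline(ImagesPipeline):

        def item_completed(self, results, item, info):
            # results[i] corresponds to item['image_urls'][i], because results
            # keep the order of the requests returned by get_media_requests()
            for url, (ok, result_or_failure) in zip(item['image_urls'], results):
                if not ok:
                    log.msg("image download failed for %s" % url, level=log.WARNING)
            return item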
Here's a typical value of the ``results`` argument:: [(True, {'checksum': '2b00042f7481c7b056c4b410d28f33cf', 'path': 'full/7d97e98f8af710c7e7fe703abc8f639e0ee507c4.jpg', 'url': 'http://www.example.com/images/product1.jpg'}), (True, {'checksum': 'b9628c4ab9b595f72f280b90c4fd093d', 'path': 'full/1ca5879492b8fd606df1964ea3c1e2f4520f076f.jpg', 'url': 'http://www.example.com/images/product2.jpg'}), (False, Failure(...))] By default the :meth:`get_media_requests` method returns ``None`` which means there are no images to download for the item. .. method:: item_completed(results, items, info) The :meth:`ImagesPipeline.item_completed` method called when all image requests for a single item have completed (either finshed downloading, or failed for some reason). The :meth:`~item_completed` method must return the output that will be sent to subsequent item pipeline stages, so you must return (or drop) the item, as you would in any pipeline. Here is an example of the :meth:`~item_completed` method where we store the downloaded image paths (passed in results) in the ``image_paths`` item field, and we drop the item if it doesn't contain any images:: from scrapy.exceptions import DropItem def item_completed(self, results, item, info): image_paths = [x['path'] for ok, x in results if ok] if not image_paths: raise DropItem("Item contains no images") item['image_paths'] = image_paths return item By default, the :meth:`item_completed` method returns the item. Custom Images pipeline example ============================== Here is a full example of the Images Pipeline whose methods are examplified above:: from scrapy.contrib.pipeline.images import ImagesPipeline from scrapy.exceptions import DropItem from scrapy.http import Request class MyImagesPipeline(ImagesPipeline): def get_media_requests(self, item, info): for image_url in item['image_urls']: yield Request(image_url) def item_completed(self, results, item, info): image_paths = [x['path'] for ok, x in results if ok] if not image_paths: raise DropItem("Item contains no images") item['image_paths'] = image_paths return item Scrapy-0.14.4/docs/index.rst0000600000016101777760000001225511754531743015741 0ustar buildbotnogroup.. _topics-index: ============================== Scrapy |version| documentation ============================== This documentation contains everything you need to know about Scrapy. Getting help ============ Having trouble? We'd like to help! * Try the :doc:`FAQ ` -- it's got answers to some common questions. * Looking for specific information? Try the :ref:`genindex` or :ref:`modindex`. * Search for information in the `archives of the scrapy-users mailing list`_, or `post a question`_. * Ask a question in the `#scrapy IRC channel`_. * Report bugs with Scrapy in our `issue tracker`_. .. _archives of the scrapy-users mailing list: http://groups.google.com/group/scrapy-users/ .. _post a question: http://groups.google.com/group/scrapy-users/ .. _#scrapy IRC channel: irc://irc.freenode.net/scrapy .. _issue tracker: https://github.com/scrapy/scrapy/issues First steps =========== .. toctree:: :hidden: intro/overview intro/install intro/tutorial intro/examples :doc:`intro/overview` Understand what Scrapy is and how it can help you. :doc:`intro/install` Get Scrapy installed on your computer. :doc:`intro/tutorial` Write your first Scrapy project. :doc:`intro/examples` Learn more by playing with a pre-made Scrapy project. .. _section-basics: Basic concepts ============== .. 
toctree:: :hidden: topics/commands topics/items topics/spiders topics/link-extractors topics/selectors topics/loaders topics/shell topics/item-pipeline topics/feed-exports topics/link-extractors :doc:`topics/commands` Learn about the command-line tool used to manage your Scrapy project. :doc:`topics/items` Define the data you want to scrape. :doc:`topics/spiders` Write the rules to crawl your websites. :doc:`topics/selectors` Extract the data from web pages. :doc:`topics/shell` Test your extraction code in an interactive environment. :doc:`topics/loaders` Populate your items with the extracted data. :doc:`topics/item-pipeline` Post-process and store your scraped data. :doc:`topics/feed-exports` Output your scraped data using different formats and storages. :doc:`topics/link-extractors` Convenient classes to extract links to follow from pages. Built-in services ================= .. toctree:: :hidden: topics/logging topics/stats topics/email topics/telnetconsole topics/webservice :doc:`topics/logging` Understand the simple logging facility provided by Scrapy. :doc:`topics/stats` Collect statistics about your scraping crawler. :doc:`topics/email` Send email notifications when certain events occur. :doc:`topics/telnetconsole` Inspect a running crawler using a built-in Python console. :doc:`topics/webservice` Monitor and control a crawler using a web service. Solving specific problems ========================= .. toctree:: :hidden: faq topics/firefox topics/firebug topics/leaks topics/images topics/ubuntu topics/scrapyd topics/jobs :doc:`faq` Get answers to most frequently asked questions. :doc:`topics/firefox` Learn how to scrape with Firefox and some useful add-ons. :doc:`topics/firebug` Learn how to scrape efficiently using Firebug. :doc:`topics/leaks` Learn how to find and get rid of memory leaks in your crawler. :doc:`topics/images` Download static images associated with your scraped items. :doc:`topics/ubuntu` Install latest Scrapy packages easily on Ubuntu :doc:`topics/scrapyd` Deploying your Scrapy project in production. :doc:`topics/jobs` Learn how to pause and resume crawls for large spiders. .. _extending-scrapy: Extending Scrapy ================ .. toctree:: :hidden: topics/architecture topics/downloader-middleware topics/spider-middleware topics/extensions :doc:`topics/architecture` Understand the Scrapy architecture. :doc:`topics/downloader-middleware` Customize how pages get requested and downloaded. :doc:`topics/spider-middleware` Customize the input and output of your spiders. :doc:`topics/extensions` Add any custom functionality using :doc:`signals ` and the Scrapy API Reference ========= .. toctree:: :hidden: topics/request-response topics/settings topics/signals topics/exceptions topics/exporters :doc:`topics/commands` Learn about the command-line tool and see all :ref:`available commands `. :doc:`topics/request-response` Understand the classes used to represent HTTP requests and responses. :doc:`topics/settings` Learn how to configure Scrapy and see all :ref:`available settings `. :doc:`topics/signals` See all available signals and how to work with them. :doc:`topics/exceptions` See all available exceptions and their meaning. :doc:`topics/exporters` Quickly export your scraped items to a file (XML, CSV, etc). All the rest ============ .. toctree:: :hidden: contributing versioning experimental/index :doc:`contributing` Learn how to contribute to the Scrapy project. :doc:`versioning` Understand Scrapy versioning and API stability. 
:doc:`experimental/index` Learn about bleeding-edge features. Scrapy-0.14.4/docs/faq.rst0000600000016101777760000002447311754531743015406 0ustar buildbotnogroup.. _faq: Frequently Asked Questions ========================== How does Scrapy compare to BeautifulSoup or lxml? ------------------------------------------------- `BeautifulSoup`_ and `lxml`_ are libraries for parsing HTML and XML. Scrapy is an application framework for writing web spiders that crawl web sites and extract data from them. Scrapy provides a built-in mechanism for extracting data (called :ref:`selectors `) but you can easily use `BeautifulSoup`_ (or `lxml`_) instead, if you feel more comfortable working with them. After all, they're just parsing libraries which can be imported and used from any Python code. In other words, comparing `BeautifulSoup`_ (or `lxml`_) to Scrapy is like comparing `jinja2`_ to `Django`_. .. _BeautifulSoup: http://www.crummy.com/software/BeautifulSoup/ .. _lxml: http://codespeak.net/lxml/ .. _jinja2: http://jinja.pocoo.org/2/ .. _Django: http://www.djangoproject.com .. _faq-python-versions: What Python versions does Scrapy support? ----------------------------------------- Scrapy runs in Python 2.5, 2.6 and 2.7. But it's recommended you use Python 2.6 or above, since the Python 2.5 standard library has a few bugs in their URL handling libraries. Some of these Python 2.5 bugs not only affect Scrapy but any user code, such as spiders. Does Scrapy work with Python 3.0? --------------------------------- No, and there are no plans to port Scrapy to Python 3.0 yet. At the moment, Scrapy works with Python 2.5, 2.6 and 2.7. .. seealso:: :ref:`faq-python-versions`. Did Scrapy "steal" X from Django? --------------------------------- Probably, but we don't like that word. We think Django_ is a great open source project and an example to follow, so we've used it as an inspiration for Scrapy. We believe that, if something is already done well, there's no need to reinvent it. This concept, besides being one of the foundations for open source and free software, not only applies to software but also to documentation, procedures, policies, etc. So, instead of going through each problem ourselves, we choose to copy ideas from those projects that have already solved them properly, and focus on the real problems we need to solve. We'd be proud if Scrapy serves as an inspiration for other projects. Feel free to steal from us! .. _Django: http://www.djangoproject.com Does Scrapy work with HTTP proxies? ----------------------------------- Yes. Support for HTTP proxies is provided (since Scrapy 0.8) through the HTTP Proxy downloader middleware. See :class:`~scrapy.contrib.downloadermiddleware.httpproxy.HttpProxyMiddleware`. Scrapy crashes with: ImportError: No module named win32api ---------------------------------------------------------- You need to install `pywin32`_ because of `this Twisted bug`_. .. _pywin32: http://sourceforge.net/projects/pywin32/ .. _this Twisted bug: http://twistedmatrix.com/trac/ticket/3707 How can I simulate a user login in my spider? --------------------------------------------- See :ref:`topics-request-response-ref-request-userlogin`. Does Scrapy crawl in breath-first or depth-first order? ------------------------------------------------------- By default, Scrapy uses a `LIFO`_ queue for storing pending requests, which basically means that it crawls in `DFO order`_. This order is more convenient in most cases. 
If you do want to crawl in true `BFO order`_, you can do it by setting the following settings:: DEPTH_PRIORITY = 1 SCHEDULER_DISK_QUEUE = 'scrapy.squeue.PickleFifoDiskQueue' SCHEDULER_MEMORY_QUEUE = 'scrapy.squeue.FifoMemoryQueue' My Scrapy crawler has memory leaks. What can I do? -------------------------------------------------- See :ref:`topics-leaks`. Also, Python has a built-in memory leak issue which is described in :ref:`topics-leaks-without-leaks`. How can I make Scrapy consume less memory? ------------------------------------------ See the previous question. Can I use Basic HTTP Authentication in my spiders? -------------------------------------------------- Yes, see :class:`~scrapy.contrib.downloadermiddleware.httpauth.HttpAuthMiddleware`. Why does Scrapy download pages in English instead of my native language? ------------------------------------------------------------------------ Try changing the default `Accept-Language`_ request header by overriding the :setting:`DEFAULT_REQUEST_HEADERS` setting. .. _Accept-Language: http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.4 Where can I find some example Scrapy projects? ---------------------------------------------- See :ref:`intro-examples`. Can I run a spider without creating a project? ---------------------------------------------- Yes. You can use the :command:`runspider` command. For example, if you have a spider written in a ``my_spider.py`` file you can run it with:: scrapy runspider my_spider.py See the :command:`runspider` command for more info. I get "Filtered offsite request" messages. How can I fix them? -------------------------------------------------------------- Those messages (logged with ``DEBUG`` level) don't necessarily mean there is a problem, so you may not need to fix them. Those messages are thrown by the Offsite Spider Middleware, which is a spider middleware (enabled by default) whose purpose is to filter out requests to domains outside the ones covered by the spider. For more info see: :class:`~scrapy.contrib.spidermiddleware.offsite.OffsiteMiddleware`. What is the recommended way to deploy a Scrapy crawler in production? --------------------------------------------------------------------- See :ref:`topics-scrapyd`. Can I use JSON for large exports? --------------------------------- That depends on how large your output is. See :ref:`this warning ` in the :class:`~scrapy.contrib.exporter.JsonItemExporter` documentation. Can I return (Twisted) deferreds from signal handlers? ------------------------------------------------------ Some signals support returning deferreds from their handlers, others don't. See the :ref:`topics-signals-ref` to know which ones. What does the response status code 999 mean? --------------------------------------------- 999 is a custom response status code used by Yahoo sites to throttle requests. Try slowing down the crawling speed by using a download delay of ``2`` (or higher) in your spider:: class MySpider(CrawlSpider): name = 'myspider' DOWNLOAD_DELAY = 2 # [ ... rest of the spider code ... ] Or by setting a global download delay in your project with the :setting:`DOWNLOAD_DELAY` setting. Can I call ``pdb.set_trace()`` from my spiders to debug them? ------------------------------------------------------------- Yes, but you can also use the Scrapy shell, which allows you to quickly analyze (and even modify) the response being processed by your spider, which is, quite often, more useful than plain old ``pdb.set_trace()``. For more info see :ref:`topics-shell-inspect-response`.
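For example, here is a minimal sketch of dropping into ``pdb`` from a spider callback (the spider name and URL below are hypothetical)::

    import pdb

    from scrapy.spider import BaseSpider

    class ExampleSpider(BaseSpider):
        name = 'example'
        start_urls = ['http://www.example.com/']

        def parse(self, response):
            # Execution pauses here; inspect `response` (and any local
            # variables) with the usual pdb commands, then `c` to continue.
            pdb.set_trace()

Running such a spider from a terminal gives you an interactive pdb prompt at that point, which is often enough for a quick inspection; the Scrapy shell remains the more convenient option for iterating on XPath expressions.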
Simplest way to dump all my scraped items into a JSON/CSV/XML file? ------------------------------------------------------------------- To dump into a JSON file:: scrapy crawl myspider -o items.json -t json To dump into a CSV file:: scrapy crawl myspider -o items.csv -t csv To dump into an XML file:: scrapy crawl myspider -o items.xml -t xml For more information see :ref:`topics-feed-exports`. What's this huge cryptic ``__VIEWSTATE`` parameter used in some forms? ---------------------------------------------------------------------- The ``__VIEWSTATE`` parameter is used in sites built with ASP.NET/VB.NET. For more info on how it works see `this page`_. Also, here's an `example spider`_ which scrapes one of these sites. .. _this page: http://search.cpan.org/~ecarroll/HTML-TreeBuilderX-ASP_NET-0.09/lib/HTML/TreeBuilderX/ASP_NET.pm .. _example spider: http://github.com/AmbientLighter/rpn-fas/blob/master/fas/spiders/rnp.py What's the best way to parse big XML/CSV data feeds? ---------------------------------------------------- Parsing big feeds with XPath selectors can be problematic since they need to build the DOM of the entire feed in memory, and this can be quite slow and consume a lot of memory. In order to avoid parsing the entire feed at once in memory, you can use the functions ``xmliter`` and ``csviter`` from the ``scrapy.utils.iterators`` module. In fact, this is what the feed spiders (see :ref:`topics-spiders`) use under the hood. Does Scrapy manage cookies automatically? ----------------------------------------- Yes, Scrapy receives and keeps track of cookies sent by servers, and sends them back on subsequent requests, like any regular web browser does. For more info see :ref:`topics-request-response` and :ref:`cookies-mw`. How can I see the cookies being sent and received from Scrapy? -------------------------------------------------------------- Enable the :setting:`COOKIES_DEBUG` setting. How can I instruct a spider to stop itself? ------------------------------------------- Raise the :exc:`~scrapy.exceptions.CloseSpider` exception from a callback. For more info see: :exc:`~scrapy.exceptions.CloseSpider`. How can I prevent my Scrapy bot from getting banned? ---------------------------------------------------- Some websites implement certain measures to prevent bots from crawling them, with varying degrees of sophistication. Getting around those measures can be difficult and tricky, and may sometimes require special infrastructure. Here are some tips to keep in mind when dealing with these kinds of sites: * rotate your user agent from a pool of well-known ones from browsers (google around to get a list of them) * disable cookies (see :setting:`COOKIES_ENABLED`) as some sites may use cookies to spot bot behaviour * use download delays (2 or higher). See the :setting:`DOWNLOAD_DELAY` setting. * if possible, use `Google cache`_ to fetch pages, instead of hitting the sites directly * use a pool of rotating IPs. For example, the free `Tor project`_. If you are still unable to prevent your bot from getting banned, consider contacting `commercial support`_. .. _user agents: http://en.wikipedia.org/wiki/User_agent .. _Google cache: http://www.googleguide.com/cached_pages.html .. _Tor project: https://www.torproject.org/ .. _commercial support: http://scrapy.org/support/ .. _LIFO: http://en.wikipedia.org/wiki/LIFO .. _DFO order: http://en.wikipedia.org/wiki/Depth-first_search ..
_BFO order: http://en.wikipedia.org/wiki/Breadth-first_search Scrapy-0.14.4/docs/conf.py0000600000016101777760000001373511754531743015403 0ustar buildbotnogroup# -*- coding: utf-8 -*- # # Scrapy documentation build configuration file, created by # sphinx-quickstart on Mon Nov 24 12:02:52 2008. # # This file is execfile()d with the current directory set to its containing dir. # # The contents of this file are pickled, so don't put values in the namespace # that aren't pickleable (module imports are okay, they're removed automatically). # # All configuration values have a default; values that are commented out # serve to show the default. import sys from os import path # If your extensions are in another directory, add it here. If the directory # is relative to the documentation root, use os.path.abspath to make it # absolute, like shown here. sys.path.append(path.join(path.dirname(__file__), "_ext")) sys.path.append(path.join(path.dirname(path.dirname(__file__)), "scrapy")) # General configuration # --------------------- # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = ['scrapydocs'] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8' # The master toctree document. master_doc = 'index' # General information about the project. project = u'Scrapy' copyright = u'2008-2011, Insophia' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. try: import scrapy version = '.'.join(map(str, scrapy.version_info[:2])) release = scrapy.__version__ except ImportError: version = '' release = '' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. language = 'en' # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of documents that shouldn't be included in the build. #unused_docs = [] # List of directories, relative to source directory, that shouldn't be searched # for source files. exclude_trees = ['.build'] # The reST default role (used for this markup: `text`) to use for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # Options for HTML output # ----------------------- # The style sheet to use for HTML and HTML Help pages. A file of that name # must exist either in Sphinx' static/ path, or in one of the custom paths # given in html_static_path. html_style = 'scrapydoc.css' # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. 
#html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_use_modindex = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, the reST sources are included in the HTML build as _sources/. html_copy_source = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = '' # Output file base name for HTML help builder. htmlhelp_basename = 'Scrapydoc' # Options for LaTeX output # ------------------------ # The paper size ('letter' or 'a4'). #latex_paper_size = 'letter' # The font size ('10pt', '11pt' or '12pt'). #latex_font_size = '10pt' # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, document class [howto/manual]). latex_documents = [ ('index', 'Scrapy.tex', ur'Scrapy Documentation', ur'Insophia', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # Additional stuff for the LaTeX preamble. #latex_preamble = '' # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_use_modindex = True Scrapy-0.14.4/docs/experimental/0000700000016101777760000000000011754532077016567 5ustar buildbotnogroupScrapy-0.14.4/docs/experimental/djangoitems.rst0000600000016101777760000000460511754531743021633 0ustar buildbotnogroup.. _topics-djangoitem: .. module:: scrapy.contrib_exp.djangoitem ========== DjangoItem ========== :class:`DjangoItem` is a class of item that gets its fields definition from a Django model, you simply create a :class:`DjangoItem` and specify what Django model it relates to. Besides of getting the model fields defined on your item, :class:`DjangoItem` provides a method to create and populate a Django model instance with the item data. Using DjangoItem ================ :class:`DjangoItem` works much like ModelForms in Django, you create a subclass and define its ``django_model`` atribute to ve a valid Django model. 
With this you will get an item with a field for each Django model field. In addition, you can define fields that aren't present in the model and even override fields that are present in the model defining them in the item. Let's see some examples: Django model for the examples:: class Person(models.Model): name = models.CharField(max_length=255) age = models.IntegerField() Defining a basic :class:`DjangoItem`:: class PersonItem(DjangoItem): django_model = Person :class:`DjangoItem` work just like :class:`~scrapy.item.Item`:: p = PersonItem() p['name'] = 'John' p['age'] = '22' To obtain the Django model from the item, we call the extra method :meth:`~DjangoItem.save` of the :class:`DjangoItem`:: >>> person = p.save() >>> person.name 'John' >>> person.age '22' >>> person.id 1 As you see the model is already saved when we call :meth:`~DjangoItem.save`, we can prevent this by calling it with ``commit=False``. We can use ``commit=False`` in :meth:`~DjangoItem.save` method to obtain an unsaved model:: >>> person = p.save(commit=False) >>> person.name 'John' >>> person.age '22' >>> person.id None As said before, we can add other fields to the item:: class PersonItem(DjangoItem): django_model = Person sex = Field() p = PersonItem() p['name'] = 'John' p['age'] = '22' p['sex'] = 'M' .. note:: fields added to the item won't be taken into account when doing a :meth:`~DjangoItem.save` And we can override the fields of the model with your own:: class PersonItem(DjangoItem): django_model = Person name = Field(default='No Name') This is usefull to provide properties to the field, like a default or any other property that your project uses. Scrapy-0.14.4/docs/experimental/index.rst0000600000016101777760000000120711754531743020431 0ustar buildbotnogroup.. _experimental: Experimental features ===================== This section documents experimental Scrapy features that may become stable in future releases, but whose API is not yet stable. Use them with caution, and subscribe to the `mailing lists `_ to get notified of any changes. Since it's not revised so frequently, this section may contain documentation which is outdated, incomplete or overlapping with stable documentation (until it's properly merged) . Use at your own risk. .. warning:: This documentation is a work in progress. Use at your own risk. .. toctree:: :maxdepth: 1 djangoitems Scrapy-0.14.4/docs/versioning.rst0000600000016101777760000000262311754531743017013 0ustar buildbotnogroup.. _versioning: ============================ Versioning and API Stability ============================ Versioning ========== Scrapy uses the `odd-numbered versions for development releases`_. There are 3 numbers in a Scrapy version: *A.B.C* * *A* is the major version. This will rarely change and will signify very large changes. So far, only zero is available for *A* as Scrapy hasn't yet reached 1.0. * *B* is the release number. This will include many changes including features and things that possibly break backwards compatibility. Even Bs will be stable branches, and odd Bs will be development. * *C* is the bugfix release number. For example: * *0.14.1* is the first bugfix release of the *0.14* series (safe to use in production) API Stability ============= API stability is one of Scrapy major goals for the *1.0* release, which doesn't have a due date scheduled yet. Methods or functions that start with a single dash (``_``) are private and should never be relied as stable. 
Besides those, the plan is to stabilize and document the entire API, as we approach the 1.0 release. Also, keep in mind that stable doesn't mean complete: stable APIs could grow new methods or functionality but the existing methods should keep working the same way. .. _odd-numbered versions for development releases: http://en.wikipedia.org/wiki/Software_versioning#Odd-numbered_versions_for_development_releases Scrapy-0.14.4/docs/contributing.rst0000600000016101777760000001347011754531743017341 0ustar buildbotnogroup.. _topics-contributing: ====================== Contributing to Scrapy ====================== There are many ways to contribute to Scrapy. Here are some of them: * Blog about Scrapy. Tell the world how you're using Scrapy. This will help newcomers with more examples and the Scrapy project to increase its visibility. * Report bugs and request features in the `issue tracker`_, trying to follow the guidelines detailed in `Reporting bugs`_ below. * Submit patches for new functionality and/or bug fixes. Please read `Writing patches`_ and `Submitting patches`_ below for details on how to write and submit a patch. * Join the `scrapy-developers`_ mailing list and share your ideas on how to improve Scrapy. We're always open to suggestions. Reporting bugs ============== Well-written bug reports are very helpful, so keep in mind the following guidelines when reporting a new bug. * check the :ref:`FAQ ` first to see if your issue is addressed in a well-known question * check the `open issues`_ to see if it has already been reported. If it has, don't dismiss the report but check the ticket history and comments, you may find additional useful information to contribute. * search the `scrapy-users`_ list to see if it has been discussed there, or if you're not sure if what you're seeing is a bug. You can also ask in the `#scrapy` IRC channel. * write complete, reproducible, specific bug reports. The smaller the test case, the better. Remember that other developers won't have your project to reproduce the bug, so please include all relevant files required to reproduce it. * include the output of ``scrapy version -v`` so developers working on your bug know exactly which version and platform it occurred on, which is often very helpful for reproducing it, or knowing if it was already fixed. Writing patches =============== The better written a patch is, the higher chance that it'll get accepted and the sooner that will be merged. Well-written patches should: * contain the minimum amount of code required for the specific change. Small patches are easier to review and merge. So, if you're doing more than one change (or bug fix), please consider submitting one patch per change. Do not collapse multiple changes into a single patch. For big changes consider using a patch queue. * pass all unit-tests. See `Running tests`_ below. * include one (or more) test cases that check the bug fixed or the new functionality added. See `Writing tests`_ below. * if you're adding or changing a public (documented) API, please include the documentation changes in the same patch. See `Documentation policies`_ below. Submitting patches ================== The best way to submit a patch is to issue a `pull request`_ on Github, optionally creating a new issue first. Alternatively, we also accept the patches in the traditional way of sending them to the `scrapy-developers`_ list. Regardless of which mechanism you use, remember to explain what was fixed or the new functionality (what it is, why it's needed, etc). 
The more info you include, the easier will be for core developers to understand and accept your patch. You can also discuss the new functionality (or bug fix) in `scrapy-developers`_ first, before creating the patch, but it's always good to have a patch ready to illustrate your arguments and show that you have put some additional thought into the subject. Coding style ============ Please follow these coding conventions when writing code for inclusion in Scrapy: * Unless otherwise specified, follow :pep:`8`. * It's OK to use lines longer than 80 chars if it improves the code readability. * Don't put your name in the code you contribute. Our policy is to keep the contributor's name in the `AUTHORS`_ file distributed with Scrapy. Documentation policies ====================== * **Don't** use docstrings for documenting classes, or methods which are already documented in the official (sphinx) documentation. For example, the :meth:`ItemLoader.add_value` method should be documented in the sphinx documentation, not its docstring. * **Do** use docstrings for documenting functions not present in the official (sphinx) documentation, such as functions from ``scrapy.utils`` package and its sub-modules. Tests ===== Tests are implemented using the `Twisted unit-testing framework`_ called ``trial``. Running tests ------------- To run all tests go to the root directory of Scrapy source code and run: ``bin/runtests.sh`` (on unix) ``bin\runtests.bat`` (on windows) To run a specific test (say ``scrapy.tests.test_contrib_loader``) use: ``bin/runtests.sh scrapy.tests.test_contrib_loader`` (on unix) ``bin\runtests.bat scrapy.tests.test_contrib_loader`` (on windows) Writing tests ------------- All functionality (including new features and bug fixes) must include a test case to check that it works as expected, so please include tests for your patches if you want them to get accepted sooner. Scrapy uses unit-tests, which are located in the ``scrapy.tests`` package (`scrapy/tests`_ directory). Their module name typically resembles the full path of the module they're testing. For example, the item loaders code is in:: scrapy.contrib.loader And their unit-tests are in:: scrapy.tests.test_contrib_loader .. _issue tracker: https://github.com/scrapy/scrapy/issues .. _scrapy-users: http://groups.google.com/group/scrapy-users .. _scrapy-developers: http://groups.google.com/group/scrapy-developers .. _Twisted unit-testing framework: http://twistedmatrix.com/documents/current/core/development/policy/test-standard.html .. _AUTHORS: https://github.com/scrapy/scrapy/blob/master/AUTHORS .. _scrapy/tests: https://github.com/scrapy/scrapy/tree/master/scrapy/tests .. _open issues: https://github.com/scrapy/scrapy/issues .. _pull request: http://help.github.com/send-pull-requests/ Scrapy-0.14.4/docs/intro/0000700000016101777760000000000011754532077015225 5ustar buildbotnogroupScrapy-0.14.4/docs/intro/overview.rst0000600000016101777760000002143011754531743017626 0ustar buildbotnogroup.. _intro-overview: ================== Scrapy at a glance ================== Scrapy is an application framework for crawling web sites and extracting structured data which can be used for a wide range of useful applications, like data mining, information processing or historical archival. Even though Scrapy was originally designed for `screen scraping`_ (more precisely, `web scraping`_), it can also be used to extract data using APIs (such as `Amazon Associates Web Services`_) or as a general purpose web crawler. 
The purpose of this document is to introduce you to the concepts behind Scrapy so you can get an idea of how it works and decide if Scrapy is what you need. When you're ready to start a project, you can :ref:`start with the tutorial `. Pick a website ============== So you need to extract some information from a website, but the website doesn't provide any API or mechanism to access that info programmatically. Scrapy can help you extract that information. Let's say we want to extract the URL, name, description and size of all torrent files added today in the `Mininova`_ site. The list of all torrents added today can be found on this page: http://www.mininova.org/today .. _intro-overview-item: Define the data you want to scrape ================================== The first thing is to define the data we want to scrape. In Scrapy, this is done through :ref:`Scrapy Items ` (Torrent files, in this case). This would be our Item:: from scrapy.item import Item, Field class Torrent(Item): url = Field() name = Field() description = Field() size = Field() Write a Spider to extract the data ================================== The next thing is to write a Spider which defines the start URL (http://www.mininova.org/today), the rules for following links and the rules for extracting the data from pages. If we take a look at that page content we'll see that all torrent URLs are like http://www.mininova.org/tor/NUMBER where ``NUMBER`` is an integer. We'll use that to construct the regular expression for the links to follow: ``/tor/\d+``. We'll use `XPath`_ for selecting the data to extract from the web page HTML source. Let's take one of those torrent pages: http://www.mininova.org/tor/2657665 And look at the page HTML source to construct the XPath to select the data we want which is: torrent name, description and size. .. highlight:: html By looking at the page HTML source we can see that the file name is contained inside a ``

    `` tag::

    Home[2009][Eng]XviD-ovd

    .. highlight:: none An XPath expression to extract the name could be:: //h1/text() .. highlight:: html And the description is contained inside a ``
    `` tag with ``id="description"``::

    Description:

    "HOME" - a documentary film by Yann Arthus-Bertrand

    ***

    "We are living in exceptional times. Scientists tell us that we have 10 years to change the way we live, avert the depletion of natural resources and the catastrophic evolution of the Earth's climate. ... .. highlight:: none An XPath expression to select the description could be:: //div[@id='description'] .. highlight:: html Finally, the file size is contained in the second ``

    `` tag inside the ``

    `` tag with ``id=specifications``::

    Category: Movies > Documentary

    Total size: 699.79 megabyte

    .. highlight:: none An XPath expression to select the description could be:: //div[@id='specifications']/p[2]/text()[2] .. highlight:: python For more information about XPath see the `XPath reference`_. Finally, here's the spider code:: class MininovaSpider(CrawlSpider): name = 'mininova.org' allowed_domains = ['mininova.org'] start_urls = ['http://www.mininova.org/today'] rules = [Rule(SgmlLinkExtractor(allow=['/tor/\d+']), 'parse_torrent')] def parse_torrent(self, response): x = HtmlXPathSelector(response) torrent = TorrentItem() torrent['url'] = response.url torrent['name'] = x.select("//h1/text()").extract() torrent['description'] = x.select("//div[@id='description']").extract() torrent['size'] = x.select("//div[@id='info-left']/p[2]/text()[2]").extract() return torrent For brevity's sake, we intentionally left out the import statements. The Torrent item is :ref:`defined above `. Run the spider to extract the data ================================== Finally, we'll run the spider to crawl the site an output file ``scraped_data.json`` with the scraped data in JSON format:: scrapy crawl mininova.org -o scraped_data.json -t json This uses :ref:`feed exports ` to generate the JSON file. You can easily change the export format (XML or CSV, for example) or the storage backend (FTP or `Amazon S3`_, for example). You can also write an :ref:`item pipeline ` to store the items in a database very easily. Review scraped data =================== If you check the ``scraped_data.json`` file after the process finishes, you'll see the scraped items there:: [{"url": "http://www.mininova.org/tor/2657665", "name": ["Home[2009][Eng]XviD-ovd"], "description": ["HOME - a documentary film by ..."], "size": ["699.69 megabyte"]}, # ... other items ... ] You'll notice that all field values (except for the ``url`` which was assigned directly) are actually lists. This is because the :ref:`selectors ` return lists. You may want to store single values, or perform some additional parsing/cleansing to the values. That's what :ref:`Item Loaders ` are for. What else? ========== You've seen how to extract and store items from a website using Scrapy, but this is just the surface. Scrapy provides a lot of powerful features for making scraping easy and efficient, such as: * Built-in support for :ref:`selecting and extracting ` data from HTML and XML sources * Built-in support for cleaning and sanitizing the scraped data using a collection of reusable filters (called :ref:`Item Loaders `) shared between all the spiders. * Built-in support for :ref:`generating feed exports ` in multiple formats (JSON, CSV, XML) and storing them in multiple backends (FTP, S3, local filesystem) * A media pipeline for :ref:`automatically downloading images ` (or any other media) associated with the scraped items * Support for :ref:`extending Scrapy ` by plugging your own functionality using :ref:`signals ` and a well-defined API (middlewares, :ref:`extensions `, and :ref:`pipelines `). * Wide range of built-in middlewares and extensions for: * cookies and session handling * HTTP compression * HTTP authentication * HTTP cache * user-agent spoofing * robots.txt * crawl depth restriction * and more * Robust encoding support and auto-detection, for dealing with foreign, non-standard and broken encoding declarations. 
* Extensible :ref:`stats collection ` for multiple spider metrics, useful for monitoring the performance of your spiders and detecting when they get broken * An :ref:`Interactive shell console ` for trying XPaths, very useful for writing and debugging your spiders * A :ref:`System service ` designed to ease the deployment and run of your spiders in production. * A built-in :ref:`Web service ` for monitoring and controlling your bot * A :ref:`Telnet console ` for hooking into a Python console running inside your Scrapy process, to introspect and debug your crawler * :ref:`Logging ` facility that you can hook on to for catching errors during the scraping process. * Support for crawling based on URLs discovered through `Sitemaps`_ * A caching DNS resolver What's next? ============ The next obvious steps are for you to `download Scrapy`_, read :ref:`the tutorial ` and join `the community`_. Thanks for your interest! .. _download Scrapy: http://scrapy.org/download/ .. _the community: http://scrapy.org/community/ .. _screen scraping: http://en.wikipedia.org/wiki/Screen_scraping .. _web scraping: http://en.wikipedia.org/wiki/Web_scraping .. _Amazon Associates Web Services: http://aws.amazon.com/associates/ .. _Mininova: http://www.mininova.org .. _XPath: http://www.w3.org/TR/xpath .. _XPath reference: http://www.w3.org/TR/xpath .. _Amazon S3: http://aws.amazon.com/s3/ .. _Sitemaps: http://www.sitemaps.org Scrapy-0.14.4/docs/intro/tutorial.rst0000600000016101777760000004231111754531743017624 0ustar buildbotnogroup.. _intro-tutorial: =============== Scrapy Tutorial =============== In this tutorial, we'll assume that Scrapy is already installed on your system. If that's not the case, see :ref:`intro-install`. We are going to use `Open directory project (dmoz) `_ as our example domain to scrape. This tutorial will walk you through these tasks: 1. Creating a new Scrapy project 2. Defining the Items you will extract 3. Writing a :ref:`spider ` to crawl a site and extract :ref:`Items ` 4. Writing an :ref:`Item Pipeline ` to store the extracted Items Scrapy is written in Python_. If you're new to the language you might want to start by getting an idea of what the language is like, to get the most out of Scrapy. If you're already familiar with other languages, and want to learn Python quickly, we recommend `Dive Into Python`_. If you're new to programming and want to start with Python, take a look at `this list of Python resources for non-programmers`_. .. _Python: http://www.python.org .. _this list of Python resources for non-programmers: http://wiki.python.org/moin/BeginnersGuide/NonProgrammers .. _Dive Into Python: http://www.diveintopython.org Creating a project ================== Before you start scraping, you will have set up a new Scrapy project. Enter a directory where you'd like to store your code and then run:: scrapy startproject tutorial This will create a ``tutorial`` directory with the following contents:: tutorial/ scrapy.cfg tutorial/ __init__.py items.py pipelines.py settings.py spiders/ __init__.py ... These are basically: * ``scrapy.cfg``: the project configuration file * ``tutorial/``: the project's python module, you'll later import your code from here. * ``tutorial/items.py``: the project's items file. * ``tutorial/pipelines.py``: the project's pipelines file. * ``tutorial/settings.py``: the project's settings file. * ``tutorial/spiders/``: a directory where you'll later put your spiders. 
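A brief note on the ``scrapy.cfg`` file mentioned above: it is a small INI-style file whose essential job is to tell Scrapy which settings module the project uses. As a rough sketch only (the exact template generated by your Scrapy version may differ)::

    [settings]
    default = tutorial.settings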
Defining our Item ================= `Items` are containers that will be loaded with the scraped data; they work like simple python dicts but provide additional protecting against populating undeclared fields, to prevent typos. They are declared by creating an :class:`scrapy.item.Item` class an defining its attributes as :class:`scrapy.item.Field` objects, like you will in an ORM (don't worry if you're not familiar with ORMs, you will see that this is an easy task). We begin by modeling the item that we will use to hold the sites data obtained from dmoz.org, as we want to capture the name, url and description of the sites, we define fields for each of these three attributes. To do that, we edit items.py, found in the ``tutorial`` directory. Our Item class looks like this:: from scrapy.item import Item, Field class DmozItem(Item): title = Field() link = Field() desc = Field() This may seem complicated at first, but defining the item allows you to use other handy components of Scrapy that need to know how your item looks like. Our first Spider ================ Spiders are user-written classes used to scrape information from a domain (or group of domains). They define an initial list of URLs to download, how to follow links, and how to parse the contents of those pages to extract :ref:`items `. To create a Spider, you must subclass :class:`scrapy.spider.BaseSpider`, and define the three main, mandatory, attributes: * :attr:`~scrapy.spider.BaseSpider.name`: identifies the Spider. It must be unique, that is, you can't set the same name for different Spiders. * :attr:`~scrapy.spider.BaseSpider.start_urls`: is a list of URLs where the Spider will begin to crawl from. So, the first pages downloaded will be those listed here. The subsequent URLs will be generated successively from data contained in the start URLs. * :meth:`~scrapy.spider.BaseSpider.parse` is a method of the spider, which will be called with the downloaded :class:`~scrapy.http.Response` object of each start URL. The response is passed to the method as the first and only argument. This method is responsible for parsing the response data and extracting scraped data (as scraped items) and more URLs to follow. The :meth:`~scrapy.spider.BaseSpider.parse` method is in charge of processing the response and returning scraped data (as :class:`~scrapy.item.Item` objects) and more URLs to follow (as :class:`~scrapy.http.Request` objects). This is the code for our first Spider; save it in a file named ``dmoz_spider.py`` under the ``dmoz/spiders`` directory:: from scrapy.spider import BaseSpider class DmozSpider(BaseSpider): name = "dmoz" allowed_domains = ["dmoz.org"] start_urls = [ "http://www.dmoz.org/Computers/Programming/Languages/Python/Books/", "http://www.dmoz.org/Computers/Programming/Languages/Python/Resources/" ] def parse(self, response): filename = response.url.split("/")[-2] open(filename, 'wb').write(response.body) Crawling -------- To put our spider to work, go to the project's top level directory and run:: scrapy crawl dmoz The ``crawl dmoz`` command runs the spider for the ``dmoz.org`` domain. You will get an output similar to this:: 2008-08-20 03:51:13-0300 [scrapy] INFO: Started project: dmoz 2008-08-20 03:51:13-0300 [tutorial] INFO: Enabled extensions: ... 2008-08-20 03:51:13-0300 [tutorial] INFO: Enabled downloader middlewares: ... 2008-08-20 03:51:13-0300 [tutorial] INFO: Enabled spider middlewares: ... 2008-08-20 03:51:13-0300 [tutorial] INFO: Enabled item pipelines: ... 
2008-08-20 03:51:14-0300 [dmoz] INFO: Spider opened 2008-08-20 03:51:14-0300 [dmoz] DEBUG: Crawled (referer: ) 2008-08-20 03:51:14-0300 [dmoz] DEBUG: Crawled (referer: ) 2008-08-20 03:51:14-0300 [dmoz] INFO: Spider closed (finished) Pay attention to the lines containing ``[dmoz]``, which corresponds to our spider. You can see a log line for each URL defined in ``start_urls``. Because these URLs are the starting ones, they have no referrers, which is shown at the end of the log line, where it says ``(referer: )``. But more interesting, as our ``parse`` method instructs, two files have been created: *Books* and *Resources*, with the content of both URLs. What just happened under the hood? ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Scrapy creates :class:`scrapy.http.Request` objects for each URL in the ``start_urls`` attribute of the Spider, and assigns them the ``parse`` method of the spider as their callback function. These Requests are scheduled, then executed, and :class:`scrapy.http.Response` objects are returned and then fed back to the spider, through the :meth:`~scrapy.spider.BaseSpider.parse` method. Extracting Items ---------------- Introduction to Selectors ^^^^^^^^^^^^^^^^^^^^^^^^^ There are several ways to extract data from web pages. Scrapy uses a mechanism based on `XPath`_ expressions called :ref:`XPath selectors `. For more information about selectors and other extraction mechanisms see the :ref:`XPath selectors documentation `. .. _XPath: http://www.w3.org/TR/xpath Here are some examples of XPath expressions and their meanings: * ``/html/head/title``: selects the ```` element, inside the ``<head>`` element of a HTML document * ``/html/head/title/text()``: selects the text inside the aforementioned ``<title>`` element. * ``//td``: selects all the ``<td>`` elements * ``//div[@class="mine"]``: selects all ``div`` elements which contain an attribute ``class="mine"`` These are just a couple of simple examples of what you can do with XPath, but XPath expressions are indeed much more powerful. To learn more about XPath we recommend `this XPath tutorial <http://www.w3schools.com/XPath/default.asp>`_. For working with XPaths, Scrapy provides a :class:`~scrapy.selector.XPathSelector` class, which comes in two flavours, :class:`~scrapy.selector.HtmlXPathSelector` (for HTML data) and :class:`~scrapy.selector.XmlXPathSelector` (for XML data). In order to use them you must instantiate the desired class with a :class:`~scrapy.http.Response` object. You can see selectors as objects that represent nodes in the document structure. So, the first instantiated selectors are associated to the root node, or the entire document. Selectors have three methods (click on the method to see the complete API documentation). * :meth:`~scrapy.selector.XPathSelector.select`: returns a list of selectors, each of them representing the nodes selected by the xpath expression given as argument. * :meth:`~scrapy.selector.XPathSelector.extract`: returns a unicode string with the data selected by the XPath selector. * :meth:`~scrapy.selector.XPathSelector.re`: returns a list of unicode strings extracted by applying the regular expression given as argument. Trying Selectors in the Shell ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ To illustrate the use of Selectors we're going to use the built-in :ref:`Scrapy shell <topics-shell>`, which also requires IPython (an extended Python console) installed on your system. 
To start a shell, you must go to the project's top level directory and run:: scrapy shell http://www.dmoz.org/Computers/Programming/Languages/Python/Books/ This is what the shell looks like:: [ ... Scrapy log here ... ] [s] Available Scrapy objects: [s] 2010-08-19 21:45:59-0300 [default] INFO: Spider closed (finished) [s] hxs <HtmlXPathSelector (http://www.dmoz.org/Computers/Programming/Languages/Python/Books/) xpath=None> [s] item Item() [s] request <GET http://www.dmoz.org/Computers/Programming/Languages/Python/Books/> [s] response <200 http://www.dmoz.org/Computers/Programming/Languages/Python/Books/> [s] spider <BaseSpider 'default' at 0x1b6c2d0> [s] xxs <XmlXPathSelector (http://www.dmoz.org/Computers/Programming/Languages/Python/Books/) xpath=None> [s] Useful shortcuts: [s] shelp() Print this help [s] fetch(req_or_url) Fetch a new request or URL and update shell objects [s] view(response) View response in a browser In [1]: After the shell loads, you will have the response fetched in a local ``response`` variable, so if you type ``response.body`` you will see the body of the response, or you can type ``response.headers`` to see its headers. The shell also instantiates two selectors, one for HTML (in the ``hxs`` variable) and one for XML (in the ``xxs`` variable) with this response. So let's try them:: In [1]: hxs.select('//title') Out[1]: [<HtmlXPathSelector (title) xpath=//title>] In [2]: hxs.select('//title').extract() Out[2]: [u'<title>Open Directory - Computers: Programming: Languages: Python: Books'] In [3]: hxs.select('//title/text()') Out[3]: [] In [4]: hxs.select('//title/text()').extract() Out[4]: [u'Open Directory - Computers: Programming: Languages: Python: Books'] In [5]: hxs.select('//title/text()').re('(\w+):') Out[5]: [u'Computers', u'Programming', u'Languages', u'Python'] Extracting the data ^^^^^^^^^^^^^^^^^^^ Now, let's try to extract some real information from those pages. You could type ``response.body`` in the console, and inspect the source code to figure out the XPaths you need to use. However, inspecting the raw HTML code there could become a very tedious task. To make this an easier task, you can use some Firefox extensions like Firebug. For more information see :ref:`topics-firebug` and :ref:`topics-firefox`. After inspecting the page source, you'll find that the web sites information is inside a ``
<ul>`` element, in fact the *second* ``<ul>`` element. So we can select each ``<li>
      • `` element belonging to the sites list with this code:: hxs.select('//ul/li') And from them, the sites descriptions:: hxs.select('//ul/li/text()').extract() The sites titles:: hxs.select('//ul/li/a/text()').extract() And the sites links:: hxs.select('//ul/li/a/@href').extract() As we said before, each ``select()`` call returns a list of selectors, so we can concatenate further ``select()`` calls to dig deeper into a node. We are going to use that property here, so:: sites = hxs.select('//ul/li') for site in sites: title = site.select('a/text()').extract() link = site.select('a/@href').extract() desc = site.select('text()').extract() print title, link, desc .. note:: For a more detailed description of using nested selectors, see :ref:`topics-selectors-nesting-selectors` and :ref:`topics-selectors-relative-xpaths` in the :ref:`topics-selectors` documentation Let's add this code to our spider:: from scrapy.spider import BaseSpider from scrapy.selector import HtmlXPathSelector class DmozSpider(BaseSpider): name = "dmoz" allowed_domains = ["dmoz.org"] start_urls = [ "http://www.dmoz.org/Computers/Programming/Languages/Python/Books/", "http://www.dmoz.org/Computers/Programming/Languages/Python/Resources/" ] def parse(self, response): hxs = HtmlXPathSelector(response) sites = hxs.select('//ul/li') for site in sites: title = site.select('a/text()').extract() link = site.select('a/@href').extract() desc = site.select('text()').extract() print title, link, desc Now try crawling the dmoz.org domain again and you'll see sites being printed in your output, run:: scrapy crawl dmoz Using our item -------------- :class:`~scrapy.item.Item` objects are custom python dicts; you can access the values of their fields (attributes of the class we defined earlier) using the standard dict syntax like:: >>> item = DmozItem() >>> item['title'] = 'Example title' >>> item['title'] 'Example title' Spiders are expected to return their scraped data inside :class:`~scrapy.item.Item` objects. So, in order to returnthe data we've scraped so far, the final code for our Spider would be like this:: from scrapy.spider import BaseSpider from scrapy.selector import HtmlXPathSelector from tutorial.items import DmozItem class DmozSpider(BaseSpider): name = "dmoz" allowed_domains = ["dmoz.org"] start_urls = [ "http://www.dmoz.org/Computers/Programming/Languages/Python/Books/", "http://www.dmoz.org/Computers/Programming/Languages/Python/Resources/" ] def parse(self, response): hxs = HtmlXPathSelector(response) sites = hxs.select('//ul/li') items = [] for site in sites: item = DmozItem() item['title'] = site.select('a/text()').extract() item['link'] = site.select('a/@href').extract() item['desc'] = site.select('text()').extract() items.append(item) return items .. note:: You can find a fully-functional variant of this spider in the dirbot_ project available at https://github.com/scrapy/dirbot Now doing a crawl on the dmoz.org domain yields ``DmozItem``'s:: [dmoz] DEBUG: Scraped from <200 http://www.dmoz.org/Computers/Programming/Languages/Python/Books/> {'desc': [u' - By David Mertz; Addison Wesley. Book in progress, full text, ASCII format. Asks for feedback. [author website, Gnosis Software, Inc.\n], 'link': [u'http://gnosis.cx/TPiP/'], 'title': [u'Text Processing in Python']} [dmoz] DEBUG: Scraped from <200 http://www.dmoz.org/Computers/Programming/Languages/Python/Books/> {'desc': [u' - By Sean McGrath; Prentice Hall PTR, 2000, ISBN 0130211192, has CD-ROM. 
Methods to build XML applications fast, Python tutorial, DOM and SAX, new Pyxie open source XML processing library. [Prentice Hall PTR]\n'], 'link': [u'http://www.informit.com/store/product.aspx?isbn=0130211192'], 'title': [u'XML Processing with Python']} Storing the scraped data ======================== The simplest way to store the scraped data is by using the :ref:`Feed exports `, with the following command:: scrapy crawl dmoz -o items.json -t json That will generate a ``items.json`` file containing all scraped items, serialized in `JSON`_. In small projects (like the one in this tutorial), that should be enough. However, if you want to perform more complex things with the scraped items, you can write an :ref:`Item Pipeline `. As with Items, a placeholder file for Item Pipelines has been set up for you when the project is created, in ``tutorial/pipelines.py``. Though you don't need to implement any item pipeline if you just want to store the scraped items. Next steps ========== This tutorial covers only the basics of Scrapy, but there's a lot of other features not mentioned here. We recommend you continue by playing with an example project (see :ref:`intro-examples`), and then continue with the section :ref:`section-basics`. .. _JSON: http://en.wikipedia.org/wiki/JSON .. _dirbot: https://github.com/scrapy/dirbot Scrapy-0.14.4/docs/intro/examples.rst0000600000016101777760000000162611754531743017603 0ustar buildbotnogroup.. _intro-examples: ======== Examples ======== The best way to learn is with examples, and Scrapy is no exception. For this reason, there is an example Scrapy project named dirbot_, that you can use to play and learn more about Scrapy. It contains the dmoz spider described in the tutorial. This dirbot_ project is available at: https://github.com/scrapy/dirbot It contains a README file with a detailed description of the project contents. If you're familiar with git, you can checkout the code. Otherwise you can download a tarball or zip file of the project by clicking on `Downloads`_. There is also a site for sharing code snippets such as spiders, middlewares, extensions, or scripts, called `Scrapy snippets`_. Feel free to share any code there. .. _dirbot: https://github.com/scrapy/dirbot .. _Downloads: https://github.com/scrapy/dirbot/archives/master .. _Scrapy snippets: http://snippets.scrapy.org/ Scrapy-0.14.4/docs/intro/install.rst0000600000016101777760000001237311754531743017434 0ustar buildbotnogroup.. _intro-install: ================== Installation guide ================== This document describes how to install Scrapy on Linux, Windows and Mac OS X. .. _intro-install-requirements: Requirements ============ * `Python`_ 2.5, 2.6, 2.7 (3.x is not yet supported) * `Twisted`_ 2.5.0, 8.0 or above (Windows users: you'll need to install `Zope.Interface`_ and maybe `pywin32`_ because of `this Twisted bug`_) * `w3lib`_ * `lxml`_ or `libxml2`_ (if using `libxml2`_, version 2.6.28 or above is highly recommended) * `simplejson`_ (not required if using Python 2.6 or above) * `pyopenssl `_ (for HTTPS support. Optional, but highly recommended) .. _intro-install-python: Install Python ============== First, you need to install Python, if you haven't done so already. Scrapy works with Python 2.5, 2.6 or 2.7, which you can get at http://www.python.org/download/ .. seealso:: :ref:`faq-python-versions` .. highlight:: sh .. _intro-install-scrapy: Install Scrapy ============== There are many ways to install Scrapy. Pick the one you feel more comfortable with. 
* :ref:`intro-install-release` (requires installing dependencies separately) * :ref:`intro-install-easy` (automatically installs dependencies) * :ref:`intro-install-pip` (automatically installs dependencies) .. _intro-install-release: Download and install an official release ---------------------------------------- Download Scrapy from the `Download page`_. Scrapy is distributed in two ways: a source code tarball (for Unix and Mac OS X systems) and a Windows installer (for Windows). If you downloaded the tarball, you can install it as any Python package using ``setup.py``:: tar zxf Scrapy-X.X.X.tar.gz cd Scrapy-X.X.X python setup.py install If you downloaded the Windows installer, just run it. .. warning:: In Windows, you may need to add the ``C:\Python25\Scripts`` (or ``C:\Python26\Scripts``) folder to the system path by adding that directory to the ``PATH`` environment variable from the `Control Panel`_. .. _Download page: http://scrapy.org/download/ .. _intro-install-easy: Installing with ``easy_install`` -------------------------------- You can install Scrapy using setuptools_'s ``easy_install`` with:: easy_install -U Scrapy .. _intro-install-pip: Installing with `pip`_ ---------------------- You can install Scrapy using `pip`_ with:: pip install Scrapy .. _intro-install-platforms: Platform specific instructions ============================== Linux ----- Ubuntu 9.10 or above ~~~~~~~~~~~~~~~~~~~~ If you're running Ubuntu 9.10 (or above), use the official :ref:`Ubuntu Packages `, which already solve all dependencies for you and are continuously updated with the latest bug fixes. Debian or Ubuntu (9.04 or older) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ If you're running Debian Linux, run the following command as root:: apt-get install python-twisted python-libxml2 python-pyopenssl python-simplejson Then:: easy_install -U w3lib And then follow the instructions in :ref:`intro-install-scrapy`. Arch Linux ~~~~~~~~~~ If you are running Arch Linux, run the following command as root:: pacman -S twisted libxml2 pyopenssl python-simplejson Then:: easy_install -U w3lib And then follow the instructions in :ref:`intro-install-scrapy`. Other Linux distros ~~~~~~~~~~~~~~~~~~~ The easiest way to install Scrapy in other Linux distros is through ``easy_install``, which will automatically install Twisted, w3lib and lxml as dependencies. See :ref:`intro-install-easy`. Another way would be to install dependencies, if you know the packages in your distros that meets them. See :ref:`intro-install-requirements`. Mac OS X -------- The easiest way to install Scrapy on Mac is through ``easy_install`` or ``pip``, which will automatically install Twisted, w3lib and lxml dependencies. See :ref:`intro-install-easy`. Windows ------- There are two ways to install Scrapy in Windows: * using ``easy_install`` or ``pip`` - see :ref:`intro-install-easy` or :ref:`intro-install-pip` * using the Windows installer, but you need to download and install the dependencies manually: 1. `Twisted for Windows `_ - you may need to install `pywin32`_ because of `this Twisted bug`_ 2. Install `Zope.Interface`_ (required by Twisted) 3. `libxml2 for Windows `_ 4. `PyOpenSSL for Windows `_ 5. Download the Windows installer from the `Downloads page`_ and install it. .. _Python: http://www.python.org .. _Twisted: http://twistedmatrix.com .. _w3lib: http://pypi.python.org/pypi/w3lib .. _lxml: http://codespeak.net/lxml/ .. _libxml2: http://xmlsoft.org .. _pywin32: http://sourceforge.net/projects/pywin32/ .. 
_simplejson: http://pypi.python.org/pypi/simplejson/ .. _Zope.Interface: http://pypi.python.org/pypi/zope.interface#download .. _this Twisted bug: http://twistedmatrix.com/trac/ticket/3707 .. _pip: http://pypi.python.org/pypi/pip .. _setuptools: http://pypi.python.org/pypi/setuptools .. _Control Panel: http://www.microsoft.com/resources/documentation/windows/xp/all/proddocs/en-us/sysdm_advancd_environmnt_addchange_variable.mspx .. _Downloads page: http://scrapy.org/download/ Scrapy-0.14.4/docs/Makefile0000600000016101777760000000426111754531743015536 0ustar buildbotnogroup# # Makefile for Scrapy documentation [based on Python documentation Makefile] # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # # You can set these variables from the command line. PYTHON = python SPHINXOPTS = PAPER = SOURCES = ALLSPHINXOPTS = -b $(BUILDER) -d build/doctrees -D latex_paper_size=$(PAPER) \ $(SPHINXOPTS) . build/$(BUILDER) $(SOURCES) .PHONY: help update build html htmlhelp clean help: @echo "Please use \`make ' where is one of" @echo " html to make standalone HTML files" @echo " htmlhelp to make HTML files and a HTML help project" @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" @echo " text to make plain text files" @echo " changes to make an overview over all changed/added/deprecated items" @echo " linkcheck to check all external links for integrity" build: mkdir -p build/$(BUILDER) build/doctrees sphinx-build $(ALLSPHINXOPTS) @echo html: BUILDER = html html: build @echo "Build finished. The HTML pages are in build/html." htmlhelp: BUILDER = htmlhelp htmlhelp: build @echo "Build finished; now you can run HTML Help Workshop with the" \ "build/htmlhelp/pydoc.hhp project file." latex: BUILDER = latex latex: build @echo "Build finished; the LaTeX files are in build/latex." @echo "Run \`make all-pdf' or \`make all-ps' in that directory to" \ "run these through (pdf)latex." text: BUILDER = text text: build @echo "Build finished; the text files are in build/text." changes: BUILDER = changes changes: build @echo "The overview file is in build/changes." 
linkcheck: BUILDER = linkcheck linkcheck: build @echo "Link check complete; look for any errors in the above output " \ "or in build/$(BUILDER)/output.txt" doctest: BUILDER = doctest doctest: build @echo "Testing of doctests in the sources finished, look at the " \ "results in build/doctest/output.txt" pydoc-topics: BUILDER = pydoc-topics pydoc-topics: build @echo "Building finished; now copy build/pydoc-topics/pydoc_topics.py " \ "into the Lib/ directory" htmlview: html $(PYTHON) -c "import webbrowser; webbrowser.open('build/html/index.html')" clean: -rm -rf build/* Scrapy-0.14.4/docs/_static/0000700000016101777760000000000011754532077015520 5ustar buildbotnogroupScrapy-0.14.4/docs/_static/scrapydoc.css0000600000016101777760000002272111754531743020226 0ustar buildbotnogroup/** * Sphinx Doc Design */ body { font-family: sans-serif; font-size: 100%; background-color: #3d1e11; color: #000; margin: 0; padding: 0; } /* :::: LAYOUT :::: */ div.document { background-color: #69341e; } div.documentwrapper { float: left; width: 100%; } div.bodywrapper { margin: 0 0 0 230px; } div.body { background-color: white; padding: 0 20px 30px 20px; } div.sphinxsidebarwrapper { padding: 10px 5px 0 10px; } div.sphinxsidebar { float: left; width: 230px; margin-left: -100%; font-size: 90%; } div.clearer { clear: both; } div.footer { color: #fff; width: 100%; padding: 9px 0 9px 0; text-align: center; font-size: 75%; } div.footer a { color: #fff; text-decoration: underline; } div.related { background-color: #5b1616; color: #fff; width: 100%; line-height: 30px; font-size: 90%; } div.related h3 { display: none; } div.related ul { margin: 0; padding: 0 0 0 10px; list-style: none; } div.related li { display: inline; } div.related li.right { float: right; margin-right: 5px; } div.related a { color: white; } /* ::: TOC :::: */ div.sphinxsidebar h3 { font-family: 'Trebuchet MS', sans-serif; color: white; font-size: 1.4em; font-weight: normal; margin: 0; padding: 0; } div.sphinxsidebar h3 a { color: white; } div.sphinxsidebar h4 { font-family: 'Trebuchet MS', sans-serif; color: white; font-size: 1.3em; font-weight: normal; margin: 5px 0 0 0; padding: 0; } div.sphinxsidebar p { color: white; } div.sphinxsidebar p.topless { margin: 5px 10px 10px 10px; } div.sphinxsidebar ul { margin: 10px; padding: 0; list-style: none; color: white; } div.sphinxsidebar ul ul, div.sphinxsidebar ul.want-points { margin-left: 20px; list-style: square; } div.sphinxsidebar ul ul { margin-top: 0; margin-bottom: 0; } div.sphinxsidebar a { color: #ffca9b; } div.sphinxsidebar form { margin-top: 10px; } div.sphinxsidebar input { border: 1px solid #ffca9b; font-family: sans-serif; font-size: 1em; } /* :::: MODULE CLOUD :::: */ div.modulecloud { margin: -5px 10px 5px 10px; padding: 10px; line-height: 160%; border: 1px solid #cbe7e5; background-color: #f2fbfd; } div.modulecloud a { padding: 0 5px 0 5px; } /* :::: SEARCH :::: */ ul.search { margin: 10px 0 0 20px; padding: 0; } ul.search li { padding: 5px 0 5px 20px; background-image: url(file.png); background-repeat: no-repeat; background-position: 0 7px; } ul.search li a { font-weight: bold; } ul.search li div.context { color: #888; margin: 2px 0 0 30px; text-align: left; } ul.keywordmatches li.goodmatch a { font-weight: bold; } /* :::: COMMON FORM STYLES :::: */ div.actions { padding: 5px 10px 5px 10px; border-top: 1px solid #cbe7e5; border-bottom: 1px solid #cbe7e5; background-color: #e0f6f4; } form dl { color: #333; } form dt { clear: both; float: left; min-width: 110px; margin-right: 10px; 
padding-top: 2px; } input#homepage { display: none; } div.error { margin: 5px 20px 0 0; padding: 5px; border: 1px solid #d00; font-weight: bold; } /* :::: INDEX PAGE :::: */ table.contentstable { width: 90%; } table.contentstable p.biglink { line-height: 150%; } a.biglink { font-size: 1.3em; } span.linkdescr { font-style: italic; padding-top: 5px; font-size: 90%; } /* :::: INDEX STYLES :::: */ table.indextable td { text-align: left; vertical-align: top; } table.indextable dl, table.indextable dd { margin-top: 0; margin-bottom: 0; } table.indextable tr.pcap { height: 10px; } table.indextable tr.cap { margin-top: 10px; background-color: #f2f2f2; } img.toggler { margin-right: 3px; margin-top: 3px; cursor: pointer; } form.pfform { margin: 10px 0 20px 0; } /* :::: GLOBAL STYLES :::: */ .docwarning { background-color: #ffe4e4; padding: 10px; margin: 0 -20px 0 -20px; border-bottom: 1px solid #f66; } p.subhead { font-weight: bold; margin-top: 20px; } a { color: #6e0909; text-decoration: none; } a:hover { text-decoration: underline; } div.body h1, div.body h2, div.body h3, div.body h4, div.body h5, div.body h6 { font-family: 'Trebuchet MS', sans-serif; background-color: #f2f2f2; font-weight: normal; color: #331F0A; border-bottom: 1px solid #ccc; margin: 20px -20px 10px -20px; padding: 3px 0 3px 10px; } div.body h1 { margin-top: 0; font-size: 200%; } div.body h2 { font-size: 160%; } div.body h3 { font-size: 140%; } div.body h4 { font-size: 120%; } div.body h5 { font-size: 110%; } div.body h6 { font-size: 100%; } a.headerlink { color: #c60f0f; font-size: 0.8em; padding: 0 4px 0 4px; text-decoration: none; visibility: hidden; } h1:hover > a.headerlink, h2:hover > a.headerlink, h3:hover > a.headerlink, h4:hover > a.headerlink, h5:hover > a.headerlink, h6:hover > a.headerlink, dt:hover > a.headerlink { visibility: visible; } a.headerlink:hover { background-color: #c60f0f; color: white; } div.body p, div.body dd, div.body li { text-align: justify; line-height: 130%; } div.body p.caption { text-align: inherit; } div.body td { text-align: left; } ul.fakelist { list-style: none; margin: 10px 0 10px 20px; padding: 0; } .field-list ul { padding-left: 1em; } .first { margin-top: 0 !important; } /* "Footnotes" heading */ p.rubric { margin-top: 30px; font-weight: bold; } /* Sidebars */ div.sidebar { margin: 0 0 0.5em 1em; border: 1px solid #ddb; padding: 7px 7px 0 7px; background-color: #ffe; width: 40%; float: right; } p.sidebar-title { font-weight: bold; } /* "Topics" */ div.topic { background-color: #eee; border: 1px solid #ccc; padding: 7px 7px 0 7px; margin: 10px 0 10px 0; } p.topic-title { font-size: 1.1em; font-weight: bold; margin-top: 10px; } /* Admonitions */ div.admonition { margin-top: 10px; margin-bottom: 10px; padding: 7px; } div.admonition dt { font-weight: bold; } div.admonition dl { margin-bottom: 0; } div.admonition p.admonition-title + p { display: inline; } div.seealso { background-color: #ffc; border: 1px solid #ff6; } div.warning { background-color: #ffe4e4; border: 1px solid #f66; } div.note { background-color: #eee; border: 1px solid #ccc; } p.admonition-title { margin: 0px 10px 5px 0px; font-weight: bold; display: inline; } p.admonition-title:after { content: ":"; } div.body p.centered { text-align: center; margin-top: 25px; } table.docutils { border: 0; } table.docutils td, table.docutils th { padding: 1px 8px 1px 0; border-top: 0; border-left: 0; border-right: 0; border-bottom: 1px solid #aaa; } table.field-list td, table.field-list th { border: 0 !important; } table.footnote td, 
table.footnote th { border: 0 !important; } .field-list ul { margin: 0; padding-left: 1em; } .field-list p { margin: 0; } dl { margin-bottom: 15px; clear: both; } dd p { margin-top: 0px; } dd ul, dd table { margin-bottom: 10px; } dd { margin-top: 3px; margin-bottom: 10px; margin-left: 30px; } .refcount { color: #060; } dt:target, .highlight { background-color: #fbe54e; } dl.glossary dt { font-weight: bold; font-size: 1.1em; } th { text-align: left; padding-right: 5px; } pre { padding: 5px; background-color: #efc; color: #333; border: 1px solid #ac9; border-left: none; border-right: none; overflow: auto; } td.linenos pre { padding: 5px 0px; border: 0; background-color: transparent; color: #aaa; } table.highlighttable { margin-left: 0.5em; } table.highlighttable td { padding: 0 0.5em 0 0.5em; } tt { background-color: #ecf0f3; padding: 0 1px 0 1px; font-size: 0.95em; } tt.descname { background-color: transparent; font-weight: bold; font-size: 1.2em; } tt.descclassname { background-color: transparent; } tt.xref, a tt { background-color: transparent; font-weight: bold; } .footnote:target { background-color: #ffa } h1 tt, h2 tt, h3 tt, h4 tt, h5 tt, h6 tt { background-color: transparent; } .optional { font-size: 1.3em; } .versionmodified { font-style: italic; } form.comment { margin: 0; padding: 10px 30px 10px 30px; background-color: #eee; } form.comment h3 { background-color: #326591; color: white; margin: -10px -30px 10px -30px; padding: 5px; font-size: 1.4em; } form.comment input, form.comment textarea { border: 1px solid #ccc; padding: 2px; font-family: sans-serif; font-size: 100%; } form.comment input[type="text"] { width: 240px; } form.comment textarea { width: 100%; height: 200px; margin-bottom: 10px; } .system-message { background-color: #fda; padding: 5px; border: 3px solid red; } img.math { vertical-align: middle; } div.math p { text-align: center; } span.eqno { float: right; } img.logo { border: 0; } /* :::: PRINT :::: */ @media print { div.document, div.documentwrapper, div.bodywrapper { margin: 0; width : 100%; } div.sphinxsidebar, div.related, div.footer, div#comments div.new-comment-box, #top-link { display: none; } } Scrapy-0.14.4/docs/_static/selectors-sample1.html0000600000016101777760000000106511754531743021754 0ustar buildbotnogroup Example website Scrapy-0.14.4/docs/_ext/0000700000016101777760000000000011754532077015031 5ustar buildbotnogroupScrapy-0.14.4/docs/_ext/scrapydocs.py0000600000016101777760000000172511754531743017563 0ustar buildbotnogroupfrom docutils.parsers.rst.roles import set_classes from docutils import nodes def setup(app): app.add_crossref_type( directivename = "setting", rolename = "setting", indextemplate = "pair: %s; setting", ) app.add_crossref_type( directivename = "signal", rolename = "signal", indextemplate = "pair: %s; signal", ) app.add_crossref_type( directivename = "command", rolename = "command", indextemplate = "pair: %s; command", ) app.add_crossref_type( directivename = "reqmeta", rolename = "reqmeta", indextemplate = "pair: %s; reqmeta", ) app.add_role('source', source_role) def source_role(name, rawtext, text, lineno, inliner, options={}, content=[]): url = 'https://github.com/scrapy/scrapy/blob/master/' + text set_classes(options) node = nodes.reference(rawtext, text, refuri=url, **options) return [node], [] Scrapy-0.14.4/docs/README0000600000016101777760000000224011754531743014751 0ustar buildbotnogroup====================================== Scrapy documentation quick start guide ====================================== This file provides a
quick guide on how to compile the Scrapy documentation. Setup the environment --------------------- To compile the documentation you need the following Python libraries: * Sphinx * docutils * jinja If you have setuptools available the following command will install all of them (since Sphinx requires both docutils and jinja):: easy_install Sphinx Compile the documentation ------------------------- To compile the documentation (to classic HTML output) run the following command from this dir:: make html Documentation will be generated (in HTML format) inside the ``build/html`` dir. View the documentation ---------------------- To view the documentation run the following command:: make htmlview This command will fire up your default browser and open the main page of your (previously generated) HTML documentation. Start over ---------- To cleanup all generated documentation files and start from scratch run:: make clean Keep in mind that this command won't touch any documentation source files. Scrapy-0.14.4/PKG-INFO0000600000016101777760000000204211754532100014222 0ustar buildbotnogroupMetadata-Version: 1.0 Name: Scrapy Version: 0.14.4 Summary: A high-level Python Screen Scraping framework Home-page: http://scrapy.org Author: Pablo Hoffman Author-email: pablo@pablohoffman.com License: BSD Description: Scrapy is a high level scraping and web crawling framework for writing spiders to crawl and parse web pages for all kinds of purposes, from information retrieval to monitoring or testing web sites. Platform: UNKNOWN Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 2.5 Classifier: Programming Language :: Python :: 2.6 Classifier: Programming Language :: Python :: 2.7 Classifier: License :: OSI Approved :: BSD License Classifier: Operating System :: OS Independent Classifier: Development Status :: 5 - Production/Stable Classifier: Intended Audience :: Developers Classifier: Environment :: Console Classifier: Topic :: Software Development :: Libraries :: Application Frameworks Classifier: Topic :: Software Development :: Libraries :: Python Modules Classifier: Topic :: Internet :: WWW/HTTP Scrapy-0.14.4/extras/0000700000016101777760000000000011754532077014450 5ustar buildbotnogroupScrapy-0.14.4/extras/scrapy.10000600000016101777760000000377611754531743016051 0ustar buildbotnogroup.TH SCRAPY 1 "October 17, 2009" .SH NAME scrapy \- the Scrapy command-line tool .SH SYNOPSIS .B scrapy [\fIcommand\fR] [\fIOPTIONS\fR] ... .SH DESCRIPTION .PP Scrapy is controlled through the \fBscrapy\fR command-line tool. The script provides several commands, for different purposes. Each command supports its own particular syntax. In other words, each command supports a different set of arguments and options. 
.SH OPTIONS .SS fetch\fR [\fIOPTION\fR] \fIURL\fR .TP Fetch a URL using the Scrapy downloader .TP .I --headers Print response HTTP headers instead of body .SS runspider\fR [\fIOPTION\fR] \fIspiderfile\fR Run a spider .TP .I --output=FILE Store scraped items to FILE in XML format .SS settings [\fIOPTION\fR] Query Scrapy settings .TP .I --get=SETTING Print raw setting value .TP .I --getbool=SETTING Print setting value, interpreted as a boolean .TP .I --getint=SETTING Print setting value, interpreted as an integer .TP .I --getfloat=SETTING Print setting value, interpreted as a float .TP .I --getlist=SETTING Print setting value, interpreted as a list .TP .I --init Print initial setting value (before loading extensions and spiders) .SS shell\fR \fIURL\fR | \fIfile\fR Launch the interactive scraping console .SS startproject\fR \fIprojectname\fR Create new project with an initial project template .SS --help, -h Print command help and options .SS --logfile=FILE Log file. If omitted stderr will be used .SS --loglevel=LEVEL, -L LEVEL Log level (default: None) .SS --nolog Disable logging completely .SS --spider=SPIDER Always use this spider when arguments are URLs .SS --profile=FILE Write python cProfile stats to FILE .SS --lsprof=FILE Write lsprof profiling stats to FILE .SS --pidfile=FILE Write process ID to FILE .SS --set=NAME=VALUE, -s NAME=VALUE Set/override setting (may be repeated) .SH AUTHOR Scrapy was written by the Scrapy Developers . .PP This manual page was written by Ignace Mouzannar , for the Debian project (but may be used by others). Scrapy-0.14.4/extras/setup_wininst.bmp0000600000016101777760000011745611754531743020072 0ustar buildbotnogroup[binary bitmap image data omitted: Windows installer banner, not reproducible as text]
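Illustrative usage of the scrapy(1) commands documented in the man page above (an editorial sketch, not part of the original distribution; the URL, the spider file name and the project name are placeholders, while BOT_NAME and DOWNLOAD_DELAY are standard Scrapy settings):

    scrapy fetch --headers http://example.com/
    scrapy runspider --output=items.xml myspider.py
    scrapy settings --get=BOT_NAME
    scrapy settings --getfloat=DOWNLOAD_DELAY
    scrapy shell http://example.com/
    scrapy startproject myproject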
mБяјњїБ!?ШџџџџіН+!Бџљџџџџџџэn ?ЬїџџџџїјЃ {ЬX mэџџџџџџџџџџџџџџџџџџџџџџџџџѕ‘ uэњј˜  +Йњџљџџџџџџџџџџџћџэm ‘јњћџџџџџџџє› ?|ЄНШ  +ШџџџџіН+!Бџљџџџџџџє{ +Оџџџџџњ№{  ›я}  +УџџџџџџџџџџџџџџџџџџџџџџџџїГD –јњє‘ +Оіџџџџџџџџџџџџџїџэm ‘јњїџџџџџџџїтŒ  +++ +ШџџџџіН+ Бџџџџџџџџє‡ (ДџџџџџїШD (Дј ЃїџџџџџџџџџџџџџџџџџџџџџњњШs 5Йіњј› +Ніњџџџџџџџџџџџџџџэm ‘јџїџџџџџџџџїщ–D   ,УџџџџіН+ БџџџџџџџџјŒ ­џџџџџџЊ  XЬњД+|яњџџџџџџџџџџџџџџџџџџџџњЬ}ˆєњњј Ињњџџџџџџџџџџџџџїэm‘јџїџџџџџџџџџџѕЙˆD+УџџџџіН+БџџџџџџџџљŒ ­џџџџџє| |єњЬDWЬњџџџџџџџџџџџџџџџџџїїњз| WОџџњњЈ ­њљљџџџџџџџџџџџџџэm‘јџџџџџџџџџџџџњњяУ |eD,*, +УџџџџіН+Аџџџџџџџџјˆ ­џџљџџЬW њџяuАњџџџџџџџџџџџџџџџџџџїпˆ   ,ЄјіџџњЙ –јњџџџџџџџџџџџџџњяs  ‘јџџџџџџџџџџџџџџџњњєтЬУУ +УџџџџіН+  Аџџџџџџџџє}  ГџџџњњЈ *Оіџј‘ ‘єџџџџџџџџџџџџџџџџџџг‰ єјџџџњЬG {яљћџџџџџџџџџџџџџяs ‘јџџџџџџџџџџџџџџџњџњїјњњА +УџџџџіН, Аџџџџџџџљяm +Йџџџіѕˆ mщџџџ­ fщџџџџџџџџџџџџџџџјїг–яџџџџџјэs WЭњљџџџџџџџџџџџџџяs ‘јџџџџџџџџџџџџџџџџџџџџџџД ,УџџџџіО*АџџџџџїџњзWDЬїџџігW ‘їїџћШ4 *НџџџџџџџџџџџџџџџїЭ~ –яїџџџџџњј  Єљїџџџџџџџџџџџџџяs‘јџџџџџџџџџџџџџџџџџџџџџџА *УџџџџіО*АџџџџџџњњДfтџџџїААњџџџэoЄљџџџџџџџџџџџџџџЭ|*єіњџџџџџњњУG mЬџџџџџџїџџџџџџџяs‰ѕњџџџџџџџџџџєЫтљџџџџџџџЈ *ШџџџџіО,Аџџџџџџњє‰‰ѕџџџє‰Wзњњџџј ƒєџџџџџџџџџџџњјЫ4Єєџџџџџџџїџџє‰‰нїњљњњњјьщјњџњэo8нєјєяяјїњњј Gf–Ищјњњјј’GЬџџџџњН*|ОјњњњљјЈ*­њџџџг]єџџџџњЈ YнїњџџџџџџџџџїнƒGЄјіџџџџџџџџџџџОY|Гт№№тЙ›vsЙљњњн]]‰vvЄјњњнf*q’АУШЉWgтіџїњџД]УщщЭžGoнњџџіАЄџџџџїњШ8ЙњїїџџџџџџњџѕGЈјњџџџџџџџџџџџџѕž*fvvf*’ѕњњУ4fнњјО44ƒ№іџћњњЄGggGЄњњііє‰8УџџџџљњщhžјїљџџџџџџјїЉGЈјџџџџџџџџџџџџџџјь’hщјїІКњјКЈњџџљјє‰‰яџџњњЭYvяїџџџљњѕvяњџџџџџџџјнs8Јјјџџџџџџџџџџџџџџњјщ–#qнјјД]YЉіјэ’8vтњџџџњя–48ƒбџџџјїЉžњїџџџџњљІ]нњџџџџџџџїЉžјџџџџџџџџџџџџџџџџџџіѕЉqxКџџџњУ–ghШž[#АљџџєК‰@vОїџџџџџџэЉv@qЏЙƒ%@•нїџџџњљГ@[ШњџџџџџџіУ[hщњџџџџџџџєƒ~эљџџџџџџџџџџџџџџџџџћіњњтЏ’h@%%%8gАщџџџџџџјщщєїјгЉƒ[@[xЉ№њџџџњѕШІ‰g@%%%@qžЭњџџџџџџџџњэбтјіяЙ’g4%%8h–ОѕњџџџџњњєКƒvxЉњњџџџџџџњљКž’–žГіњџџџџџџџн[#АјњџџџџџџџџџџџџџџџџџџџњњњњјтЭУООШтєњјџџџџџџїњјњњњњњ№тбняњјњџџџњњњњѕщЫУООУЭэјїњџџџџџџџџіњњњњњњјєтУОКОЫтјіњџџџџџњњњњјєяяјљњџџџџџџњїїјјљњњњњїџџџџџџб8]нњџџџџџџџџџџџџџџџџџџџџџљџџџџџџљџџџџџџџџџџџљџџџџџџџџџџџџџџџџџџџїљџџџџџџџџџџџџџџџџџџџџџџџџџџџџџїџџџџџџџџџџџџџџџџџџџџџџџїџџџџџџџџџџџџџџџџџџїџџџџџџџЫ8\щњіџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџб@\нјїџџџџїџџџџљџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџэq#Гїњџџџџџјњњњњџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџљ•xбњџћїїџітУбјџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџїО@vЉЭттгК•h#\Кџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџњљџџіє’@[[@#‰ѕџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџїњџџџњњнƒ\нџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџїњџџћџџјб‰ƒэџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџњїяІv#\žнџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџїїџџџџџџњњњщКžxh\[[hx•Гнјњџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџ
џџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџњџџџџџџџџџїџјѕщтттщєјџџїљџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџїџџџџџџџџџџџњњњњњњњџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџїџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџ
џџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџњїїїњјњњњџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџњїлеюїџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџњфЪХеъїњњџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџњьЪЦЦХЪфјњњџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџюЪЦЦЦЦЦЪлѓіњџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџїїеХЦЦЦЦЦЦЦеюјњџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџїілХЦЦЩЯЩЯЩЩЦЪъііџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџіъЪЦЦЦЩЯЯЯЯЩЩЩЪфѓїџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџњюЪЦЦЦЦЩЯЯЯЯЩЩЩЦЦеѓџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџѓеЦЦЦЩЩЯЯЯЯЯЯЯЩЩЦЦеюњџџњњџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџњњілЦЦЦЦЩЩаЯЯЯаЯЯЯЯЩЩЦЪфјњњњњџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџњїфЪЦЩЩЩЩЯдЯЯЯааЯЯЯЯЯЯХЪфёїїњџџџџџџџџџ
џџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџїіъЪЩЦЩЯЯЯЯдЯааддаааЯаЯЩЩЪеюїњџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџюЪЦЩЦЩЯЯдддЯааааадддаЯЯЯЩХЮёњџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџњџњѓеХЩЦЦЩЯЯдддаааЯЯаадддЯЯаЯЩЦЩфѓіџњџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџїлЦЦЯЩЩЯаЯдждддаааааддддддЯЯЩЦЦлѓњњџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџњњіфЪЯЯдЯажжжжжжжждджджжжджжжддааЩЪеюњїџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџњњюЪЦЩЯЯЯЯжжжжижжжжжжжжжжжжиждддЯЯЩХеюіџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџёеЦЩЩЯЯЯЯждддждддждддддддджддждддЯЯЪеюџїїџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџњњњѓлЪЩЯЩЯЯаажддджддддддждЯаажждддаадаЯЯЩдюћњњњџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџілЪЪЩажжджджддждддддджидддджждддддддддЯЦЮюјњњџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџњџњїъЪЪЩЩЯджжжикижйижжжжджижддджддддджждддЯЯЪеюњњџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџњњїюЮЦЩЯЯЯдждджжжжййийиииййижжжжддддджждддаЯЯЪеюїџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџњњѓеЦЦЯЯЯджждджждджджжжжжййийййжжжжжжжжжжждддЯЪлюџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџњѓлЩЦЩЯЯджижжжжжжжжжжжжджжжжжйижииииййккйййижддЯиѓњњњџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџњџџњјфЪЩЩЩЯажжижжжжжжжжжжжжжжжжжжйижжжжжжжжиийййжжжаЩеѓњњџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџњњњъЪЪЩЩЯджиижжжиижжжжжжжжжижжжжжижджжжжжжжжижжжжждаЮлѓњњџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџњњёЮХЯЯЯЯджииижжжижжижжжжжжйжжжжджижжжжжжжииижжжиждаЯЪлѓїџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџњѓлЦЩЯЯЯджжииижжжижжйжжжжжжижжжижжиижжжжжжйиижжжйжждаЯЪфѓџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџњїѓлЪЩЩааадйийиижжиижийжжжжжийжжиижжийижжжжийижжжийжжждаЯЯфјњњњџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџїіъЪЦЩЯадджкййиижжийжийжжиижйижийижжиййиижжййжжжжйижжжжддЯЯёїїњњџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџїіюеЪЩЯджжжикййиижжийжййжиииийииййииииййижжйкииижжйижииждддЯеъіџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџїѓлЪЯЯаийийкокйййиийкжкйииииййиййииииииийййойиииийкиииижжжждЯдюњњџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџѓлЮЪЯЯаикжйооококййккикйиииийййййииййииийккоиийииккиииижжжйжаЯлёџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџњњџјфЪдЯЯЯдйкжиойййккййккооккйкккйййиииииииийоойийиийййийиижжийждаЪлѕњџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџ
џџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџњњїъЮЪжЯЯджкоийоккйкйииккккйййоооооокккйййййкоокйййийййййййййййижжЮЮфњџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџњњюЮЩЯжЯдджорйороойкййиккккйййккйкокййккооорррррокккокккккккооойййидеюїїџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџѓеЩЯдиджжйррйроооккккйокккйййккйкккйййййккоооороккорккоооккоокккййжаеюњњџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџїїџџџџјлЪЯадиджикрооркокойкййококйийокйккййййййкокйккккйиккийййййкокйккййидЯлѓњџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџїїїњњјфЪЪЮедииийкросскркойкккооркййоокооккккккйкркккокоококйййййкоокйкккййждЯфїџџџџњџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџїїїњъЮЩЯджийиййоскусооооккокорркккрокрооооокйкооккооккоррккккккооккооокккйидеъіџџџњџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџїїюеЩЯажжйкйккрсоусооооккооосрккррккрооооокйороооооокосоккккккроккоооооокииееѓџњџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџѓлЪЯЯжижроккксурссорорккоорсроорркооооооооороооороокосооооккороккоооооооккжафјњџњџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџњїлЪЯЯаиййурооосурссорррооооосоорроосоооооорроооорсоокрсрооооорркоооооорркйкждеъњџїџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџњњјњњїъЪЯЯадкйкцррооуурсурррсооооруоорооосооороорррррррроррррсрооросррррррррррооокиджюњњџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџњџњњњюЮЩЯаджойкцррррусрсусрссооорсурсррррсооррррррррррсрорррррсроррсррррррррррорройжафїџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџњїіюдЩЯаджксооцсрссцсссуурсррррсуурсрррссрррррссрррррссррссррссррссрррррррррррссрокидъњџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџњїѓлЩЯаджирсрруссссцррссурурррррсуссрррурррррсссссссссссрссроссррсррсссссссссссрррокдлёџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџњіфЯЯаддлкссрруррсуцрссуцрусррррруусрррсррррсуссссррссссрсссррууусррсссссссссссрррркждъјџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџњџѓеЩЯадйкрссрсцрррццруууцуууррссуцусссууссрсууссссррсссссууусссцусссссссссссссссссрокжжѓџџњџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџњњюлЯЯдикссссуцррсцусуууцсууссссццусссуусссууссссссссссссууусссцуссссссссссссссссссоридфїџњџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџњњїѓфеажкрррсуцссуцссууцусссссссццуссууусссусссссуууусссууууууууцусссссссссссссссссрсойеюіџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџњњњџњј№феиккррсцссццсуцуццсссссссццуууууууууусссссууцусссуцууууццццуууууууууууууууусссоодфіџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџњіюфжийоруууццууцуццуусссуцццуууцууццууууусусуцууууцууууцццццуссууууууууууууусссррилѓњџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџѓърджйросцууццуццццууууцццццццццццууууууусццуууццууууцууцццуууууууууууууууууусскеъјџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџіїюфежксцууцццццццууууцццууууцццуууццццууццуууцццууццууууцццццццццццццццццууссойлѓџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџњџџџњњњїёфлкрорууццуцццууццццууууццццууцццууцццццццццуццууууццццццццццццццццццццуусоеюїџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџњџњњњњњњїјёъфижоосссуцццццццццццццццццццццууцццццццццццццццццуццццццццццццццццццццурйфїџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџііюфлйокрссуууццч
цууццццццццццццццццццуццццццццццууццццццццццццццццццццусллїїџљњџїџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџјѕюфлллорсууццццццццццццццццццццццццццццццццццццццццццццццццццццццццхсрляјњњњњџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџјѓюфллорсуццццццццццццццццццццццццццццццццццццццццццццццццццццццццууеЎПѕјїњџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџљїѓюфллллусууцуцццццццццчццццццццццццццццццццццццццццццццццццццццлœ/UœЭјљџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџїїџџњњњџїњіѓюъфлййорсууццццццччцццццццчццццццццццццччцццццццццццццццццфЂCrЋщљџџџљџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџїїїџџџџџџџџџџџјѓъфллкорсууццццццццццчччцццццццчччццччччччччццццццхчфцхЎU /…ПѕїњјџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџїіѓюъфллоррсуцуццччцччцццццчччччччччччцццццццццццхцчъЏb' U™ЭјјїііљљљџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџњњїёёъфллллрсуцццччццччччччччччччччццццццччччхчцшЎj'-' rЂщјїїїїџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџњњњњџџџїіѓюъфллкоруццуццццччччччччччччцццццчччччцъКr32-3  †ЕьїїњџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџїњњњџџџїњњїїѓююъфурооссуцццццццччццччччччччшччъфКr3:-33    CТєљїџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџїїџџџџџџџџџџџџџџњіѓюъффлоорсуццццццччччччччххчцчКy::;;:3-'  bЂщњњїљјњџњњџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџїїџїїѓююъффллллрсуцццччччччхчъТz;M:;;;;7-'   zЗэјњњїџњњџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџњњјјїѓюъффроорсучцццццчТ„LJ=A:::::---'  CФѕњљњњњїџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџњџџџњџџїїїѓюъффлллфслсаLM==AAA:::::3--' UЂпєљњњїџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџњџњњњњњџњјјѓююъффрЮ…JJJJAJAAA:E_P---'   rЎыљњџљљїїџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџїїџџџџџџџџџџџџџџџњјїѓяЌkJJJKJAEAO•КЕz7---'  /†ПєњњњџњџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџііњјђЌyAQJJJALyбъюЋP:7---'  UœЭєїїњњџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџіњњњњєФaMQJJLyЫюёЋO:;77--'' rЉтњњљџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџњјџџїњпЅkJKJJP™ПДw:;:::333-'' /zЗ№џџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџііџњњї№Е„OKIJPkVAAA:::::3---'  CœТљџїџџљџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџћћљџџїџњЭš_KJJMMAEEAA::;:;7---'  bЂнјњњњњњњџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџыЁkIIKJJJAAAA:;;;;7---''  zЗєїїїїњџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџїєМ„VJOJJJJAAA::::::3---' /ФєњїњљџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџїїјЭš^JVJJJJJAAAA:::::----'   
bœгјњџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџїїњїяЌwMMVJJJJAAAA::::7----'' rЎяџїїїџџљџљџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџїїџџїїєФVVQJJJKK==A::::::66--'  /†П№њџјјјљњњџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџњјгЁkJVJJJJJ=AA:2:_P3:---''  U™гјіїњїїњљџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџњњњяАyKROJJJJJAAHЗЉr-3---''' rЋыјїїїњџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџњњџњјФŽ_QVQJJJJArТёъ™7:6:----'  /†ЕьїїњљџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџїћіјњњњїпЅkIVQJQJ=yЭё№Ё::;:7----''   UЭјњњљџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџїћџџџџїїѕЕ„OVVJJJ_ЁФЕy2=;;;77---''' jЂьњљјњїїџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџљљњїјјЭšaRTVJJLk_EAI=A:::77---'''  /™№њњљљљџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџљљњїїїњыЌwIRKQKKJLPAAAA:::;7----''  HЕјїїљљџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџњљљњњїї№М„VKVQQJJJEEAAA:::::33--'''  †ђїїњџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџїџљњњњџџџјЭš_TTVJKIIKJAAAA::::76---''' bгїїњџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџьЌwQVJVJMJJK==AE;;;667----''' /ПїџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџїїѕМŽ_VKVJJJJJ=AAA::;;::----'''  &ЗњњњџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџњїњгЁkRRTQQJJJJJAAA:::-7----'''  &ЗїџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџњњџјѕЌyJTTVVQJJJJAA:P†œ^----'-' 
&ПњџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџїџџџџњѕФaTVVQQJJJJAA…ЪъщЁE3-----'bгїїљљџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџїїјпЅkTRVJVQJJJJ™чъёИ^:<7--3'яїїџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџљіїяЕ„VRVVVJJJK…б№яЂP:<;7-3'HЕјњџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџђјњїјФš_TTVVVQIOЁšjAA<:;32†ыјњџїџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџњјјњїњьЌkTVVVVQMJJLAAAE:A:'bПњњљњїїџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџїџіџџїџњѕМkTRVVVKIMMJAAAI2HЂѕџџџџџїџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџњњјтЕŽkTVVVLOPOJAA:HЁяїџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџњџњіњыМšwaVQIIIMAVyЋѕїњџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџњњњјјџџјягМЁ„ww…Ћгѕїїљџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџјњџџњџџџњњњјящппщяјјљјјјџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџїїџџџџљџџџїџіџџџџџџџїџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџ
Scrapy-0.14.4/extras/scrapy.bat0000600000016101777760000000043711754531743016446 0ustar buildbotnogroup@echo off rem Windows command-line tool for Scrapy setlocal rem Use a full path to Python (relative to this script) as the standard Python rem install does not put python.exe on the PATH...
rem %~dp0 is the directory of this script "%~dp0..\python" "%~dp0scrapy" %* endlocal Scrapy-0.14.4/extras/makedeb.py0000600000016101777760000000214611754531743016416 0ustar buildbotnogroupimport sys, os, glob from subprocess import check_call def build(suffix): for ifn in glob.glob("debian/scrapy.*") + glob.glob("debian/scrapyd.*"): s = open(ifn).read() s = s.replace('SUFFIX', suffix) pre, suf = ifn.split('.', 1) ofn = "%s-%s.%s" % (pre, suffix, suf) with open(ofn, 'w') as of: of.write(s) for ifn in ['debian/control', 'debian/changelog']: s = open(ifn).read() s = s.replace('SUFFIX', suffix) with open(ifn, 'w') as of: of.write(s) check_call('debchange -m -D unstable --force-distribution -v $(python setup.py --version)+$(date +%s) "Automatic build"', \ shell=True) check_call('debuild -us -uc -b', shell=True) def clean(suffix): for f in glob.glob("debian/scrapy-%s.*" % suffix) + \ glob.glob("debian/scrapyd-%s.*" % suffix): os.remove(f) def main(): cmd = sys.argv[1] suffix = '%s.%s' % __import__('scrapy').version_info[:2] if cmd == 'build': build(suffix) elif cmd == 'clean': clean(suffix) if __name__ == '__main__': main() Scrapy-0.14.4/extras/test-scrapyd.sh0000700000016101777760000000471411754531743017436 0ustar buildbotnogroup#!/bin/bash # # This script is a quick system test for Scrapyd that: # # 1. runs scrapyd # 2. creates a new project and deploys it on scrapyd # 3. schedules a spider on scrapyd and waits for it to finish # 4. check the spider scraped the expected data # set -e export PATH=$PATH:$(pwd)/bin export PYTHONPATH=$PYTHONPATH:$(pwd) scrapyd_dir=$(mktemp /tmp/test-scrapyd.XXXXXXX -d) scrapyd_log=$(mktemp /tmp/test-scrapyd.XXXXXXX) scrapy_dir=$(mktemp /tmp/test-scrapyd.XXXXXXX -d) feed_path=$(mktemp /tmp/test-scrapyd.XXXXXXX) twistd -ny extras/scrapyd.tac -d $scrapyd_dir -l $scrapyd_log & cd $scrapy_dir scrapy startproject testproj cd testproj cat > testproj/spiders/insophia.py < scrapy.cfg <>FILES fi if [ -d ${RPM_BUILD_ROOT}/$i ]; then echo %dir $i >>DIRS fi done # Make sure we match foo.pyo and foo.pyc along with foo.py (but only once each) sed -e "/\.py[co]$/d" -e "s/\.py$/.py*/" DIRS FILES >INSTALLED_FILES Scrapy-0.14.4/extras/scrapyd.tac0000600000016101777760000000010411754531743016602 0ustar buildbotnogroupfrom scrapyd import get_application application = get_application() Scrapy-0.14.4/extras/coverage-report.sh0000700000016101777760000000041111754531743020106 0ustar buildbotnogroup# Run tests, generate coverage report and open it on a browser # # Requires: coverage 3.3 or above from http://pypi.python.org/pypi/coverage coverage run --branch $(which trial) --reporter=text scrapy.tests coverage html -i python -m webbrowser htmlcov/index.html Scrapy-0.14.4/extras/scrapy-ws.py0000700000016101777760000000715011754531743016757 0ustar buildbotnogroup#!/usr/bin/env python """ Example script to control a Scrapy server using its JSON-RPC web service. It only provides a reduced functionality as its main purpose is to illustrate how to write a web service client. Feel free to improve or write you own. Also, keep in mind that the JSON-RPC API is not stable. The recommended way for controlling a Scrapy server is through the execution queue (see the "queue" command). 
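For example, against a Scrapy server running locally with the web service
enabled on the default host and port assumed by this script (localhost:6080),
a session might look like this ("somespider" is a placeholder spider name):

    scrapy-ws.py list-available
    scrapy-ws.py list-running
    scrapy-ws.py get-spider-stats somespider
    scrapy-ws.py stop somespider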
""" import sys, optparse, urllib from urlparse import urljoin from scrapy.utils.jsonrpc import jsonrpc_client_call, JsonRpcError from scrapy.utils.py26 import json def get_commands(): return { 'help': cmd_help, 'stop': cmd_stop, 'list-available': cmd_list_available, 'list-running': cmd_list_running, 'list-resources': cmd_list_resources, 'get-global-stats': cmd_get_global_stats, 'get-spider-stats': cmd_get_spider_stats, } def cmd_help(args, opts): """help - list available commands""" print "Available commands:" for _, func in sorted(get_commands().items()): print " ", func.__doc__ def cmd_stop(args, opts): """stop - stop a running spider""" jsonrpc_call(opts, 'crawler/engine', 'close_spider', args[0]) def cmd_list_running(args, opts): """list-running - list running spiders""" for x in json_get(opts, 'crawler/engine/open_spiders'): print x def cmd_list_available(args, opts): """list-available - list name of available spiders""" for x in jsonrpc_call(opts, 'crawler/spiders', 'list'): print x def cmd_list_resources(args, opts): """list-resources - list available web service resources""" for x in json_get(opts, '')['resources']: print x def cmd_get_spider_stats(args, opts): """get-spider-stats - get stats of a running spider""" stats = jsonrpc_call(opts, 'stats', 'get_stats', args[0]) for name, value in stats.items(): print "%-40s %s" % (name, value) def cmd_get_global_stats(args, opts): """get-global-stats - get global stats""" stats = jsonrpc_call(opts, 'stats', 'get_stats') for name, value in stats.items(): print "%-40s %s" % (name, value) def get_wsurl(opts, path): return urljoin("http://%s:%s/"% (opts.host, opts.port), path) def jsonrpc_call(opts, path, method, *args, **kwargs): url = get_wsurl(opts, path) return jsonrpc_client_call(url, method, *args, **kwargs) def json_get(opts, path): url = get_wsurl(opts, path) return json.loads(urllib.urlopen(url).read()) def parse_opts(): usage = "%prog [options] [arg] ..." description = "Scrapy web service control script. Use '%prog help' " \ "to see the list of available commands." 
op = optparse.OptionParser(usage=usage, description=description) op.add_option("-H", dest="host", default="localhost", \ help="Scrapy host to connect to") op.add_option("-P", dest="port", type="int", default=6080, \ help="Scrapy port to connect to") opts, args = op.parse_args() if not args: op.print_help() sys.exit(2) cmdname, cmdargs, opts = args[0], args[1:], opts commands = get_commands() if cmdname not in commands: sys.stderr.write("Unknown command: %s\n\n" % cmdname) cmd_help(None, None) sys.exit(1) return commands[cmdname], cmdargs, opts def main(): cmd, args, opts = parse_opts() try: cmd(args, opts) except IndexError: print cmd.__doc__ except JsonRpcError, e: print str(e) if e.data: print "Server Traceback below:" print e.data if __name__ == '__main__': main() Scrapy-0.14.4/extras/scrapy_bash_completion0000600000016101777760000000127011754531743021123 0ustar buildbotnogroup# bash completion for the Scrapy command-line tool _scrapy_completion() { local cmd cur commands spiders cmd=${COMP_WORDS[1]} cur=${COMP_WORDS[2]} case "$cmd" in crawl|edit) spiders=$(scrapy list 2>/dev/null) || spiders="" COMPREPLY=(${COMPREPLY[@]:-} $(compgen -W "$spiders" -- "$cur")) ;; *) if [ $COMP_CWORD -eq 1 ]; then commands="crawl deploy fetch genspider list parse queue runserver runspider settings shell startproject view" COMPREPLY=(${COMPREPLY[@]:-} $(compgen -W "$commands" -- "$cmd")) fi ;; esac } complete -F _scrapy_completion -o default scrapy Scrapy-0.14.4/README0000600000016101777760000000021411754531743014020 0ustar buildbotnogroupThis is Scrapy, an opensource screen scraping framework written in Python. For more info visit the project home page at http://scrapy.org