---- clive-0.3.1/clive/parse.py 2007-10-28 23:11:16.000000000 +0200
-+++ clive-0.3.1.delfi/clive/parse.py 2007-11-21 23:21:45.850962793 +0200
-@@ -52,6 +52,8 @@
- video_url = self._parse_guba(data)
- elif url.find('stage6.divx.com') != -1:
- video_url = self._parse_stage6(data)
-+ elif url.find('delfi.') != -1:
-+ video_url = self._parse_delfi(data)
- else:
- (scheme, host, path, params, query, fragment) = \
- urlparse.urlparse(url)
-@@ -136,6 +138,14 @@
- return self._parse_from_to(data,
- 'http://video.stage6.com/', '&')
+--- clive-0.4.10/src/clive/parse.py 2008-04-21 10:54:55.000000000 +0300
++++ clive-0.4.10-delfi/src/clive/parse.py 2008-04-26 01:09:31.000000000 +0300
+@@ -50,6 +50,7 @@
+ ('dailymotion.', 'dmotion', self._parse_dmotion),
+ ('guba.com', 'guba', self._parse_guba),
+ ('metacafe.', 'metac', self._parse_metacafe),
++ ('delfi.', 'delfi', self._parse_delfi),
+ ]
-+ def _parse_delfi(self, data):
-+ text = self._parse_from_to(data, "flv_url:", ",")
-+ try:
-+ return urllib.unquote(text.replace("'", "").split('flv_url: ',2)[1])
-+ except IndexError:
-+ pass
-+ return ''
+ ## Parses a video page data (HTML)
+@@ -221,6 +222,72 @@
+ url = self._parse_from_to(data, 'mediaURL=', '&', skip_from=1)
+ return (url, vid, low_quality)
+
++ def _graburl(self, url, read=1):
++ from urlgrabber.grabber import URLGrabber, URLGrabError
++ data = ''
++ try:
++ g = URLGrabber(
++ http_headers = (('accept-encoding', 'gzip'),),
++ user_agent = self._opts.http_agent,
++ proxies = self._opts.http_proxy
++ )
++ o = g.urlopen(url)
++ if read:
++ data = o.read()
++ if o.hdr.get('content-encoding') == 'gzip':
++ data = gzip.GzipFile(fileobj=StringIO(data)).read()
++ return data
++ except URLGrabError, err:
++        self._say('%s [%s]' % (err.strerror, url), is_error=1)
++ raise SystemExit
++ return data
++
++ def _parse_delfi(self, url, data, low_quality):
++ url = None
++
++ s = self._parse_from_to(data, "flv_url:", ",")
++ if s:
++ # videobox
++ # http://www.delfi.ee/news/paevauudised/paevavideo/article.php?id=15218215
++ try:
++ url = s.replace("'", "").split('flv_url: ',2)[1]
++ except IndexError:
++ pass
++
++ if not url:
++ # videoproject embed
++ # http://www.delfi.ee/news/paevauudised/paevavideo/article.php?id=18759038
++ s = self._parse_from_to(data, "_delfiVideoSalt", ";")
++ salt = None
++ try:
++ salt = s.split('"',2)[1]
++ except IndexError:
++ pass
+
- def _parse_from_to(self, data, _from, to, skip_from=0):
- start = data.find(_from)
- end = data.find(to, start)
++ if salt != None:
++ # find url to videoproject
++ s = self._parse_from_to(data, "src=", "/js/embed.js")
++ s = s[5 + s.rfind("src"):] + "/video/" + salt + "/"
++ # fetch video page
++ s = self._graburl(s)
++ s = self._parse_from_to(s, "s1.addVariable('file'", ";");
++ try:
++ url = s.split("'")[3]
++ except IndexError:
++ pass
++
++ if not url:
++ # videoproject
++ # http://video.delfi.ee/video/Uu4gF58g/
++ s = self._parse_from_to(data, "s1.addVariable('file'", ";");
++ try:
++ url = s.split("'")[3]
++ except IndexError:
++ pass
++
++ vid = md5.new(str(time.time())).hexdigest()[:8]
++ return (url, vid, low_quality)
++
+ def _random_vid(self):
+ return md5.new(str(time.time())).hexdigest()[:8]
+