Browse source code

md5 for feeds changed to include title and url

Markus Spring 1 year ago
parent
commit
03b2d8fbd5
1 changed file with 18 additions and 43 deletions

blogs-i-read_v2.py  +18 −43

@@ -21,6 +21,8 @@ cronlinks_file = appconfig['blogsiread']['cronlinksfile']
 
 fileConfig('logging_config.ini')
 logger = logging.getLogger("blogs-i-read_v2")
+if os.environ.get('LOGLEVEL'):
+    logger.setLevel(level=os.environ.get('LOGLEVEL', 'WARNING').upper())
 
 with open(blogs_to_read, 'r') as blogfile:
     blogs = json.load(blogfile)
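
A note on the first hunk: the two added lines let a LOGLEVEL environment variable override whatever level logging_config.ini configured, without editing the file. A minimal standalone sketch of the same pattern (the logger name is taken from the script; the ini file is assumed to exist):

```python
import logging
import os

logger = logging.getLogger("blogs-i-read_v2")

# Optional override, e.g.:  LOGLEVEL=debug python blogs-i-read_v2.py
# When LOGLEVEL is unset, the level from logging_config.ini stays in effect.
loglevel = os.environ.get('LOGLEVEL')
if loglevel:
    logger.setLevel(loglevel.upper())
```

Reading the variable once avoids the doubled os.environ.get call in the committed lines, but the behaviour is the same.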
@@ -36,55 +38,28 @@ except:
 
 # Dictionary to store the results
 results = {}
-
-def get_timestamp(ts):
-    logger.debug(ts)
-    if bool(re.search('\dT\d\d:\d\d:\d\dZ$', ts)): # 2024-01-19T16:25:19Z
-        return time.mktime(datetime.strptime(ts, "%Y-%m-%dT%H:%M:%SZ").timetuple())
-    elif bool(re.search('\dT\d\d:\d\d:\d\d[+\-]\d\d', ts)): # 2024-01-30T12:51:31-06:00
-        return time.mktime(datetime.strptime(ts, "%Y-%m-%dT%H:%M:%S%z").timetuple())
-    elif bool(re.search('\dT\d', ts)):             # 2024-01-19T16:25:19Z
-        return time.mktime(datetime.strptime(ts, "%Y-%m-%dT%H:%M:%S.%f%z").timetuple())
-    elif bool(re.search('^\D\D\D.*GMT$', ts)):     # Tue, 09 Jan 2024 14:15:58 GMT
-        return time.mktime(datetime.strptime(ts, "%a, %d %b %Y %H:%M:%S GMT").timetuple())
-    elif bool(re.search('^\D\D\D,', ts)):          # Thu, 01 Feb 2024 11:00:56 +0000
-        return time.mktime(datetime.strptime(ts, "%a, %d %b %Y %H:%M:%S %z").timetuple())
-    else:
-        sys.exit(1)
         
 def examine_feed(url):
     (md5, post_title, post_url, last_update) = get_default_values(url)
-    # logger.debug(f'examine_feed {url}')
-    if url in md5_sums:
-        # get stored values if they exist
-        try:
-            last_update = md5_sums[url]['timestamp']
-            post_title = md5_sums[url]['post_title']
-            post_url = md5_sums[url]['post_url']
-        except:
-            pass
     try:
-    #if True:
+    # if True:
         feed = feedparser.parse(url)
         post_title = feed.entries[0].title
-        md5 = hashlib.md5( post_title.encode('utf-8') + feed.entries[0].updated.encode('utf-8') ).hexdigest()
         post_url = feed.entries[0].link
+        old_md5 = hashlib.md5( post_title.encode('utf-8')
+                               + feed.entries[0].updated.encode('utf-8') ).hexdigest()
+        logger.debug( post_title.encode('utf-8') + post_url.encode('utf-8') )
+        md5 = 'v2_' + hashlib.md5( post_title.encode('utf-8') + post_url.encode('utf-8') ).hexdigest()
        # make it dependent on change
         if url in md5_sums:
-            # logger.debug(f'url {url} in md5_sums')
-            if md5_sums[url]['md5'] != md5:
-                # logger.debug(f'hashes NOT equal')
-                utc_time = datetime.utcnow()
-                last_update = int(time.mktime(utc_time.timetuple())) + met_offset
+            logger.debug('existent feed')
+            if md5_sums[url]['md5'] not in [ md5, old_md5 ]:
+                logger.debug(f'hashes NOT equal')
             else:
-                # logger.debug('hashes are equal')
-                if md5_sums[url]['timestamp'] < 1:
-                    # logger.debug(f'first timestamp')
-                    last_update = get_timestamp(feed.entries[0].updated)
-                else:
-                    # logger.debug('keep timestamp')
-                    last_update = md5_sums[url]['timestamp']
-        # logger.debug( f'{post_title} , {post_url}, {last_update}, {md5}' )
+                logger.debug('hashes equal to old or new')
+                last_update = md5_sums[url]['timestamp']
+        else:
+            logger.debug('new feed')
     except:
         logger.info(f'error when parsing feed {url}')
     return md5, post_title, post_url, last_update
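
This hunk is the commit's core: the stored hash is now built from the entry title plus its link (tagged with a 'v2_' prefix), while the previous title-plus-updated hash is still computed so that sums written before this change don't register as spurious updates. A minimal sketch of the dual-hash check, assuming a feedparser entry exposing title, link, and updated:

```python
import hashlib


def feed_hashes(title: str, link: str, updated: str) -> tuple[str, str]:
    """Return (new_md5, old_md5) for one feed entry.

    new_md5 (title + link, 'v2_' prefix) no longer changes when only the
    'updated' timestamp moves; old_md5 reproduces the pre-change scheme
    and exists purely to recognise hashes stored before the migration.
    """
    new_md5 = 'v2_' + hashlib.md5(
        title.encode('utf-8') + link.encode('utf-8')).hexdigest()
    old_md5 = hashlib.md5(
        title.encode('utf-8') + updated.encode('utf-8')).hexdigest()
    return new_md5, old_md5


new, old = feed_hashes('A post', 'https://example.com/a-post',
                       '2024-01-19T16:25:19Z')
stored = old  # e.g. md5_sums[url]['md5'] written by the old version
unchanged = stored in (new, old)  # True: no update is signalled
```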
@@ -155,7 +130,8 @@ def examine_generic_website(soup, url, md5):
 
 def get_default_values(url):
     # initialize variables, suitable for new urls
-    (md5, post_title, post_url, last_update) = ['', '', '', time.time()]
+    (md5, post_title, post_url, last_update) = ['', '', '',
+                        int(time.mktime(datetime.utcnow().timetuple())) + met_offset]
     if url in md5_sums:
         # get stored values if they exist
         try:
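
In get_default_values, the fallback timestamp for a URL not yet in md5_sums moves from the raw float time.time() to integer UTC epoch seconds shifted by met_offset (defined elsewhere in the script). A side-by-side sketch of the two computations; the met_offset value here is an assumption, and the mktime-over-utcnow combination mirrors the commit's code rather than a strictly correct UTC conversion:

```python
import time
from datetime import datetime

met_offset = 3600  # assumed: UTC-to-MET shift in seconds, set in the real config

old_default = time.time()  # previous default: local epoch time, float

# New default: whole UTC epoch seconds, shifted into MET.
new_default = int(time.mktime(datetime.utcnow().timetuple())) + met_offset
```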
@@ -176,15 +152,12 @@ def examine_url(url):
     response = requests.get(url, cookies=loaded_cookies)
     #if True:
     try:
-        # logger.debug(response.cookies)
         saved_cookies = requests.utils.dict_from_cookiejar(response.cookies)
         cookies_json = json.dumps(saved_cookies, indent=4)
-        # logger.debug(cookies_json)
         md5_sums[url]['cookies'] = saved_cookies
         soup = BeautifulSoup(response.text, 'html.parser')
         all_text = "".join(soup.body.get_text())
         md5 = hashlib.sha256(all_text.encode('utf-8')).hexdigest()
-        #md5 = hashlib.md5(response.content).hexdigest()  # Calculate the MD5 hash
         body = soup.find('body')
         if 'lfi-online.de' in url:
             (md5, post_title, post_url, last_update) = examine_lfionline(soup, url, md5)
@@ -197,6 +170,8 @@ def examine_url(url):
     return md5, post_title, post_url, last_update
 
 def needs_update(url):
+    if len(sys.argv) > 1:
+        return True
     if url not in md5_sums:
         return True
     last_update = md5_sums[url]['timestamp']
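
Lastly, needs_update gains an escape hatch: any command-line argument now forces every feed to be re-examined, bypassing the stored hashes. A sketch of the resulting control flow; the age check at the end is a hypothetical stand-in, since the hunk is truncated here:

```python
import sys
import time


def needs_update(url: str, md5_sums: dict, max_age: int = 3600) -> bool:
    # Any argument (e.g. `python blogs-i-read_v2.py force`) skips the cache.
    if len(sys.argv) > 1:
        return True
    # Unknown URLs always need a first fetch.
    if url not in md5_sums:
        return True
    # Assumed remainder: refresh entries older than max_age seconds.
    return time.time() - md5_sums[url]['timestamp'] > max_age
```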