[website] [fix] fixes in sitemap and rss generators
diff --git a/startup/website.py b/startup/website.py
index be8eba6..5e6c311 100644
--- a/startup/website.py
+++ b/startup/website.py
@@ -1,5 +1,5 @@
import webnotes, conf, os
-from webnotes.utils import cint, cstr
+from webnotes.utils import cint, cstr, encode
def get_templates_path():
return os.path.join(os.path.dirname(conf.__file__), "app", "website", "templates")
@@ -72,7 +72,7 @@
args[k] = cint(args.get(k) or 0)
args.url = quote(str(get_request_site_address(full_address=True)), str(""))
- args.encoded_title = quote(str(args.title or ""), str(""))
+ args.encoded_title = quote(encode(args.title or ""), str(""))
return args
\ No newline at end of file
diff --git a/website/helpers/blog_feed.py b/website/helpers/blog_feed.py
index 41c203e..c79c5cc 100644
--- a/website/helpers/blog_feed.py
+++ b/website/helpers/blog_feed.py
@@ -49,9 +49,10 @@
def generate():
"""generate rss feed"""
- import webnotes, os
+ import os, urllib
+ import webnotes
from webnotes.model.doc import Document
- from website.helpers.blog import get_blog_content
+ from webnotes.utils import escape_html
host = (os.environ.get('HTTPS') and 'https://' or 'http://') + os.environ.get('HTTP_HOST')
@@ -62,7 +63,8 @@
order by published_on desc limit 20""", as_dict=1)
for blog in blog_list:
- blog.link = host + '/' + blog.name + '.html'
+ blog.link = host + '/' + urllib.quote(blog.name) + '.html'
+ blog.content = escape_html(blog.content or "")
items += rss_item % blog
diff --git a/website/helpers/sitemap.py b/website/helpers/sitemap.py
index c8b6fd0..3956da1 100644
--- a/website/helpers/sitemap.py
+++ b/website/helpers/sitemap.py
@@ -2,6 +2,7 @@
# License: GNU General Public License (v3). For more information see license.txt
from __future__ import unicode_literals
+
frame_xml = """<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">%s
</urlset>"""
@@ -32,10 +33,11 @@
for p in pages:
if count >= max_items: break
- page_url = os.path.join(domain, urllib.quote(p[0]))
- modified = p[1].strftime('%Y-%m-%d')
- site_map += link_xml % (page_url, modified)
- count += 1
+ if p[0]:
+ page_url = os.path.join(domain, urllib.quote(p[0]))
+ modified = p[1].strftime('%Y-%m-%d')
+ site_map += link_xml % (page_url, modified)
+ count += 1
if count >= max_items: break