#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
全球中文新闻推送脚本
信源：BBC 中文、路透社中文、纽约时报中文、华尔街日报中文、金融时报中文、联合早报、日经中文等
涵盖：热点、财经、科技、国际、社会等各类新闻
特点：重大、及时、权威、全中文
"""

import requests
import json
from datetime import datetime
import re

def get_bbc_chinese():
    """Fetch top headlines from the BBC Chinese (simplified) RSS feed.

    Returns:
        A newline-joined bullet list of up to 5 titles, the string
        "暂无数据" when the feed yields no items, or "获取失败：..."
        with the error message on failure.
    """
    try:
        url = "https://feeds.bbci.co.uk/zhongwen/simp/rss.xml"
        headers = {"User-Agent": "Mozilla/5.0"}
        resp = requests.get(url, headers=headers, timeout=10)
        # Surface HTTP errors (403/500/...) as "获取失败" instead of
        # silently parsing an error page and reporting "暂无数据".
        resp.raise_for_status()

        news = []
        for item in re.findall(r'<item>(.*?)</item>', resp.text, re.DOTALL)[:5]:
            # BBC wraps titles in CDATA sections.
            title_match = re.search(r'<title><!\[CDATA\[(.*?)\]\]>', item)
            if title_match:
                news.append(f"• {title_match.group(1)}")

        return "\n".join(news) if news else "暂无数据"
    except Exception as e:
        return f"获取失败：{str(e)}"

def get_bbc_world_chinese():
    """Fetch top headlines from the BBC Chinese world-news RSS feed.

    Returns:
        A newline-joined bullet list of up to 3 titles, "暂无数据"
        when the feed yields no items, or "获取失败：..." on failure.
    """
    try:
        url = "https://feeds.bbci.co.uk/zhongwen/simp/world/rss.xml"
        headers = {"User-Agent": "Mozilla/5.0"}
        resp = requests.get(url, headers=headers, timeout=10)
        # Surface HTTP errors instead of misreporting them as "暂无数据".
        resp.raise_for_status()

        news = []
        for item in re.findall(r'<item>(.*?)</item>', resp.text, re.DOTALL)[:3]:
            # BBC wraps titles in CDATA sections.
            title_match = re.search(r'<title><!\[CDATA\[(.*?)\]\]>', item)
            if title_match:
                news.append(f"• {title_match.group(1)}")

        return "\n".join(news) if news else "暂无数据"
    except Exception as e:
        return f"获取失败：{str(e)}"

def get_reuters_chinese():
    """Scrape latest headlines from the Reuters China section page.

    Reuters offers no convenient Chinese RSS feed, so this scrapes the
    HTML directly, trying two known markup variants for headlines.

    Returns:
        Up to 5 bullet titles, "暂无数据" when nothing usable is found,
        or "获取失败：..." on failure.
    """
    try:
        url = "https://www.reuters.com/world/china/"
        headers = {"User-Agent": "Mozilla/5.0"}
        resp = requests.get(url, headers=headers, timeout=10)
        # Surface HTTP errors instead of misreporting them as "暂无数据".
        resp.raise_for_status()

        # Try the primary headline markup, then a fallback variant.
        titles = re.findall(r'<h2[^>]*class="[^"]*media__title[^"]*"[^>]*>([^<]+)</h2>', resp.text)[:5]
        if not titles:
            titles = re.findall(r'<span[^>]*class="[^"]*headline[^"]*"[^>]*>([^<]+)</span>', resp.text)[:5]

        # Drop junk matches (≤5 chars). Fix: the original returned an
        # empty string — not "暂无数据" — when every title was filtered out.
        news = [f"• {t.strip()}" for t in titles if len(t.strip()) > 5]
        return "\n".join(news) if news else "暂无数据"
    except Exception as e:
        return f"获取失败：{str(e)}"

def get_nyt_chinese():
    """Fetch top headlines from the New York Times Chinese RSS feed.

    Returns:
        Up to 5 bullet titles, "暂无数据" when the feed yields no
        usable items, or "获取失败：..." on failure.
    """
    try:
        url = "https://cn.nytimes.com/rss/"
        headers = {"User-Agent": "Mozilla/5.0"}
        resp = requests.get(url, headers=headers, timeout=10)
        # Surface HTTP errors instead of misreporting them as "暂无数据".
        resp.raise_for_status()

        news = []
        for item in re.findall(r'<item>(.*?)</item>', resp.text, re.DOTALL)[:5]:
            title_match = re.search(r'<title>(.*?)</title>', item)
            if title_match:
                # Titles may or may not be CDATA-wrapped; strip the markers.
                title = title_match.group(1).replace('<![CDATA[', '').replace(']]>', '').strip()
                if len(title) > 5:  # skip junk/empty titles
                    news.append(f"• {title}")

        return "\n".join(news) if news else "暂无数据"
    except Exception as e:
        return f"获取失败：{str(e)}"

def get_wsj_chinese():
    """Fetch top headlines from the Wall Street Journal Chinese RSS feed.

    Returns:
        Up to 5 bullet titles, "暂无数据" when the feed yields no
        usable items, or "获取失败：..." on failure.
    """
    try:
        url = "https://cn.wsj.com/rss"
        headers = {"User-Agent": "Mozilla/5.0"}
        resp = requests.get(url, headers=headers, timeout=10)
        # Surface HTTP errors instead of misreporting them as "暂无数据".
        resp.raise_for_status()

        news = []
        for item in re.findall(r'<item>(.*?)</item>', resp.text, re.DOTALL)[:5]:
            title_match = re.search(r'<title>(.*?)</title>', item)
            if title_match:
                # Titles may or may not be CDATA-wrapped; strip the markers.
                title = title_match.group(1).replace('<![CDATA[', '').replace(']]>', '').strip()
                if len(title) > 5:  # skip junk/empty titles
                    news.append(f"• {title}")

        return "\n".join(news) if news else "暂无数据"
    except Exception as e:
        return f"获取失败：{str(e)}"

def get_ft_chinese():
    """Fetch top headlines from the Financial Times Chinese RSS feed.

    Returns:
        Up to 5 bullet titles, "暂无数据" when the feed yields no
        usable items, or "获取失败：..." on failure.
    """
    try:
        url = "http://www.ftchinese.com/feed"
        headers = {"User-Agent": "Mozilla/5.0"}
        resp = requests.get(url, headers=headers, timeout=10)
        # Surface HTTP errors instead of misreporting them as "暂无数据".
        resp.raise_for_status()

        news = []
        for item in re.findall(r'<item>(.*?)</item>', resp.text, re.DOTALL)[:5]:
            title_match = re.search(r'<title>(.*?)</title>', item)
            if title_match:
                # Titles may or may not be CDATA-wrapped; strip the markers.
                title = title_match.group(1).replace('<![CDATA[', '').replace(']]>', '').strip()
                if len(title) > 5:  # skip junk/empty titles
                    news.append(f"• {title}")

        return "\n".join(news) if news else "暂无数据"
    except Exception as e:
        return f"获取失败：{str(e)}"

def get_zhnews_china():
    """Fetch top headlines from Lianhe Zaobao's real-time China feed.

    Returns:
        Up to 5 bullet titles, "暂无数据" when the feed yields no
        usable items, or "获取失败：..." on failure.
    """
    try:
        url = "https://www.zaobao.com.sg/rss/realtime/china.xml"
        headers = {"User-Agent": "Mozilla/5.0"}
        resp = requests.get(url, headers=headers, timeout=10)
        # Surface HTTP errors instead of misreporting them as "暂无数据".
        resp.raise_for_status()

        news = []
        for item in re.findall(r'<item>(.*?)</item>', resp.text, re.DOTALL)[:5]:
            title_match = re.search(r'<title>(.*?)</title>', item)
            if title_match:
                # Titles may or may not be CDATA-wrapped; strip the markers.
                title = title_match.group(1).replace('<![CDATA[', '').replace(']]>', '').strip()
                if len(title) > 5:  # skip junk/empty titles
                    news.append(f"• {title}")

        return "\n".join(news) if news else "暂无数据"
    except Exception as e:
        return f"获取失败：{str(e)}"

def get_zhnews_world():
    """Fetch top headlines from Lianhe Zaobao's real-time world feed.

    Returns:
        Up to 5 bullet titles, "暂无数据" when the feed yields no
        usable items, or "获取失败：..." on failure.
    """
    try:
        url = "https://www.zaobao.com.sg/rss/realtime/world.xml"
        headers = {"User-Agent": "Mozilla/5.0"}
        resp = requests.get(url, headers=headers, timeout=10)
        # Surface HTTP errors instead of misreporting them as "暂无数据".
        resp.raise_for_status()

        news = []
        for item in re.findall(r'<item>(.*?)</item>', resp.text, re.DOTALL)[:5]:
            title_match = re.search(r'<title>(.*?)</title>', item)
            if title_match:
                # Titles may or may not be CDATA-wrapped; strip the markers.
                title = title_match.group(1).replace('<![CDATA[', '').replace(']]>', '').strip()
                if len(title) > 5:  # skip junk/empty titles
                    news.append(f"• {title}")

        return "\n".join(news) if news else "暂无数据"
    except Exception as e:
        return f"获取失败：{str(e)}"

def get_nikkei_chinese():
    """Fetch top headlines from the Nikkei Chinese RSS feed.

    Returns:
        Up to 5 bullet titles, "暂无数据" when the feed yields no
        usable items, or "获取失败：..." on failure.
    """
    try:
        url = "https://cn.nikkei.com/rssfeed.xml"
        headers = {"User-Agent": "Mozilla/5.0"}
        resp = requests.get(url, headers=headers, timeout=10)
        # Surface HTTP errors instead of misreporting them as "暂无数据".
        resp.raise_for_status()

        news = []
        for item in re.findall(r'<item>(.*?)</item>', resp.text, re.DOTALL)[:5]:
            title_match = re.search(r'<title>(.*?)</title>', item)
            if title_match:
                # Titles may or may not be CDATA-wrapped; strip the markers.
                title = title_match.group(1).replace('<![CDATA[', '').replace(']]>', '').strip()
                if len(title) > 5:  # skip junk/empty titles
                    news.append(f"• {title}")

        return "\n".join(news) if news else "暂无数据"
    except Exception as e:
        return f"获取失败：{str(e)}"

def get_chinadialogue():
    """Fetch top headlines from China Dialogue (environment news) RSS feed.

    Returns:
        Up to 3 bullet titles, "暂无数据" when the feed yields no
        usable items, or "获取失败：..." on failure.
    """
    try:
        url = "https://chinadialogue.net/zh/feed/"
        headers = {"User-Agent": "Mozilla/5.0"}
        resp = requests.get(url, headers=headers, timeout=10)
        # Surface HTTP errors instead of misreporting them as "暂无数据".
        resp.raise_for_status()

        news = []
        for item in re.findall(r'<item>(.*?)</item>', resp.text, re.DOTALL)[:3]:
            title_match = re.search(r'<title>(.*?)</title>', item)
            if title_match:
                # Titles may or may not be CDATA-wrapped; strip the markers.
                title = title_match.group(1).replace('<![CDATA[', '').replace(']]>', '').strip()
                if len(title) > 5:  # skip junk/empty titles
                    news.append(f"• {title}")

        return "\n".join(news) if news else "暂无数据"
    except Exception as e:
        return f"获取失败：{str(e)}"

def get_tech_chinese():
    """Fetch top tech headlines from the 36Kr RSS feed.

    Returns:
        Up to 3 bullet titles (each truncated to 50 chars with a
        trailing ellipsis), "暂无数据" when the feed yields no usable
        items, or "获取失败：..." on failure.
    """
    try:
        # 36Kr — Chinese tech/startup news.
        url = "https://36kr.com/feed"
        headers = {"User-Agent": "Mozilla/5.0"}
        resp = requests.get(url, headers=headers, timeout=10)
        # Surface HTTP errors instead of misreporting them as "暂无数据".
        resp.raise_for_status()

        news = []
        for item in re.findall(r'<item>(.*?)</item>', resp.text, re.DOTALL)[:3]:
            title_match = re.search(r'<title>(.*?)</title>', item)
            if title_match:
                # Titles may or may not be CDATA-wrapped; strip the markers.
                title = title_match.group(1).replace('<![CDATA[', '').replace(']]>', '').strip()
                if len(title) > 5:  # skip junk/empty titles
                    news.append(f"• {title[:50]}...")

        return "\n".join(news) if news else "暂无数据"
    except Exception as e:
        return f"获取失败：{str(e)}"

def get_finance_chinese():
    """Fetch top finance headlines from the Caixin RSS feed.

    Returns:
        Up to 5 bullet titles (each truncated to 60 chars with a
        trailing ellipsis), "暂无数据" when the feed yields no usable
        items, or "获取失败：..." on failure.
    """
    try:
        # Caixin — Chinese financial news.
        url = "https://www.caixin.com/rss/finance.xml"
        headers = {"User-Agent": "Mozilla/5.0"}
        resp = requests.get(url, headers=headers, timeout=10)
        # Surface HTTP errors instead of misreporting them as "暂无数据".
        resp.raise_for_status()

        news = []
        for item in re.findall(r'<item>(.*?)</item>', resp.text, re.DOTALL)[:5]:
            title_match = re.search(r'<title>(.*?)</title>', item)
            if title_match:
                # Titles may or may not be CDATA-wrapped; strip the markers.
                title = title_match.group(1).replace('<![CDATA[', '').replace(']]>', '').strip()
                if len(title) > 5:  # skip junk/empty titles
                    news.append(f"• {title[:60]}...")

        return "\n".join(news) if news else "暂无数据"
    except Exception as e:
        return f"获取失败：{str(e)}"

def get_breaking_chinese():
    """Aggregate top items from several feeds into a 'breaking news' digest.

    Takes the first 2 items from each source, tags each headline with
    its source name, and returns at most 5 bullets. A source that
    fails is skipped so the remaining sources still contribute.

    Returns:
        Up to 5 tagged bullet titles, "暂无重大新闻" when no source
        yielded anything, or "获取失败：..." on an unexpected failure.
    """
    try:
        sources = [
            ("BBC", "https://feeds.bbci.co.uk/zhongwen/simp/rss.xml"),
            ("联合早报", "https://www.zaobao.com.sg/rss/realtime/china.xml"),
            ("日经", "https://cn.nikkei.com/rssfeed.xml"),
        ]

        breaking = []
        # Hoisted out of the loop — identical for every source.
        headers = {"User-Agent": "Mozilla/5.0"}
        for name, url in sources:
            try:
                resp = requests.get(url, headers=headers, timeout=5)
                # Treat HTTP errors as a failed source and move on.
                resp.raise_for_status()
                for item in re.findall(r'<item>(.*?)</item>', resp.text, re.DOTALL)[:2]:
                    title_match = re.search(r'<title>(.*?)</title>', item)
                    if title_match:
                        title = title_match.group(1).replace('<![CDATA[', '').replace(']]>', '').strip()
                        if len(title) > 5:  # skip junk/empty titles
                            breaking.append(f"• [{name}] {title}")
            except Exception:
                # Best-effort aggregation: skip unreachable sources.
                # (Was a bare `except:`, which also swallowed
                # KeyboardInterrupt/SystemExit.)
                continue

        return "\n".join(breaking[:5]) if breaking else "暂无重大新闻"
    except Exception as e:
        return f"获取失败：{str(e)}"

def generate_newsletter(morning=True):
    """Assemble the full newsletter text from all news sections.

    Args:
        morning: True for the morning edition ("早报"), False for the
            evening edition ("晚报"); also selects the sign-off line.

    Returns:
        The complete newsletter as a formatted multi-line string.
    """
    # Fix: removed the unused `weekday` local and take a single
    # timestamp so the date is consistent within one call.
    today = datetime.now().strftime("%Y 年 %m 月 %d 日")
    time_str = "早报" if morning else "晚报"

    newsletter = f"""📰 全球中文新闻{time_str} | {today}

🌍 国际焦点
{get_bbc_chinese()}

💰 财经动态
{get_finance_chinese()}

🤖 科技前沿
{get_tech_chinese()}

📊 市场观察
{get_wsj_chinese()}

🇸🇬 亚洲视角
{get_zhnews_china()}

---

信源：BBC 中文 | 路透社中文 | 纽约时报中文 | 华尔街日报中文 | 金融时报中文 | 联合早报 | 日经中文网 | 财新 | 36 氪

祝您{ '一天工作顺利' if morning else '晚安好梦'}！🤖
"""
    return newsletter

if __name__ == "__main__":
    # Edition selection: before 17:00 local time → morning edition,
    # 17:00 onward → evening edition.
    is_morning = datetime.now().hour < 17
    print(generate_newsletter(morning=is_morning))
