
Tutorial: Parsing JSON data from a web page with Python and saving it to Excel

This tutorial walks through fetching JSON data from a web endpoint with Python and saving it to an Excel file.

Install the required libraries

The script relies on requests, pandas, and openpyxl (the Excel writer engine used by pandas), plus a few standard-library modules. Install the third-party packages first if needed (for example with pip install requests pandas openpyxl), then import everything:

import requests
import pandas as pd
import os
import sys
import io
import urllib3
import json

Test data

The JSON returned by the web page has the following structure:

{    "success": true,    "code": "CIFM_0000",    "encode": null,    "message": "ok",    "url": null,    "total": 3,    "items": [        {            "summaryDate": "20240611",            "summaryType": "naturalDay",            "workday": true,            "newCustNum": 1,            "haveCustNum": 1691627,            "newAccountNum": 2,            "haveAccountNum": 1692934,            "totalShare": 4947657341.69,            "netCash": -3523387.25,            "yield": 0.01386        },        {            "summaryDate": "20240612",            "summaryType": "naturalDay",            "workday": true,            "newCustNum": 5,            "haveCustNum": 1672766,            "newAccountNum": 5,            "haveAccountNum": 1674071,            "totalShare": 4927109080.29,            "netCash": -20735233.55,            "yield": 0.01387        },        {            "summaryDate": "20240613",            "summaryType": "naturalDay",            "workday": true,            "newCustNum": 4,            "haveCustNum": 1662839,            "newAccountNum": 5,            "haveAccountNum": 1664146,            "totalShare": 4927405885.59,            "netCash": 110659.8,            "yield": 0.01389        }    ],    "data": null,    "info": null}

Complete script

import requests
import pandas as pd
import os
import sys
import io
import urllib3
import json

urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8')

url = "https://ip/ma/web/trade/dailySummary?startDate={pi_startdate}&endDate={pi_enddate}"
headers = {
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
    "Accept-Language": "zh-CN,zh;q=0.9",
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36 Edg/119.0.0.0",
}


def save_data(data, columns, excel_path, sheet_name):
    df = pd.DataFrame(data, columns=columns)
    if not os.path.exists(excel_path):
        df.to_excel(excel_path, sheet_name=sheet_name, index=False)
    else:
        with pd.ExcelWriter(excel_path, engine='openpyxl', mode='a') as writer:
            df.to_excel(writer, sheet_name=sheet_name, index=False)


def json2list(response_text):
    # Convert the JSON string into Python objects
    json_dict = json.loads(response_text)
    src_total = json_dict["total"]
    print("src_total: {}".format(src_total))
    items = json_dict["items"]
    excel_columns = ['summaryDate',
                     'summaryType',
                     'workday',
                     'newCustNum',
                     'haveCustNum',
                     'newAccountNum',
                     'haveAccountNum',
                     'totalShare',
                     'netCash',
                     'yield'
                     ]
    excel_data = []
    # Walk through each item and collect the column values as strings
    for item in items:
        excel_row_data = []
        for column_index in range(len(excel_columns)):
            data = str(item[excel_columns[column_index]])
            if excel_columns[column_index] == 'workday':
                data = str(0 if data == "False" else 1)
            excel_row_data.append(data)
        excel_data.append(excel_row_data)
    trg_total = len(excel_data)
    # Audit: the extracted row count must match the "total" field in the response
    print("trg_total: {}".format(trg_total))
    vn_biasval = trg_total - src_total
    if vn_biasval != 0:
        print("This audit-rule is not passed, diff: {}".format(vn_biasval))
        exit(-1)
    else:
        print("This audit-rule is passed, diff: {}".format(vn_biasval))
    return excel_columns, excel_data


if __name__ == '__main__':
    try:
        excel_path = "C:/xxx/temp/ylb_dailySummary_{pi_startdate}_{pi_enddate}.xlsx"
        sheet_name = 'result_data'
        pi_startdate = 20240611
        pi_enddate = 20240613
        excel_path = excel_path.format(pi_startdate=pi_startdate, pi_enddate=pi_enddate)
        url = url.format(pi_startdate=pi_startdate, pi_enddate=pi_enddate)
        print("url:{}".format(url))
        print("excel_path:{}".format(excel_path))
        response_text = requests.get(url, headers=headers, timeout=(21, 300), verify=False).content.decode("utf8")
        excel_columns, excel_data = json2list(response_text)
        print("=================excel_columns=======================")
        print(excel_columns)
        print("=================excel_data==========================")
        for x in excel_data:
            print(x)
        print("=====================================================")
        # If the output file already exists, delete it first
        if os.path.exists(excel_path):
            os.remove(excel_path)
        # Save the data
        save_data(excel_data, excel_columns, excel_path, sheet_name)
        print("save_data is end.")
    except Exception as e:
        print("[ERROR]:" + str(e))
        exit(-1)

Code walkthrough

1. Request headers

Construct the request URL and headers, along with two pieces of setup: suppressing the InsecureRequestWarning that verify=False would otherwise print, and forcing UTF-8 console output:

urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8')

url = "https://ip/ma/web/trade/dailySummary?startDate={pi_startdate}&endDate={pi_enddate}"
headers = {
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
    "Accept-Language": "zh-CN,zh;q=0.9",
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36 Edg/119.0.0.0",
}
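For reference, the fetch itself (an excerpt from the main block of the full script, with comments added) plugs a start and end date into the URL template, sends the headers, and decodes the body as UTF-8. timeout=(21, 300) is requests' (connect, read) timeout pair in seconds, and verify=False skips TLS certificate verification, which is why the InsecureRequestWarning is silenced above.

response_text = requests.get(
    url.format(pi_startdate=20240611, pi_enddate=20240613),
    headers=headers,
    timeout=(21, 300),   # (connect timeout, read timeout) in seconds
    verify=False,        # skip TLS verification; the warning is disabled above
).content.decode("utf8")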

2. Saving the data to Excel

If the Excel file does not exist yet, a new workbook is created; if it already exists, the data is appended to it as an additional sheet via pandas' ExcelWriter in append mode.

def save_data(data, columns, excel_path, sheet_name):
    df = pd.DataFrame(data, columns=columns)
    if not os.path.exists(excel_path):
        df.to_excel(excel_path, sheet_name=sheet_name, index=False)
    else:
        with pd.ExcelWriter(excel_path, engine='openpyxl', mode='a') as writer:
            df.to_excel(writer, sheet_name=sheet_name, index=False)
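A minimal usage sketch (the file name demo.xlsx and the row values are made up for illustration): the first call creates the workbook, the second appends a second sheet because mode='a' reopens the existing file with openpyxl. Each call should use a distinct sheet name; recent pandas versions also expose an if_sheet_exists argument on ExcelWriter to control what happens on a name clash.

cols = ['summaryDate', 'summaryType', 'workday']
rows = [['20240611', 'naturalDay', '1'],
        ['20240612', 'naturalDay', '1']]

save_data(rows, cols, 'demo.xlsx', 'result_data')    # creates demo.xlsx
save_data(rows, cols, 'demo.xlsx', 'result_data_2')  # appends a second sheet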

Parse the JSON to get the column names and the corresponding list of row data. The workday boolean is mapped to 0/1, and a simple audit checks that the number of extracted rows matches the total field in the response:

def json2list(response_text):
    # Convert the JSON string into Python objects
    json_dict = json.loads(response_text)
    src_total = json_dict["total"]
    print("src_total: {}".format(src_total))
    items = json_dict["items"]
    excel_columns = ['summaryDate',
                     'summaryType',
                     'workday',
                     'newCustNum',
                     'haveCustNum',
                     'newAccountNum',
                     'haveAccountNum',
                     'totalShare',
                     'netCash',
                     'yield'
                     ]
    excel_data = []
    # Walk through each item and collect the column values as strings
    for item in items:
        excel_row_data = []
        for column_index in range(len(excel_columns)):
            data = str(item[excel_columns[column_index]])
            if excel_columns[column_index] == 'workday':
                data = str(0 if data == "False" else 1)
            excel_row_data.append(data)
        excel_data.append(excel_row_data)
    trg_total = len(excel_data)
    # Audit: the extracted row count must match the "total" field in the response
    print("trg_total: {}".format(trg_total))
    vn_biasval = trg_total - src_total
    if vn_biasval != 0:
        print("This audit-rule is not passed, diff: {}".format(vn_biasval))
        exit(-1)
    else:
        print("This audit-rule is passed, diff: {}".format(vn_biasval))
    return excel_columns, excel_data
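For example, feeding a one-item payload that follows the structure shown earlier through json2list returns the column list plus one row of strings, with workday mapped to 1 (the values mirror the sample data above; this is not a live response):

sample = json.dumps({
    "total": 1,
    "items": [{
        "summaryDate": "20240611", "summaryType": "naturalDay", "workday": True,
        "newCustNum": 1, "haveCustNum": 1691627, "newAccountNum": 2,
        "haveAccountNum": 1692934, "totalShare": 4947657341.69,
        "netCash": -3523387.25, "yield": 0.01386
    }]
})
excel_columns, excel_data = json2list(sample)
print(excel_data[0])
# ['20240611', 'naturalDay', '1', '1', '1691627', '2', '1692934', '4947657341.69', '-3523387.25', '0.01386']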

3. Entry point

The test entry is the standard main guard shown below. In the full listing above, the main block formats the URL and the output path for the chosen date range, fetches the JSON, converts it with json2list, deletes any previously generated file, and writes the result with save_data.

if __name__ == '__main__':

Test result

Running the script generates the file ylb_dailySummary_20240611_20240613.xlsx.

That covers parsing JSON data from a web page with Python and saving it to Excel. For more on working with web JSON in Python, see the other related articles on this site.

