Mirror of https://github.com/1nchaos/adata.git (synced 2024-11-25 16:32:39 +08:00)

Merge pull request #110 from Lorry1123/feat/lint_and_format: Feat/lint and format

Commit 022a44171a

@@ -1,7 +0,0 @@
# -*- coding: utf-8 -*-
"""
@desc: readme
@author: 1nchaos
@time: 2023/4/4
@log: change log
"""
@@ -3,7 +3,7 @@
@desc: 新闻&舆情相关的数据
@author: 1nchaos
@time:2023/04/06
@log:
@log:
"""
from adata.sentiment.hot import Hot
from adata.sentiment.north_flow import NorthFlow
@@ -20,4 +20,3 @@ class Sentiment(StockLifting, SecuritiesMargin):


sentiment = Sentiment()

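The Sentiment facade and its module-level singleton are unchanged apart from formatting. A minimal usage sketch of the two mixins it is declared to inherit (method names taken from the securities_margin and stock_lifting hunks further down; the import path for the singleton is an assumption):

from adata.sentiment import sentiment  # assumed path: the module that defines `sentiment = Sentiment()`

# margin trading balances since 2022-01-01 (SecuritiesMargin mixin)
margin_df = sentiment.securities_margin(start_date="2022-01-01")

# share-unlock (lifting) schedule for the recent period (StockLifting mixin)
lifting_df = sentiment.stock_lifting_last_month()

print(margin_df.head(), lifting_df.head())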

@@ -20,12 +20,35 @@ from adata.common.utils import requests
class AList(BaseThs):
"""龙虎榜单"""

__A_LIST_DAILY_COLUMNS = ['trade_date', 'short_name', 'stock_code', 'close', 'change_cpt', 'turnover_ratio',
'a_net_amount', 'a_buy_amount', 'a_sell_amount', 'a_amount', 'amount',
'net_amount_rate', 'a_amount_rate', 'reason']
__A_LIST_DAILY_COLUMNS = [
"trade_date",
"short_name",
"stock_code",
"close",
"change_cpt",
"turnover_ratio",
"a_net_amount",
"a_buy_amount",
"a_sell_amount",
"a_amount",
"amount",
"net_amount_rate",
"a_amount_rate",
"reason",
]

__A_LIST_INFO_COLUMNS = ['trade_date', 'stock_code', 'operate_code', 'operate_name', 'a_buy_amount',
'a_sell_amount', 'a_net_amount', 'a_buy_amount_rate', 'a_sell_amount_rate', 'reason']
__A_LIST_INFO_COLUMNS = [
"trade_date",
"stock_code",
"operate_code",
"operate_name",
"a_buy_amount",
"a_sell_amount",
"a_net_amount",
"a_buy_amount_rate",
"a_sell_amount_rate",
"reason",
]

# 东方财富人气榜
def list_a_list_daily(self, report_date=None):

@@ -40,21 +63,31 @@ class AList(BaseThs):
url = f"https://datacenter-web.eastmoney.com/api/data/v1/get?callback=jQuery1123047223270591945665_1716975719487&sortColumns=SECURITY_CODE,TRADE_DATE&sortTypes=1,-1&pageSize=500&pageNumber=1&reportName=RPT_DAILYBILLBOARD_DETAILSNEW&columns=SECURITY_CODE,SECUCODE,SECURITY_NAME_ABBR,TRADE_DATE,EXPLAIN,CLOSE_PRICE,CHANGE_RATE,BILLBOARD_NET_AMT,BILLBOARD_BUY_AMT,BILLBOARD_SELL_AMT,BILLBOARD_DEAL_AMT,ACCUM_AMOUNT,DEAL_NET_RATIO,DEAL_AMOUNT_RATIO,TURNOVERRATE,FREE_MARKET_CAP,EXPLANATION,D1_CLOSE_ADJCHRATE,D2_CLOSE_ADJCHRATE,D5_CLOSE_ADJCHRATE,D10_CLOSE_ADJCHRATE,SECURITY_TYPE_CODE&source=WEB&client=WEB&filter=(TRADE_DATE<='{report_date}')(TRADE_DATE>='{report_date}')"

# 2. 请求数据
text = requests.request(method='post', url=url).text
res = json.loads(text[text.index('{'):-2])
if res['result'] is None:
text = requests.request(method="post", url=url).text
res = json.loads(text[text.index("{") : -2])
if res["result"] is None:
return pd.DataFrame()
df = pd.DataFrame(res['result']["data"])
df = pd.DataFrame(res["result"]["data"])
# 3. 解析封装数据
rename = {'SECURITY_CODE': 'stock_code', 'SECURITY_NAME_ABBR': 'short_name', 'TRADE_DATE': 'trade_date',
'CLOSE_PRICE': 'close', 'CHANGE_RATE': 'change_cpt', 'TURNOVERRATE': 'turnover_ratio',
'BILLBOARD_NET_AMT': 'a_net_amount', 'BILLBOARD_BUY_AMT': 'a_buy_amount',
'BILLBOARD_SELL_AMT': 'a_sell_amount', 'BILLBOARD_DEAL_AMT': 'a_amount',
'ACCUM_AMOUNT': 'amount', 'DEAL_NET_RATIO': 'net_amount_rate', 'DEAL_AMOUNT_RATIO': 'a_amount_rate',
'EXPLANATION': 'reason', }
rename = {
"SECURITY_CODE": "stock_code",
"SECURITY_NAME_ABBR": "short_name",
"TRADE_DATE": "trade_date",
"CLOSE_PRICE": "close",
"CHANGE_RATE": "change_cpt",
"TURNOVERRATE": "turnover_ratio",
"BILLBOARD_NET_AMT": "a_net_amount",
"BILLBOARD_BUY_AMT": "a_buy_amount",
"BILLBOARD_SELL_AMT": "a_sell_amount",
"BILLBOARD_DEAL_AMT": "a_amount",
"ACCUM_AMOUNT": "amount",
"DEAL_NET_RATIO": "net_amount_rate",
"DEAL_AMOUNT_RATIO": "a_amount_rate",
"EXPLANATION": "reason",
}
df = df.rename(columns=rename)
df['trade_date'] = pd.to_datetime(df['trade_date']).dt.strftime('%Y-%m-%d')
df['short_name'] = df['short_name'].str.replace(' ', '')
df["trade_date"] = pd.to_datetime(df["trade_date"]).dt.strftime("%Y-%m-%d")
df["short_name"] = df["short_name"].str.replace(" ", "")
return df[self.__A_LIST_DAILY_COLUMNS]

def get_a_list_info(self, stock_code, report_date=None):

@@ -65,29 +98,37 @@ class AList(BaseThs):
# 1. url
urls = [
f"""https://datacenter-web.eastmoney.com/api/data/v1/get?reportName=RPT_BILLBOARD_DAILYDETAILSBUY&columns=ALL&filter=(TRADE_DATE='{report_date}')(SECURITY_CODE="{stock_code}")&pageNumber=1&pageSize=50&sortTypes=-1&sortColumns=BUY&source=WEB&client=WEB&_=1721014447040""",
f"""https://datacenter-web.eastmoney.com/api/data/v1/get?reportName=RPT_BILLBOARD_DAILYDETAILSSELL&columns=ALL&filter=(TRADE_DATE='{report_date}')(SECURITY_CODE="{stock_code}")&pageNumber=1&pageSize=50&sortTypes=-1&sortColumns=BUY&source=WEB&client=WEB&_=1721014447040"""]
f"""https://datacenter-web.eastmoney.com/api/data/v1/get?reportName=RPT_BILLBOARD_DAILYDETAILSSELL&columns=ALL&filter=(TRADE_DATE='{report_date}')(SECURITY_CODE="{stock_code}")&pageNumber=1&pageSize=50&sortTypes=-1&sortColumns=BUY&source=WEB&client=WEB&_=1721014447040""",
]

# 2. 请求数据
data = []
for url in urls:
res = requests.request(method='post', url=url).json()
if res['result'] is None:
res = requests.request(method="post", url=url).json()
if res["result"] is None:
return pd.DataFrame()
data.extend(res['result']["data"])
data.extend(res["result"]["data"])
# ['trade_date', 'stock_code', 'operate_code', 'operate_name', 'buy_amount',
# 'sell_amount','net_amount', 'buy_amount_rate', 'sell_amount_rate', 'reason']
# 3. 解析封装数据
rename = {'SECURITY_CODE': 'stock_code', 'TRADE_DATE': 'trade_date',
'OPERATEDEPT_CODE': 'operate_code', 'OPERATEDEPT_NAME': 'operate_name',
'BUY': 'a_buy_amount', 'SELL': 'a_sell_amount', 'NET': 'a_net_amount',
'TOTAL_BUYRIO': 'a_buy_amount_rate', 'TOTAL_SELLRIO': 'a_sell_amount_rate',
'EXPLANATION': 'reason', }
rename = {
"SECURITY_CODE": "stock_code",
"TRADE_DATE": "trade_date",
"OPERATEDEPT_CODE": "operate_code",
"OPERATEDEPT_NAME": "operate_name",
"BUY": "a_buy_amount",
"SELL": "a_sell_amount",
"NET": "a_net_amount",
"TOTAL_BUYRIO": "a_buy_amount_rate",
"TOTAL_SELLRIO": "a_sell_amount_rate",
"EXPLANATION": "reason",
}
df = pd.DataFrame(data).rename(columns=rename)
df['trade_date'] = pd.to_datetime(df['trade_date']).dt.strftime('%Y-%m-%d')
df = df.sort_values(by=['reason', 'a_buy_amount', 'a_sell_amount'], ascending=[True, False, False])
df["trade_date"] = pd.to_datetime(df["trade_date"]).dt.strftime("%Y-%m-%d")
df = df.sort_values(by=["reason", "a_buy_amount", "a_sell_amount"], ascending=[True, False, False])
return df[self.__A_LIST_INFO_COLUMNS]


if __name__ == '__main__':
print(AList().list_a_list_daily(report_date='2024-07-04'))
print(AList().get_a_list_info(stock_code='600297', report_date='2024-07-12'))
if __name__ == "__main__":
print(AList().list_a_list_daily(report_date="2024-07-04"))
print(AList().get_a_list_info(stock_code="600297", report_date="2024-07-12"))
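Several of the Eastmoney endpoints above return JSONP rather than bare JSON: the payload is wrapped in a jQuery callback, which the text[text.index("{") : -2] slices strip before json.loads. The same idiom in isolation, with a hypothetical response body:

import json

text = 'jQuery1123047223270591945665_1716975719487({"result": {"data": []}});'
# keep everything from the first "{" and drop the trailing ");"
payload = json.loads(text[text.index("{") : -2])
print(payload["result"]["data"])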

@@ -16,11 +16,12 @@ from adata.common.utils import requests
from adata.sentiment.alist import AList


class Hot(AList):
class Hot(AList): # 参考 pylint 改完之后实际上这个 Hot 和 AList 没有啥实例化的意义
"""热门榜单"""

# 东方财富人气榜
def pop_rank_100_east(self):
@staticmethod
def pop_rank_100_east():
"""
东方财富人气榜100
http://guba.eastmoney.com/rank/
@@ -29,23 +30,35 @@ class Hot(AList):
url = "https://emappdata.eastmoney.com/stockrank/getAllCurrentList"

# 2. 请求数据
params = {"appId": "appId01", "globalId": "786e4c21-70dc-435a-93bb-38",
"marketType": "", "pageNo": 1, "pageSize": 100, }
res = requests.request(method='post', url=url, json=params).json()
params = {
"appId": "appId01",
"globalId": "786e4c21-70dc-435a-93bb-38",
"marketType": "",
"pageNo": 1,
"pageSize": 100,
}
res = requests.request(method="post", url=url, json=params).json()
df = pd.DataFrame(res["data"])

df["mark"] = ["0" + "." + item[2:] if "SZ" in item else "1" + "." + item[2:]
for item in df["sc"]]
",".join(df["mark"]) + "?v=08926209912590994"
params = {"ut": "f057cbcbce2a86e2866ab8877db1d059",
"fltt": "2", "invt": "2", "fields": "f14,f3,f12,f2",
"secids": ",".join(df["mark"]) + ",?v=08926209912590994", }
df["mark"] = ["0" + "." + item[2:] if "SZ" in item else "1" + "." + item[2:] for item in df["sc"]]
params = {
"ut": "f057cbcbce2a86e2866ab8877db1d059",
"fltt": "2",
"invt": "2",
"fields": "f14,f3,f12,f2",
"secids": ",".join(df["mark"]) + ",?v=08926209912590994",
}
url = "https://push2.eastmoney.com/api/qt/ulist.np/get"
res = requests.request(method='get', url=url, params=params)
res = requests.request(method="get", url=url, params=params)

# 3. 解析封装数据
data = res.json()["data"]["diff"]
rename = {'f2': 'price', 'f3': 'change_pct', 'f12': 'stock_code', 'f14': 'short_name', }
rename = {
"f2": "price",
"f3": "change_pct",
"f12": "stock_code",
"f14": "short_name",
}
rank_df = pd.DataFrame(data).rename(columns=rename)
rank_df["change_pct"] = pd.to_numeric(rank_df["change_pct"], errors="coerce")
rank_df["price"] = pd.to_numeric(rank_df["price"], errors="coerce")

@@ -53,50 +66,65 @@
rank_df["rank"] = range(1, len(rank_df) + 1)
return rank_df[["rank", "stock_code", "short_name", "price", "change", "change_pct"]]

def hot_rank_100_ths(self):
@staticmethod
def hot_rank_100_ths():
"""
同花顺热股100
https://dq.10jqka.com.cn/fuyao/hot_list_data/out/hot_list/v1/stock?stock_type=a&type=hour&list_type=normal
"""
api_url = 'https://dq.10jqka.com.cn/fuyao/hot_list_data/out/hot_list/v1/stock?stock_type=a&type=hour&list_type=normal'
api_url = (
"https://dq.10jqka.com.cn/fuyao/hot_list_data/out/hot_list/v1/stock?stock_type=a&type=hour&list_type=normal"
)
headers = ths_headers.json_headers
headers['Host'] = 'dq.10jqka.com.cn'
res = requests.request(method='get', url=api_url, params={}, headers=headers)
data = res.json()['data']['stock_list']
headers["Host"] = "dq.10jqka.com.cn"
res = requests.request(method="get", url=api_url, params={}, headers=headers)
data = res.json()["data"]["stock_list"]
data_list = []
for d in data:
d['concept_tag'] = ";".join(d['tag']['concept_tag'])
if 'popularity_tag' in d['tag']:
d['pop_tag'] = d['tag']['popularity_tag'].replace('\n', '')
d["concept_tag"] = ";".join(d["tag"]["concept_tag"])
if "popularity_tag" in d["tag"]:
d["pop_tag"] = d["tag"]["popularity_tag"].replace("\n", "")
data_list.append(d)
rename = {'order': 'rank', 'rise_and_fall': 'change_pct', 'code': 'stock_code', 'name': 'short_name',
'rate': 'hot_value', 'concept_tag': 'concept_tag'}
rename = {
"order": "rank",
"rise_and_fall": "change_pct",
"code": "stock_code",
"name": "short_name",
"rate": "hot_value",
"concept_tag": "concept_tag",
}
rank_df = pd.DataFrame(data).rename(columns=rename)
rank_df = rank_df[["rank", "stock_code", "short_name", "change_pct", "hot_value", "pop_tag", "concept_tag"]]
return rank_df

def hot_concept_20_ths(self, plate_type=1):
@staticmethod
def hot_concept_20_ths(plate_type=1):
"""
同花热门概念板块
:param plate_type: 1.概念板块,2.行业板块;默认:概念板块
"""
plate_type = 'concept' if plate_type == 1 else 'industry'
api_url = f'https://dq.10jqka.com.cn/fuyao/hot_list_data/out/hot_list/v1/plate?type={plate_type}'
plate_type = "concept" if plate_type == 1 else "industry"
api_url = f"https://dq.10jqka.com.cn/fuyao/hot_list_data/out/hot_list/v1/plate?type={plate_type}"
headers = ths_headers.json_headers
headers['Host'] = 'dq.10jqka.com.cn'
res = requests.request(method='get', url=api_url, params={}, headers=headers)
data = res.json()['data']['plate_list']
headers["Host"] = "dq.10jqka.com.cn"
res = requests.request(method="get", url=api_url, params={}, headers=headers)
data = res.json()["data"]["plate_list"]
data_list = []
for d in data:
data_list.append(d)
rename = {'order': 'rank', 'rise_and_fall': 'change_pct', 'rate': 'hot_value', 'code': 'concept_code',
'name': 'concept_name'}
rename = {
"order": "rank",
"rise_and_fall": "change_pct",
"rate": "hot_value",
"code": "concept_code",
"name": "concept_name",
}
rank_df = pd.DataFrame(data).rename(columns=rename)
rank_df = rank_df[["rank", "concept_code", "concept_name", "change_pct", "hot_value", "hot_tag"]]
return rank_df


if __name__ == '__main__':
if __name__ == "__main__":
print(Hot().hot_rank_100_ths())
print(Hot().pop_rank_100_east())
print(Hot().hot_concept_20_ths(plate_type=1))
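As the inline review note on the Hot class above points out (after the pylint-driven refactor there is little reason to instantiate Hot or AList), the ranking helpers are now @staticmethod and can be called directly on the class; a minimal sketch using method names from this diff:

from adata.sentiment.hot import Hot

east_df = Hot.pop_rank_100_east()                   # Eastmoney popularity top 100
ths_df = Hot.hot_rank_100_ths()                     # THS (10jqka) hot-stock top 100
concept_df = Hot.hot_concept_20_ths(plate_type=1)   # THS hot concept boards
print(east_df.head(), ths_df.head(), concept_df.head())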

@@ -19,6 +19,7 @@ import math

import pandas as pd

import adata # 函数内 import,挪过来
from adata.common import requests
from adata.common.base.base_ths import BaseThs
from adata.common.exception.exception_msg import THS_IP_LIMIT_RES, THS_IP_LIMIT_MSG
@@ -26,15 +27,22 @@ from adata.common.headers import ths_headers


class NorthFlow(BaseThs):
__NORTH_FLOW_COLUMNS = ['trade_date', 'net_hgt', 'buy_hgt', 'sell_hgt', 'net_sgt', 'buy_sgt', 'sell_sgt',
'net_tgt', 'buy_tgt', 'sell_tgt']
__NORTH_FLOW_COLUMNS = [
"trade_date",
"net_hgt",
"buy_hgt",
"sell_hgt",
"net_sgt",
"buy_sgt",
"sell_sgt",
"net_tgt",
"buy_tgt",
"sell_tgt",
]

__NORTH_FLOW_MIN_COLUMNS = ['trade_time', 'net_hgt', 'net_sgt', 'net_tgt']
__NORTH_FLOW_MIN_COLUMNS = ["trade_time", "net_hgt", "net_sgt", "net_tgt"]
__NORTH_FLOW_CURRENT_COLUMNS = __NORTH_FLOW_MIN_COLUMNS

def __init__(self) -> None:
super().__init__()

def north_flow(self, start_date=None):
"""
获取北向资金历史的数据,开始时间到最新的历史数据,
@@ -59,54 +67,56 @@
https://datacenter-web.eastmoney.com/api/data/v1/get?callback=jQuery112307442704592215257_1690813516314&sortColumns=TRADE_DATE&sortTypes=-1&pageSize=10&pageNumber=2&reportName=RPT_MUTUAL_DEAL_HISTORY&columns=ALL&source=WEB&client=WEB&filter=(MUTUAL_TYPE="001")
"""
if start_date:
start_date = datetime.datetime.strptime(start_date, '%Y-%m-%d')
date_min = datetime.datetime.strptime('2017-01-01', '%Y-%m-%d')
if start_date < date_min:
start_date = date_min
start_date = datetime.datetime.strptime(start_date, "%Y-%m-%d")
date_min = datetime.datetime.strptime("2017-01-01", "%Y-%m-%d")
start_date = max(start_date, date_min)
curr_page = 1
data = []
while curr_page < 18:
url = f'https://datacenter-web.eastmoney.com/api/data/v1/get?callback=jQuery112307442704592215257_1690813516314' \
f'&sortColumns=TRADE_DATE&sortTypes=-1&pageSize=1000&pageNumber={curr_page}&' \
f'reportName=RPT_MUTUAL_DEAL_HISTORY&columns=ALL&source=WEB&client=WEB&'
url = (
f"https://datacenter-web.eastmoney.com/api/data/v1/get?callback=jQuery112307442704592215257_1690813516314"
f"&sortColumns=TRADE_DATE&sortTypes=-1&pageSize=1000&pageNumber={curr_page}&"
f"reportName=RPT_MUTUAL_DEAL_HISTORY&columns=ALL&source=WEB&client=WEB&"
)
sgt_url = f"""{url}filter=(MUTUAL_TYPE="001")"""
hgt_url = f"""{url}filter=(MUTUAL_TYPE="003")"""

sgt = requests.request('get', sgt_url, headers={}, proxies={}).text.replace('null', '0')
hgt = requests.request('get', hgt_url, headers={}, proxies={}).text.replace('null', '0')
sgt = requests.request("get", sgt_url, headers={}, proxies={}).text.replace("null", "0")
hgt = requests.request("get", hgt_url, headers={}, proxies={}).text.replace("null", "0")

# 2. 解析数据
sgt_json = json.loads(sgt[sgt.index('{'):-2])
hgt_json = json.loads(hgt[hgt.index('{'):-2])
sgt_json = json.loads(sgt[sgt.index("{") : -2])
hgt_json = json.loads(hgt[hgt.index("{") : -2])
sgt_data = sgt_json["result"]["data"]
hgt_data = hgt_json["result"]["data"]
if not sgt_data:
break
is_end = False
for i in range(len(sgt_data)):
# pylint 报的用 enumerate,所以这里保留一下 i 做示范
for i, (hgt_item, sgt_item) in enumerate(zip(hgt_data, sgt_data)):
if not start_date and i >= 30:
is_end = True
break
if start_date:
date_min = datetime.datetime.strptime(hgt_data[i]['TRADE_DATE'], '%Y-%m-%d %H:%M:%S')
date_min = datetime.datetime.strptime(hgt_item["TRADE_DATE"], "%Y-%m-%d %H:%M:%S")
if start_date > date_min:
is_end = True
break

data.append({
'trade_date': hgt_data[i]['TRADE_DATE'],
'net_hgt': math.ceil(hgt_data[i]['NET_DEAL_AMT'] * 1000000),
'buy_hgt': math.ceil(hgt_data[i]['BUY_AMT'] * 1000000),
'sell_hgt': math.ceil(hgt_data[i]['SELL_AMT'] * 1000000),

'net_sgt': math.ceil(sgt_data[i]['NET_DEAL_AMT'] * 1000000),
'buy_sgt': math.ceil(sgt_data[i]['BUY_AMT'] * 1000000),
'sell_sgt': math.ceil(sgt_data[i]['SELL_AMT'] * 1000000),

'net_tgt': math.ceil((hgt_data[i]['NET_DEAL_AMT'] + sgt_data[i]['NET_DEAL_AMT']) * 1000000),
'buy_tgt': math.ceil((hgt_data[i]['BUY_AMT'] + sgt_data[i]['BUY_AMT']) * 1000000),
'sell_tgt': math.ceil((hgt_data[i]['SELL_AMT'] + sgt_data[i]['SELL_AMT']) * 1000000)
})
data.append(
{
"trade_date": hgt_item["TRADE_DATE"],
"net_hgt": math.ceil(hgt_item["NET_DEAL_AMT"] * 1000000),
"buy_hgt": math.ceil(hgt_item["BUY_AMT"] * 1000000),
"sell_hgt": math.ceil(hgt_item["SELL_AMT"] * 1000000),
"net_sgt": math.ceil(sgt_item["NET_DEAL_AMT"] * 1000000),
"buy_sgt": math.ceil(sgt_item["BUY_AMT"] * 1000000),
"sell_sgt": math.ceil(sgt_item["SELL_AMT"] * 1000000),
"net_tgt": math.ceil((hgt_item["NET_DEAL_AMT"] + sgt_item["NET_DEAL_AMT"]) * 1000000),
"buy_tgt": math.ceil((hgt_item["BUY_AMT"] + sgt_item["BUY_AMT"]) * 1000000),
"sell_tgt": math.ceil((hgt_item["SELL_AMT"] + sgt_item["SELL_AMT"]) * 1000000),
}
)

if is_end:
break
@@ -114,7 +124,7 @@

# 3.封装数据
result_df = pd.DataFrame(data=data, columns=self.__NORTH_FLOW_COLUMNS)
result_df['trade_date'] = pd.to_datetime(result_df['trade_date']).dt.strftime('%Y-%m-%d')
result_df["trade_date"] = pd.to_datetime(result_df["trade_date"]).dt.strftime("%Y-%m-%d")

return result_df[self.__NORTH_FLOW_COLUMNS]

@@ -137,10 +147,10 @@

def __north_flow_min_ths(self):
# 1.接口 url
api_url = f" https://data.hexin.cn/market/hsgtApi/method/dayChart/"
api_url = "https://data.hexin.cn/market/hsgtApi/method/dayChart/"
headers = copy.deepcopy(ths_headers.json_headers)
headers['Host'] = 'data.hexin.cn'
res = requests.request('get', api_url, headers=headers, proxies={})
headers["Host"] = "data.hexin.cn"
res = requests.request("get", api_url, headers=headers, proxies={})
text = res.text
if THS_IP_LIMIT_RES in text:
return Exception(THS_IP_LIMIT_MSG)
@@ -148,29 +158,33 @@
return pd.DataFrame(data=[], columns=self.__NORTH_FLOW_CURRENT_COLUMNS)
# 2. 解析数据
result_json = json.loads(text)
time_list = result_json['time']
hgt_list = result_json['hgt']
sgt_list = result_json['sgt']
time_list = result_json["time"]
hgt_list = result_json["hgt"]
sgt_list = result_json["sgt"]
data = []
for i in range(len(time_list)):
row = [time_list[i], math.ceil(hgt_list[i] * 100000000), math.ceil(sgt_list[i] * 100000000),
math.ceil((hgt_list[i] + sgt_list[i]) * 100000000)]
for time_item, hgt_item, sgt_item in zip(time_list, hgt_list, sgt_list):
row = [
time_item,
math.ceil(hgt_item * 100000000),
math.ceil(sgt_item * 100000000),
math.ceil((hgt_item + sgt_item) * 100000000),
]
data.append(row)
# 3. 封装数据
result_df = pd.DataFrame(data=data, columns=self.__NORTH_FLOW_MIN_COLUMNS)
import adata

trade_year = adata.stock.info.trade_calendar()
# 获取当前日期
today = datetime.datetime.today().date()
# 筛选出小于等于今天并且 trade_status=1 的记录
trade_year['trade_date'] = pd.to_datetime(trade_year['trade_date'])
filtered_df = trade_year[(trade_year['trade_date'].dt.date <= today) & (trade_year['trade_status'] == 1)]
max_date = filtered_df.loc[filtered_df['trade_date'].idxmax()]
trade_year["trade_date"] = pd.to_datetime(trade_year["trade_date"])
filtered_df = trade_year[(trade_year["trade_date"].dt.date <= today) & (trade_year["trade_status"] == 1)]
max_date = filtered_df.loc[filtered_df["trade_date"].idxmax()]

result_df['trade_time'] = max_date['trade_date'].strftime('%Y-%m-%d') + ' ' + result_df['trade_time']
result_df["trade_time"] = max_date["trade_date"].strftime("%Y-%m-%d") + " " + result_df["trade_time"]

# 将 trade_time 字符串转换为日期时间类型
result_df['trade_time'] = pd.to_datetime(result_df['trade_time'])
result_df["trade_time"] = pd.to_datetime(result_df["trade_time"])
return result_df[self.__NORTH_FLOW_MIN_COLUMNS]

def __north_flow_min_east(self):
@@ -182,30 +196,36 @@
url = "https://push2.eastmoney.com/api/qt/kamt.rtmin/get?fields1=f1,f3&fields2=f51,f52,f54,f56&ut=b2884a393a59ad64002292a3e90d46a5&cb=jQuery112308613678156517719_1690861908580&_=1690861908581"
data = []
try:
gt = requests.request('get', url, headers={}, proxies={}).text
gt = requests.request("get", url, headers={}, proxies={}).text

# 2. 解析数据
gt_json = json.loads(gt[gt.index('{'):-2])
gt_json = json.loads(gt[gt.index("{") : -2])
gt_date = gt_json["data"]["s2nDate"]
gt_data = gt_json["data"]["s2n"]
for _ in gt_data:
row = str(_).split(',')
if row[1] != '-':
data.append([row[0], math.ceil(float(row[1]) * 10000),
math.ceil(float(row[2]) * 10000), math.ceil(float(row[3]) * 10000)])
except Exception as e:
row = str(_).split(",")
if row[1] != "-":
data.append(
[
row[0],
math.ceil(float(row[1]) * 10000),
math.ceil(float(row[2]) * 10000),
math.ceil(float(row[3]) * 10000),
]
)
except Exception: # pylint: disable=broad-except
print("north_flow_min_east is ERROR!!!")
return pd.DataFrame(data=data, columns=self.__NORTH_FLOW_MIN_COLUMNS)
result_df = pd.DataFrame(data=data, columns=self.__NORTH_FLOW_MIN_COLUMNS)

# 3. 封装数据
result_df['trade_time'] = str(datetime.datetime.now().year) + '-' + gt_date + ' ' + result_df['trade_time']
result_df['trade_time'] = pd.to_datetime(result_df['trade_time'])
result_df["trade_time"] = str(datetime.datetime.now().year) + "-" + gt_date + " " + result_df["trade_time"]
result_df["trade_time"] = pd.to_datetime(result_df["trade_time"])
result_df = result_df.dropna()
return result_df[self.__NORTH_FLOW_MIN_COLUMNS]


if __name__ == '__main__':
if __name__ == "__main__":
print(NorthFlow().north_flow_min())
print(NorthFlow().north_flow_current())
print(NorthFlow().north_flow(start_date='2000-11-01'))
print(NorthFlow().north_flow(start_date="2000-11-01"))
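The loop rewrite above follows pylint's consider-using-enumerate advice while still walking the HGT and SGT result lists in lockstep. The same pattern in isolation, with hypothetical data:

hgt_data = [{"TRADE_DATE": "2024-07-04 00:00:00", "NET_DEAL_AMT": 1.2}]
sgt_data = [{"TRADE_DATE": "2024-07-04 00:00:00", "NET_DEAL_AMT": -0.8}]

# enumerate supplies the row index (used for the 30-row cap when no start_date is given),
# zip pairs the two sources positionally
for i, (hgt_item, sgt_item) in enumerate(zip(hgt_data, sgt_data)):
    if i >= 30:
        break
    print(hgt_item["TRADE_DATE"], hgt_item["NET_DEAL_AMT"] + sgt_item["NET_DEAL_AMT"])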

@@ -21,11 +21,8 @@ from adata.common import requests
from adata.common.headers import east_headers


class SecuritiesMargin(object):
__SECURITIES_MARGIN_COLUMN = ['trade_date', 'rzye', 'rqye', 'rzrqye', 'rzrqyecz']

def __init__(self) -> None:
super().__init__()
class SecuritiesMargin:
__SECURITIES_MARGIN_COLUMN = ["trade_date", "rzye", "rqye", "rzrqye", "rzrqyecz"]

def securities_margin(self, start_date=None):
"""
@@ -52,43 +49,50 @@
page_size = 250
start_date_str = start_date
if start_date:
start_date = datetime.strptime(start_date, '%Y-%m-%d')
start_date = datetime.strptime(start_date, "%Y-%m-%d")
while curr_page <= total_pages:
api_url = f"https://datacenter-web.eastmoney.com/api/data/v1/get?" \
f"reportName=RPTA_RZRQ_LSHJ&columns=ALL&source=WEB&sortColumns=dim_date&sortTypes=-1&" \
f"pageNumber={curr_page}&pageSize={page_size}&_=1690176931022"
api_url = (
f"https://datacenter-web.eastmoney.com/api/data/v1/get?"
f"reportName=RPTA_RZRQ_LSHJ&columns=ALL&source=WEB&sortColumns=dim_date&sortTypes=-1&"
f"pageNumber={curr_page}&pageSize={page_size}&_=1690176931022"
)

res = requests.request(method='get', url=api_url, headers=east_headers.json_headers, proxies={})
res = requests.request(method="get", url=api_url, headers=east_headers.json_headers, proxies={})
# 2. 判断请求是否成功
if res.status_code != 200:
continue
res_json = res.json()
if not res_json['success']:
if not res_json["success"]:
continue
if curr_page == 1:
total_pages = res_json['result']['pages']
res_json = res_json['result']['data']
total_pages = res_json["result"]["pages"]
res_json = res_json["result"]["data"]
# 2.1 日期范围判断
data.extend(res_json)
if not start_date:
break
if start_date:
date_min = datetime.strptime(res_json[-1]['DIM_DATE'], '%Y-%m-%d %H:%M:%S')
date_min = datetime.strptime(res_json[-1]["DIM_DATE"], "%Y-%m-%d %H:%M:%S")
if start_date >= date_min:
break
curr_page += 1

# 3. 解析数据
result_df = pd.DataFrame(data=data)
rename_columns = {'RZYE': 'rzye', 'RQYE': 'rqye', 'RZRQYE': 'rzrqye', 'RZRQYECZ': 'rzrqyecz',
'DIM_DATE': 'trade_date'}
rename_columns = {
"RZYE": "rzye",
"RQYE": "rqye",
"RZRQYE": "rzrqye",
"RZRQYECZ": "rzrqyecz",
"DIM_DATE": "trade_date",
}
result_df = result_df.rename(columns=rename_columns)[self.__SECURITIES_MARGIN_COLUMN]

# 4. 数据清洗
result_df['trade_date'] = pd.to_datetime(result_df['trade_date']).dt.strftime('%Y-%m-%d')
result_df = result_df[result_df['trade_date'] > start_date_str]
result_df["trade_date"] = pd.to_datetime(result_df["trade_date"]).dt.strftime("%Y-%m-%d")
result_df = result_df[result_df["trade_date"] > start_date_str]
return result_df


if __name__ == '__main__':
print(SecuritiesMargin().securities_margin('2022-01-01'))
if __name__ == "__main__":
print(SecuritiesMargin().securities_margin("2022-01-01"))

@@ -20,11 +20,8 @@ from adata.common.headers import ths_headers
from adata.common.utils import cookie


class StockLifting(object):
__STOCK_LIFTING_COLUMN = ['stock_code', 'short_name', 'lift_date', 'volume', 'amount', 'ratio', 'price']

def __init__(self) -> None:
super().__init__()
class StockLifting:
__STOCK_LIFTING_COLUMN = ["stock_code", "short_name", "lift_date", "volume", "amount", "ratio", "price"]

def stock_lifting_last_month(self):
"""
@@ -45,36 +42,43 @@
total_pages = 1
curr_page = 1
while curr_page <= total_pages:
api_url = f"http://data.10jqka.com.cn/market/xsjj/field/enddate/order/desc/ajax/1/free/1/"
api_url = "http://data.10jqka.com.cn/market/xsjj/field/enddate/order/desc/ajax/1/free/1/"
if curr_page > 1:
api_url = api_url + f"page/{curr_page}/free/1/"
headers = copy.deepcopy(ths_headers.text_headers)
headers['Host'] = 'data.10jqka.com.cn'
headers['Referer'] = None
headers['Cookie'] = cookie.ths_cookie()
res = requests.request(method='get', url=api_url, headers=headers, proxies={})
headers["Host"] = "data.10jqka.com.cn"
headers["Referer"] = None
headers["Cookie"] = cookie.ths_cookie()
res = requests.request(method="get", url=api_url, headers=headers, proxies={})
curr_page += 1
# 2. 判断请求是否成功
if res.status_code != 200:
continue
text = res.text
if not ('解禁日期' in text or '解禁股' in text):
if not ("解禁日期" in text or "解禁股" in text):
break
soup = BeautifulSoup(text, 'html.parser')
soup = BeautifulSoup(text, "html.parser")
# 3 .获取总的页数
if total_pages == 1:
page_info = soup.find('span', {'class': 'page_info'})
page_info = soup.find("span", {"class": "page_info"})
if page_info:
total_pages = int(page_info.text.split("/")[1])
# 4. 解析数据
page_data = []
for idx, tr in enumerate(soup.find_all('tr')):
for idx, tr in enumerate(soup.find_all("tr")):
if idx != 0:
tds = tr.find_all('td')
page_data.append({'stock_code': tds[1].contents[0].text, 'short_name': tds[2].contents[0].text,
'lift_date': tds[3].contents[0].text, 'volume': tds[4].contents[0].text,
'ratio': tds[7].contents[0].text, 'price': tds[5].contents[0].text,
'amount': tds[6].contents[0].text})
tds = tr.find_all("td")
page_data.append(
{
"stock_code": tds[1].contents[0].text,
"short_name": tds[2].contents[0].text,
"lift_date": tds[3].contents[0].text,
"volume": tds[4].contents[0].text,
"ratio": tds[7].contents[0].text,
"price": tds[5].contents[0].text,
"amount": tds[6].contents[0].text,
}
)
data.extend(page_data)
# 5. 封装数据
if not data:
@@ -82,16 +86,18 @@
result_df = pd.DataFrame(data=data)
data.clear()
# 6. 单位换算
result_df['volume'] = result_df['volume'].apply(lambda x: str(float(x[:-1]) * 10000) if '万' in x else x)
result_df['volume'] = result_df['volume'].apply(
lambda x: round(float(x[:-1]) * 100000000) if '亿' in x else round(float(x)))
result_df["volume"] = result_df["volume"].apply(lambda x: str(float(x[:-1]) * 10000) if "万" in x else x)
result_df["volume"] = result_df["volume"].apply(
lambda x: round(float(x[:-1]) * 100000000) if "亿" in x else round(float(x))
)

# convert amount to yuan
result_df['amount'] = result_df['amount'].apply(lambda x: str(float(x[:-1]) * 10000) if '万' in x else x)
result_df['amount'] = result_df['amount'].apply(
lambda x: round(float(x[:-1]) * 100000000) if '亿' in x else round(float(x)))
result_df["amount"] = result_df["amount"].apply(lambda x: str(float(x[:-1]) * 10000) if "万" in x else x)
result_df["amount"] = result_df["amount"].apply(
lambda x: round(float(x[:-1]) * 100000000) if "亿" in x else round(float(x))
)
return result_df[self.__STOCK_LIFTING_COLUMN]


if __name__ == '__main__':
if __name__ == "__main__":
print(StockLifting().stock_lifting_last_month())
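The apply chains above turn strings such as "1.2万" or "3.4亿" into plain numbers (万 = 10^4, 亿 = 10^8). The same conversion as a standalone helper (hypothetical name, illustrative only):

def to_number(x: str):
    """Convert a 10jqka amount string with a 万/亿 suffix to a plain number."""
    if "亿" in x:
        return round(float(x[:-1]) * 100000000)
    if "万" in x:
        return round(float(x[:-1]) * 10000)
    return round(float(x))

print(to_number("3.4亿"), to_number("1.2万"), to_number("500"))  # 340000000 12000 500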

pylintrc (new file, 593 lines)
@@ -0,0 +1,593 @@
[MASTER]

# A comma-separated list of package or module names from where C extensions may
# be loaded. Extensions are loading into the active Python interpreter and may
# run arbitrary code.
extension-pkg-allow-list=

# A comma-separated list of package or module names from where C extensions may
# be loaded. Extensions are loading into the active Python interpreter and may
# run arbitrary code. (This is an alternative name to extension-pkg-allow-list
# for backward compatibility.)
extension-pkg-whitelist=

# Return non-zero exit code if any of these messages/categories are detected,
# even if score is above --fail-under value. Syntax same as enable. Messages
# specified are enabled, while categories only check already-enabled messages.
fail-on=

# Specify a score threshold to be exceeded before program exits with error.
fail-under=10.0

# Files or directories to be skipped. They should be base names, not paths.
ignore=CVS,protobuf

# Add files or directories matching the regex patterns to the ignore-list. The
# regex matches against paths and can be in Posix or Windows format.
ignore-paths=

# Files or directories matching the regex patterns are skipped. The regex
# matches against base names, not paths. The default value ignores emacs file
# locks
ignore-patterns=^\.#

# Python code to execute, usually for sys.path manipulation such as
# pygtk.require().
init-hook="from pylint.config import find_default_config_files; import os, sys; sys.path.append(os.path.dirname(next(find_default_config_files())))"

# Use multiple processes to speed up Pylint. Specifying 0 will auto-detect the
# number of processors available to use.
jobs=1

# Control the amount of potential inferred values when inferring a single
# object. This can help the performance when dealing with large functions or
# complex, nested conditions.
limit-inference-results=100

# List of plugins (as comma separated values of python module names) to load,
# usually to register additional checkers.
load-plugins=

# Pickle collected data for later comparisons.
persistent=yes

# Minimum Python version to use for version dependent checks. Will default to
# the version used to run pylint.
py-version=3.7

# Discover python modules and packages in the file system subtree.
recursive=no

# When enabled, pylint would attempt to guess common misconfiguration and emit
# user-friendly hints instead of false-positive error messages.
suggestion-mode=yes

# Allow loading of arbitrary C extensions. Extensions are imported into the
# active Python interpreter and may run arbitrary code.
unsafe-load-any-extension=no


[MESSAGES CONTROL]

# Only show warnings with the listed confidence levels. Leave empty to show
# all. Valid levels: HIGH, CONTROL_FLOW, INFERENCE, INFERENCE_FAILURE,
# UNDEFINED.
confidence=

# Disable the message, report, category or checker with the given id(s). You
# can either give multiple identifiers separated by comma (,) or put this
# option multiple times (only on the command line, not in the configuration
# file where it should appear only once). You can also use "--disable=all" to
# disable everything first and then re-enable specific checks. For example, if
# you want to run only the similarities checker, you can use "--disable=all
# --enable=similarities". If you want to run only the classes checker, but have
# no Warning level messages displayed, use "--disable=all --enable=classes
# --disable=W".
disable=raw-checker-failed,
bad-inline-option,
locally-disabled,
file-ignored,
suppressed-message,
useless-suppression,
deprecated-pragma,
use-symbolic-message-instead,
missing-docstring,
missing-function-docstring,
missing-module-docstring,
invalid-name,
unsubscriptable-object, # dataframe 相关操作
unsupported-assignment-operation, # dataframe 相关操作


# Enable the message, report, category or checker with the given id(s). You can
# either give multiple identifier separated by comma (,) or put this option
# multiple time (only on the command line, not in the configuration file where
# it should appear only once). See also the "--disable" option for examples.
enable=c-extension-no-member


[REPORTS]

# Python expression which should return a score less than or equal to 10. You
# have access to the variables 'fatal', 'error', 'warning', 'refactor',
# 'convention', and 'info' which contain the number of messages in each
# category, as well as 'statement' which is the total number of statements
# analyzed. This score is used by the global evaluation report (RP0004).
evaluation=max(0, 0 if fatal else 10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10))
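For illustration, the evaluation expression above computed in plain Python for a hypothetical run (1 error, 3 warnings, 2 refactors, 4 conventions over 200 statements):

fatal, error, warning, refactor, convention, statement = 0, 1, 3, 2, 4, 200
score = max(0, 0 if fatal else 10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10))
print(round(score, 2))  # 9.3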

# Template used to display messages. This is a python new-style format string
# used to format the message information. See doc for all details.
#msg-template=

# Set the output format. Available formats are text, parseable, colorized, json
# and msvs (visual studio). You can also give a reporter class, e.g.
# mypackage.mymodule.MyReporterClass.
output-format=text

# Tells whether to display a full report or only the messages.
reports=no

# Activate the evaluation score.
score=yes


[REFACTORING]

# Maximum number of nested blocks for function / method body
max-nested-blocks=5

# Complete name of functions that never returns. When checking for
# inconsistent-return-statements if a never returning function is called then
# it will be considered as an explicit return statement and no message will be
# printed.
never-returning-functions=sys.exit,argparse.parse_error


[SIMILARITIES]

# Comments are removed from the similarity computation
ignore-comments=yes

# Docstrings are removed from the similarity computation
ignore-docstrings=yes

# Imports are removed from the similarity computation
ignore-imports=no

# Signatures are removed from the similarity computation
ignore-signatures=no

# Minimum lines number of a similarity.
min-similarity-lines=4


[LOGGING]

# The type of string formatting that logging methods do. `old` means using %
# formatting, `new` is for `{}` formatting.
logging-format-style=old

# Logging modules to check that the string format arguments are in logging
# function parameter format.
logging-modules=logging


[SPELLING]

# Limits count of emitted suggestions for spelling mistakes.
max-spelling-suggestions=4

# Spelling dictionary name. Available dictionaries: none. To make it work,
# install the 'python-enchant' package.
spelling-dict=

# List of comma separated words that should be considered directives if they
# appear and the beginning of a comment and should not be checked.
spelling-ignore-comment-directives=fmt: on,fmt: off,noqa:,noqa,nosec,isort:skip,mypy:

# List of comma separated words that should not be checked.
spelling-ignore-words=

# A path to a file that contains the private dictionary; one word per line.
spelling-private-dict-file=

# Tells whether to store unknown words to the private dictionary (see the
# --spelling-private-dict-file option) instead of raising a message.
spelling-store-unknown-words=no


[FORMAT]

# Expected format of line ending, e.g. empty (any line ending), LF or CRLF.
expected-line-ending-format=

# Regexp for a line that is allowed to be longer than the limit.
ignore-long-lines=^\s*(# )?<?https?://\S+>?$

# Number of spaces of indent required inside a hanging or continued line.
indent-after-paren=4

# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1
# tab).
indent-string=' '

# Maximum number of characters on a single line.
max-line-length=128

# Maximum number of lines in a module.
max-module-lines=1000

# Allow the body of a class to be on the same line as the declaration if body
# contains single statement.
single-line-class-stmt=no

# Allow the body of an if to be on the same line as the test if there is no
# else.
single-line-if-stmt=no


[VARIABLES]

# List of additional names supposed to be defined in builtins. Remember that
# you should avoid defining new builtins when possible.
additional-builtins=

# Tells whether unused global variables should be treated as a violation.
allow-global-unused-variables=yes

# List of names allowed to shadow builtins
allowed-redefined-builtins=

# List of strings which can identify a callback function by name. A callback
# name must start or end with one of those strings.
callbacks=cb_,
_cb

# A regular expression matching the name of dummy variables (i.e. expected to
# not be used).
dummy-variables-rgx=_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_

# Argument names that match this expression will be ignored. Default to name
# with leading underscore.
ignored-argument-names=_.*|^ignored_|^unused_

# Tells whether we should check for unused import in __init__ files.
init-import=no

# List of qualified module names which can have objects that can redefine
# builtins.
redefining-builtins-modules=six.moves,past.builtins,future.builtins,builtins,io


[BASIC]

# Naming style matching correct argument names.
argument-naming-style=snake_case

# Regular expression matching correct argument names. Overrides argument-
# naming-style. If left empty, argument names will be checked with the set
# naming style.
#argument-rgx=

# Naming style matching correct attribute names.
attr-naming-style=snake_case

# Regular expression matching correct attribute names. Overrides attr-naming-
# style. If left empty, attribute names will be checked with the set naming
# style.
#attr-rgx=

# Bad variable names which should always be refused, separated by a comma.
bad-names=foo,
bar,
baz,
toto,
tutu,
tata

# Bad variable names regexes, separated by a comma. If names match any regex,
# they will always be refused
bad-names-rgxs=

# Naming style matching correct class attribute names.
class-attribute-naming-style=any

# Regular expression matching correct class attribute names. Overrides class-
# attribute-naming-style. If left empty, class attribute names will be checked
# with the set naming style.
#class-attribute-rgx=

# Naming style matching correct class constant names.
class-const-naming-style=UPPER_CASE

# Regular expression matching correct class constant names. Overrides class-
# const-naming-style. If left empty, class constant names will be checked with
# the set naming style.
#class-const-rgx=

# Naming style matching correct class names.
class-naming-style=PascalCase

# Regular expression matching correct class names. Overrides class-naming-
# style. If left empty, class names will be checked with the set naming style.
#class-rgx=

# Naming style matching correct constant names.
const-naming-style=UPPER_CASE

# Regular expression matching correct constant names. Overrides const-naming-
# style. If left empty, constant names will be checked with the set naming
# style.
#const-rgx=

# Minimum line length for functions/classes that require docstrings, shorter
# ones are exempt.
docstring-min-length=-1

# Naming style matching correct function names.
function-naming-style=snake_case

# Regular expression matching correct function names. Overrides function-
# naming-style. If left empty, function names will be checked with the set
# naming style.
#function-rgx=

# Good variable names which should always be accepted, separated by a comma.
good-names=i,
j,
k,
ex,
Run,
_

# Good variable names regexes, separated by a comma. If names match any regex,
# they will always be accepted
good-names-rgxs=

# Include a hint for the correct naming format with invalid-name.
include-naming-hint=no

# Naming style matching correct inline iteration names.
inlinevar-naming-style=any

# Regular expression matching correct inline iteration names. Overrides
# inlinevar-naming-style. If left empty, inline iteration names will be checked
# with the set naming style.
#inlinevar-rgx=

# Naming style matching correct method names.
method-naming-style=snake_case

# Regular expression matching correct method names. Overrides method-naming-
# style. If left empty, method names will be checked with the set naming style.
#method-rgx=

# Naming style matching correct module names.
module-naming-style=snake_case

# Regular expression matching correct module names. Overrides module-naming-
# style. If left empty, module names will be checked with the set naming style.
#module-rgx=

# Colon-delimited sets of names that determine each other's naming style when
# the name regexes allow several styles.
name-group=

# Regular expression which should only match function or class names that do
# not require a docstring.
no-docstring-rgx=^_

# List of decorators that produce properties, such as abc.abstractproperty. Add
# to this list to register other decorators that produce valid properties.
# These decorators are taken in consideration only for invalid-name.
property-classes=abc.abstractproperty

# Regular expression matching correct type variable names. If left empty, type
# variable names will be checked with the set naming style.
#typevar-rgx=

# Naming style matching correct variable names.
variable-naming-style=snake_case

# Regular expression matching correct variable names. Overrides variable-
# naming-style. If left empty, variable names will be checked with the set
# naming style.
#variable-rgx=


[TYPECHECK]

# List of decorators that produce context managers, such as
# contextlib.contextmanager. Add to this list to register other decorators that
# produce valid context managers.
contextmanager-decorators=contextlib.contextmanager

# List of members which are set dynamically and missed by pylint inference
# system, and so shouldn't trigger E1101 when accessed. Python regular
# expressions are accepted.
generated-members=

# Tells whether missing members accessed in mixin class should be ignored. A
# class is considered mixin if its name matches the mixin-class-rgx option.
ignore-mixin-members=yes

# Tells whether to warn about missing members when the owner of the attribute
# is inferred to be None.
ignore-none=yes

# This flag controls whether pylint should warn about no-member and similar
# checks whenever an opaque object is returned when inferring. The inference
# can return multiple potential results while evaluating a Python object, but
# some branches might not be evaluated, which results in partial inference. In
# that case, it might be useful to still emit no-member and other checks for
# the rest of the inferred objects.
ignore-on-opaque-inference=yes

# List of class names for which member attributes should not be checked (useful
# for classes with dynamically set attributes). This supports the use of
# qualified names.
ignored-classes=optparse.Values,thread._local,_thread._local

# List of module names for which member attributes should not be checked
# (useful for modules/projects where namespaces are manipulated during runtime
# and thus existing member attributes cannot be deduced by static analysis). It
# supports qualified module names, as well as Unix pattern matching.
ignored-modules=

# Show a hint with possible names when a member name was not found. The aspect
# of finding the hint is based on edit distance.
missing-member-hint=yes

# The minimum edit distance a name should have in order to be considered a
# similar match for a missing member name.
missing-member-hint-distance=1

# The total number of similar names that should be taken in consideration when
# showing a hint for a missing member.
missing-member-max-choices=1

# Regex pattern to define which classes are considered mixins ignore-mixin-
# members is set to 'yes'
mixin-class-rgx=.*[Mm]ixin

# List of decorators that change the signature of a decorated function.
signature-mutators=


[MISCELLANEOUS]

# List of note tags to take in consideration, separated by a comma.
notes=FIXME,
XXX,
TODO

# Regular expression of note tags to take in consideration.
#notes-rgx=


[STRING]

# This flag controls whether inconsistent-quotes generates a warning when the
# character used as a quote delimiter is used inconsistently within a module.
check-quote-consistency=no

# This flag controls whether the implicit-str-concat should generate a warning
# on implicit string concatenation in sequences defined over several lines.
check-str-concat-over-line-jumps=no


[IMPORTS]

# List of modules that can be imported at any level, not just the top level
# one.
allow-any-import-level=

# Allow wildcard imports from modules that define __all__.
allow-wildcard-with-all=no

# Analyse import fallback blocks. This can be used to support both Python 2 and
# 3 compatible code, which means that the block might have code that exists
# only in one or another interpreter, leading to false positives when analysed.
analyse-fallback-blocks=no

# Deprecated modules which should not be used, separated by a comma.
deprecated-modules=

# Output a graph (.gv or any supported image format) of external dependencies
# to the given file (report RP0402 must not be disabled).
ext-import-graph=

# Output a graph (.gv or any supported image format) of all (i.e. internal and
# external) dependencies to the given file (report RP0402 must not be
# disabled).
import-graph=

# Output a graph (.gv or any supported image format) of internal dependencies
# to the given file (report RP0402 must not be disabled).
int-import-graph=

# Force import order to recognize a module as part of the standard
# compatibility libraries.
known-standard-library=

# Force import order to recognize a module as part of a third party library.
known-third-party=enchant

# Couples of modules and preferred modules, separated by a comma.
preferred-modules=


[CLASSES]

# Warn about protected attribute access inside special methods
check-protected-access-in-special-methods=no

# List of method names used to declare (i.e. assign) instance attributes.
defining-attr-methods=__init__,
__new__,
setUp,
__post_init__

# List of member names, which should be excluded from the protected access
# warning.
exclude-protected=_asdict,
_fields,
_replace,
_source,
_make

# List of valid names for the first argument in a class method.
valid-classmethod-first-arg=cls

# List of valid names for the first argument in a metaclass class method.
valid-metaclass-classmethod-first-arg=cls


[DESIGN]

# List of regular expressions of class ancestor names to ignore when counting
# public methods (see R0903)
exclude-too-few-public-methods=

# List of qualified class names to ignore when counting class parents (see
# R0901)
ignored-parents=

# Maximum number of arguments for function / method.
max-args=5

# Maximum number of attributes for a class (see R0902).
max-attributes=7

# Maximum number of boolean expressions in an if statement (see R0916).
max-bool-expr=5

# Maximum number of branch for function / method body.
max-branches=12

# Maximum number of locals for function / method body.
max-locals=15

# Maximum number of parents for a class (see R0901).
max-parents=7

# Maximum number of public methods for a class (see R0904).
max-public-methods=20

# Maximum number of return / yield for function / method body.
max-returns=6

# Maximum number of statements in function / method body.
max-statements=50

# Minimum number of public methods for a class (see R0903).
min-public-methods=2


[EXCEPTIONS]

# Exceptions that will emit a warning when being caught. Defaults to
# "BaseException, Exception".
overgeneral-exceptions=BaseException,
Exception

pyproject.toml (new file, 5 lines)
@@ -0,0 +1,5 @@
[tool.black]
line-length = 120

[tool.isort]
profile = "black"