import pandas as pd
import finnhub
import requests
import numpy as np
import time
from datetime import datetime
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
import pandas as pd
import mplfinance as mpf
from mplfinance.original_flavor import candlestick_ohlc
import finnhub
import matplotlib.dates as mpl_dates
import numpy as np
from datetime import * 
import seaborn as sns
from datetime import datetime
import requests
#f = pd.DataFrame(ticker_sum)
#f =pd.DataFrame(symbols)
from datetime import datetime as dt
import datetime
# today = dt.today()
# a = str(today.year)
# b = str(today.month)
# c = str(today.day)
# f = pd.read_csv(r'C:\Users\jizha\Desktop\seabridge_datapool1\final_strategy_data_temporaly\three_strategy_buy_point_'+a+'_'+b+'_'+c+'.csv')
#ticker_sum.remove('GOOG')
# ticker_sum = f['0'].tolist()
#ticker_sum.remove()
import pandas as pd
import requests
import matplotlib.pyplot as plt
import numpy as np
import requests
import matplotlib.pyplot as plt
from math import floor
import finnhub
from termcolor import colored as cl
# image_trading_signal_green .to_csv(r'C:\Users\jizha\Desktop\seabridge_datapool1\final_strategy_data_temporaly\image_trading_signal_green_'+a+'_'+b+'_'+c+'.csv') 
# image_trading_signal_purple.to_csv(r'C:\Users\jizha\Desktop\seabridge_datapool1\final_strategy_data_temporaly\image_trading_signal_purple_'+a+'_'+b+'_'+c+'.csv') 
# image_trading_signal_blue .to_csv(r'C:\Users\jizha\Desktop\seabridge_datapool1\final_strategy_data_temporaly\image_trading_signal_blue_'+a+'_'+b+'_'+c+'.csv') 
from datetime import datetime as dt
import datetime
# today = dt.today()
# a=str(today.year)
# b=str(today.month)
# c=str(today.day)
# #c1= pd.read_csv(r'C:\Users\jizha\Desktop\Strategy_auto_2_21_2022\results\startegy1.csv_'+a+'_'+b+'_'+c+'.csv') 
# c1= pd.read_csv(r'C:\Users\jizha\Desktop\seabridge_datapool1\final_strategy_data_temporaly\startegy1.csv_'+a+'_'+b+'_'+c+'.csv') 
# c1 = c1.rename(columns = {'Unnamed: 0':'symbol'})
# strategy1 = c1.set_index('symbol')

# today = dt.today()
# a = str(today.year)
# b = str(today.month)
# c = str(today.day)
# d = pd.read_csv(r'C:\Users\jizha\Desktop\seabridge_datapool1\final_strategy_data_temporaly\startegy2.csv_'+a+'_'+b+'_'+c+'.csv')
# d = d.rename(columns = {'Unnamed: 0':'symbol'})
# strategy2  = d.set_index('symbol')

# ticker_sum.remove('L&TFH.NS')
# #strategy1=pd.read_csv(r'C:\Users\jizha\Desktop\seabridge_datapool1\final_strategy_data_temporaly\startegy1.csv_'+a+'_'+b+'_'+c+'.csv')

# today = dt.today()
# a = str(today.year)
# b = str(today.month)
# c = str(today.day)
# e = pd.read_csv(r'C:\Users\jizha\Desktop\seabridge_datapool1\final_strategy_data_temporaly\startegy3.csv_'+a+'_'+b+'_'+c+'.csv')
# e = e.rename(columns = {'Unnamed: 0':'symbol'})
# strategy3 = e.set_index('symbol')

# Union of buy-point tickers across the three strategies.
# NOTE(review): strategy1/strategy2/strategy3 must already exist in the session
# (their CSV loads above are commented out) — as a standalone script this block
# raises NameError; presumably this file is run as a notebook. Confirm.
ticker1 = strategy1.index.tolist()
ticker2 = strategy2.index.tolist()
ticker3 = strategy3.index.tolist()
ticker_sum = ticker1+ticker2+ticker3
# np.unique both de-duplicates and sorts the combined ticker list.
ticker_sum = np.unique(ticker_sum).tolist()
len(ticker_sum)  # no effect as a script; kept for interactive inspection
f = pd.DataFrame(ticker_sum)
#f =pd.DataFrame(symbols)
# Date parts used to stamp today's output file name.
today = dt.today()
a = str(today.year)
b = str(today.month)
c = str(today.day)
#f.to_csv(r'C:\Users\jizha\Desktop\Strategy_auto_2_21_2022\results\three_strategy_buy_point_'+a+'_'+b+'_'+c+'.csv')
#stock_info_data5['price'] = stock_info_data5['price'].astype(float)
#stock_info_data5 =stock_info_data5[stock_info_data5['price'] >=60 ]
#ticker_sum.remove('LPI')
#ticker_sum = stock_info_data5.index.tolist()
# Persist today's combined ticker list.
# NOTE(review): hard-coded absolute Windows path — consider parameterizing.
f.to_csv(r'C:\Users\jizha\Desktop\seabridge_datapool1\final_strategy_data_temporaly\three_strategy_buy_point_'+a+'_'+b+'_'+c+'.csv')
#ticker_sum = ticker_sum2
# (Historical per-ticker exclusions, kept for reference.)
# ticker1.remove('M&MFIN.NS')
# ticker1.remove('REGI')
# ticker1.remove('ADSK')
# ticker_sum.remove('XYL')
# strategy1 = strategy1.drop('REGI')
# strategy1 = strategy1.drop('COUP')
# strategy1 = strategy1.drop('AFL')
# strategy1 = strategy1.drop('CBOE')
# strategy1= strategy1.drop('XYL')
# strategy1= strategy1.drop('L&TFH.NS')
# strategy1= strategy1.drop('PPHC.L')
# strategy3= strategy1.drop('CME')
# strategy1= strategy1.drop('OPK.TA')
# J&KBANK.NS
# L&TFH.NS
# L&TFH.NS
# Display floats in scientific notation with two decimals.
pd.set_option('display.float_format', '{:.2E}'.format)
from IPython.display import display

#     return stocks
# FinancialModelingPrep API key. NOTE(review): secret embedded in source —
# move to an environment variable.
api_key = '86dd63f6b8ae774b061232685b78eb52'
def get_profile(symbols):
    """Build a per-symbol company-profile table from FinancialModelingPrep.

    Combines three FMP endpoints for each symbol in *symbols*:
      * /profile          -> description, logo URL, name, market cap, last dividend
      * /earning_calendar -> known earning date(s), or "Not Available"
      * /quote            -> trailing PE ratio and EPS (formatted to 2 decimals)

    Parameters
    ----------
    symbols : iterable of str
        Ticker symbols understood by FMP.

    Returns
    -------
    pandas.DataFrame
        Indexed by symbol, sorted by index.

    Fix: ``DataFrame.append`` (used per-row before) was removed in pandas 2.0;
    rows are now collected in lists and built in a single constructor/concat.
    """
    def get_companyinfo(symbols):
        # One /profile request per symbol.
        import requests
        from datetime import datetime
        api_key = '86dd63f6b8ae774b061232685b78eb52'
        rows = []
        for symb in symbols:
            print(symb)
            comp = requests.get(f'https://financialmodelingprep.com/api/v3/profile/{symb}?apikey={api_key}').json()[0]
            rows.append({'description': comp['description'], 'symbol': comp['symbol'],
                         'logourl': comp['image'], 'name': comp['companyName'],
                         'mktCap': comp['mktCap'], 'lastDiv': comp['lastDiv']})
        stocks = pd.DataFrame(rows, columns=['description', 'symbol', 'logourl', 'name', 'mktCap', 'lastDiv'])
        # Timestamp of when the data was pulled (local time).
        stocks['Date_access'] = datetime.now().strftime("%d/%m/%Y %H:%M:%S")
        # Market cap rendered in millions, e.g. "1234.56MM".
        stocks['mktCap'] = (stocks['mktCap'].astype(float) / 1000000).round(2).astype(str) + 'MM'
        stocks = stocks.set_index('symbol')
        stocks = stocks.sort_index()
        return stocks

    stocks = get_companyinfo(symbols)

    ######################earningdate#########################

    def earning_date1(symbols):
        # The earning-calendar endpoint returns all upcoming earnings; map each
        # requested symbol to its date(s) or "Not Available".
        api_key = '86dd63f6b8ae774b061232685b78eb52'
        bs = requests.get(f'https://financialmodelingprep.com/api/v3/earning_calendar?apikey={api_key}').json()
        frames = [pd.DataFrame({'earning_date': [item['date']]}, index=[item['symbol']])
                  for item in bs]
        # Reversed to match the original prepend-style concat ordering
        # (matters only when a symbol appears more than once in the calendar).
        df = pd.concat(frames[::-1], axis=0) if frames else pd.DataFrame(columns=['earning_date'])

        parts = []
        for tick in symbols:
            if tick not in df.index:
                d1 = "Not Available"
            else:
                d1 = df['earning_date'][tick]
            parts.append(pd.DataFrame({'earning_date': [d1]},
                                      index=pd.Index([tick], name='symbol')))
        earning_date = pd.concat(parts, axis=0) if parts else pd.DataFrame(columns=['earning_date'])
        earning_date = earning_date.sort_index()
        return earning_date

    earning_date = earning_date1(symbols)

    ##################peratio##################
    def eps_pe(symbols):
        # /quote gives trailing PE and EPS; both formatted with 2 decimals.
        api_key1 = '86dd63f6b8ae774b061232685b78eb52'
        rows = []
        for symb in symbols:
            comp = requests.get(f'https://financialmodelingprep.com/api/v3/quote/{symb}?apikey={api_key1}').json()[0]
            rows.append({'pe_ratio(TTM)': comp['pe'], 'symbol': comp['symbol'],
                         'eps(TTM)': comp['eps']})
        da1 = pd.DataFrame(rows, columns=['pe_ratio(TTM)', 'symbol', 'eps(TTM)'])
        da1 = da1.set_index('symbol')
        da1[['pe_ratio(TTM)', 'eps(TTM)']] = da1[['pe_ratio(TTM)', 'eps(TTM)']].applymap("{0:,.2f}".format)
        da1 = da1.sort_index()
        return da1

    da2 = eps_pe(symbols)

    # Inner joins on the symbol index; result stays symbol-indexed and sorted.
    stock_info = stocks.merge(earning_date, left_index=True, right_index=True)
    stock_info_data = stock_info.merge(da2, left_index=True, right_index=True)
    stock_info_data = stock_info_data.sort_index()
    return stock_info_data

# Build the company-profile table for every strategy ticker.
stock_info_data = get_profile(ticker_sum)
#stock_info_data1 =stock_info_data.merge(strategy_sum3, right_index = True, left_index = True)
stock_info_data =stock_info_data .sort_index()

# (Reference: batching pattern to respect the API rate limit — split symbols
#  into n groups and sleep between groups.)
# n = 3# number of groups
# groups = np.array_split(symbols, n)
# da2 =pd.DataFrame()
# import time
# for group in groups:
#     da1 = eps_pe(group)
#     da2 =pd.concat([da2,da1], axis =0)
#     time.sleep(60)
# Persist the profile table for downstream merging.
# NOTE(review): hard-coded absolute Windows path — consider parameterizing.
stock_info_data.to_csv(r'C:\Users\jizha\Desktop\seabridge_datapool1\final_strategy_data_temporaly\stock_info_data.csv')

#stock_info_data = stock_info_data[['description','logourl',	'name','mktCap','volAvg','lastDiv',	'earning_date',	'pe_ratio(TTM)', 	'eps(TTM)']]


   
def get_image(symbols):
    """Build per-ticker image-URL tables hosted behind the ngrok tunnel.

    Parameters
    ----------
    symbols : iterable of str

    Returns
    -------
    (pandas.DataFrame, pandas.DataFrame)
        Both indexed by ticker: the first holds the strategy-return plot URL
        in column ``image_return``; the second holds the support/resistance
        plot URL in column ``im_res_sup``.
    """
    image_strategy_return = pd.DataFrame()
    image_rs = pd.DataFrame()

    for symbol in symbols:
        # Strategy-return plot URL.
        return_row = pd.DataFrame(
            {'image_return': ["https://3arbzfbsh-cname-us.ngrok.io/Desktop/seabridge%20fintech/profit_graph_app/" + symbol + "_Return.jpg"]},
            index=[symbol],
        )
        image_strategy_return = pd.concat([image_strategy_return, return_row], axis=0)

        ################resistance and resiistance #############
        rs_row = pd.DataFrame(
            {'im_res_sup': ["https://3arbzfbsh-cname-us.ngrok.io/Desktop/seabridge%20fintech/image_app/" + symbol + "_resistance_support.jpg"]},
            index=[symbol],
        )
        image_rs = pd.concat([image_rs, rs_row], axis=0)

    return image_strategy_return, image_rs

# Build image-URL tables for all tickers, then merge into one frame on ticker.
image_strategy_return,image_rs = get_image(ticker_sum)

image_rs = image_rs.sort_index()
image_strategy_return = image_strategy_return.sort_index()
image_data = image_rs.merge(image_strategy_return, left_index=True, right_index=True)

# Attach the image URLs to the company-profile table and persist it.
# NOTE(review): hard-coded absolute Windows path.
stock_info_data1 = stock_info_data.merge(image_data, right_index =True, left_index =True)
stock_info_data1 .to_csv(r'C:\Users\jizha\Desktop\seabridge_datapool1\final_strategy_data_temporaly\company_profile.csv')

#pip install python-dateutil



##GAI###############################################################
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
import pandas as pd
import mplfinance as mpf
from mplfinance.original_flavor import candlestick_ohlc
import matplotlib.dates as mpl_dates
import numpy as np
from datetime import *
import seaborn as sns
from datetime import datetime
import requests
from dateutil.relativedelta import relativedelta

# ----------- 安全的 KMeans 聚类 (就地替换原 get_optimum_clusters 内部函数逻辑) -----------
def _safe_kmeans_labels(df: pd.DataFrame, saturation_point: float = 0.05):
    """
    df: 单列 DataFrame（High 或 Low），index 为日期
    返回：与 df 同长度的 labels（np.ndarray[int]）
    - 空数据、仅1个样本 → 返回全0或空
    - k 的选择不超过 样本数-1 和 唯一值数量
    - 惰性“肘部法”：当 inertia 的相邻差值小于 saturation_point 时停止
    """
    # 只取数值列（第一列）
    if df is None or df.empty:
        return np.array([], dtype=int)
    X = pd.to_numeric(df.iloc[:, 0], errors='coerce').dropna().values.reshape(-1, 1)
    if X.size == 0:
        return np.array([], dtype=int)

    n = X.shape[0]
    uniq = np.unique(X)
    if n == 1 or len(uniq) == 1:
        return np.zeros(n, dtype=int)

    # 可行的 k 集合：1..8，但不超过 n-1、uniq 数
    max_k = max(1, min(8, n - 1, len(uniq)))
    ks = list(range(1, max_k + 1))

    wcss, k_models = [], []
    for k in ks:
        km = KMeans(n_clusters=k, init='random', max_iter=300, n_init=10, random_state=42)
        km.fit(X)
        wcss.append(km.inertia_)
        k_models.append(km)

    # 惰性“肘部”选择
    optimum_idx = len(wcss) - 1
    for i in range(0, len(wcss) - 1):
        if abs(wcss[i + 1] - wcss[i]) < saturation_point:
            optimum_idx = i
            break

    labels = k_models[optimum_idx].labels_
    # 注意：labels 长度与 dropna 后的 X 相同；需要与原 df 对齐
    # 用全长数组并在非 NaN 位置填入 labels，其余填充为 -1 以示“未参与聚类”
    full = pd.to_numeric(df.iloc[:, 0], errors='coerce')
    out = np.full(full.shape[0], -1, dtype=int)
    mask = full.notna().values
    out[mask] = labels
    return out

# ----------- get_data (with robustness hardening) -----------
def get_data(ticker):
    """Fetch ~6 months of daily OHLCV for *ticker* from FMP and build a
    1-row DataFrame (indexed by *ticker*) of pivot-point levels plus the
    most recent KMeans-derived support/resistance levels.

    On any fetch/shape problem the same columns are returned filled with NaN.
    """
    print(ticker)

    # Time window: past 6 months up to yesterday.
    today = datetime.today()
    end = (today - timedelta(days=1)).strftime('%Y-%m-%d')
    start = (today - relativedelta(months=6)).strftime('%Y-%m-%d')

    # Fetch data (FMP historical-price endpoint).
    api_key = '86dd63f6b8ae774b061232685b78eb52'
    url = f'https://financialmodelingprep.com/api/v3/historical-price-full/{ticker}'
    params = {"from": start, "to": end, "apikey": api_key}

    try:
        js = requests.get(url, params=params, timeout=15).json()
        bs = js.get('historical', [])
    except Exception as e:
        # Return the fixed output structure instead of crashing the caller.
        last_day = pd.DataFrame(index=[ticker])
        last_day['center_gravity'] = np.nan
        last_day['vol_up1'] = np.nan
        last_day['vol_up2'] = np.nan
        last_day['vol_up3'] = np.nan
        last_day['vol_down1'] = np.nan
        last_day['vol_down2'] = np.nan
        last_day['vol_down3'] = np.nan
        last_day['recent_support'] = np.nan
        last_day['recent_resistance'] = np.nan
        return last_day

    if not bs:
        # No data: return an all-NaN row.
        last_day = pd.DataFrame(index=[ticker])
        for col in ['center_gravity','vol_up1','vol_up2','vol_up3','vol_down1','vol_down2','vol_down3','recent_support','recent_resistance']:
            last_day[col] = np.nan
        return last_day

    stock = pd.DataFrame(bs)

    # Column check.
    need = {'date','open','high','low','close','volume'}
    if not need.issubset(stock.columns):
        last_day = pd.DataFrame(index=[ticker])
        for col in ['center_gravity','vol_up1','vol_up2','vol_up3','vol_down1','vol_down2','vol_down3','recent_support','recent_resistance']:
            last_day[col] = np.nan
        return last_day

    # Cleaning: select, rename, date-index, sort chronologically.
    stock = stock[['date','open','high','low','close','volume']].rename(
        columns={'date':'Date','open':'Open','high':'High','low':'Low','close':'Close','volume':'volume'}
    )
    stock['Date'] = pd.to_datetime(stock['Date'], errors='coerce')
    stock = stock.dropna(subset=['Date']).set_index('Date').sort_index()

    # Volume filter: coerce to numeric first, then drop thin sessions.
    stock['volume'] = pd.to_numeric(stock['volume'], errors='coerce').fillna(0)
    stock = stock[stock['volume'] >= 1000]

    if stock.empty:
        last_day = pd.DataFrame(index=[ticker])
        for col in ['center_gravity','vol_up1','vol_up2','vol_up3','vol_down1','vol_down2','vol_down3','recent_support','recent_resistance']:
            last_day[col] = np.nan
        return last_day

    # Drop the last 4 rows before clustering (original logic preserved).
    stock1 = stock.iloc[:-4, :]
    # NOTE(review): both branches below are identical — the if/else is
    # redundant; last_day is always the final row of the unclipped frame.
    if stock1.empty:
        last_day = stock.iloc[-1:].copy()
    else:
        last_day = stock.iloc[-1:].copy()

    # --- Clustering: safe KMeans over the High/Low columns ---
    Lows = pd.DataFrame(data=stock1, index=stock1.index, columns=['Low']).copy()
    Highs = pd.DataFrame(data=stock1, index=stock1.index, columns=['High']).copy()

    # Guard against all-NaN columns.
    if Lows['Low'].dropna().empty or Highs['High'].dropna().empty:
        # Clustering impossible; support/resistance stay empty below.
        low_labels = np.full(Lows.shape[0], -1, dtype=int)
        high_labels = np.full(Highs.shape[0], -1, dtype=int)
    else:
        low_labels = _safe_kmeans_labels(Lows)
        high_labels = _safe_kmeans_labels(Highs)

    Lows['labels'] = pd.Series(low_labels, index=Lows.index)
    Highs['labels'] = pd.Series(high_labels, index=Highs.index)

    # Candidate support/resistance levels: per-cluster min/max
    # (label == -1 means "not clustered" and is ignored).
    res = []
    sup = []
    if (Highs['labels'] >= 0).any():
        for i in np.unique(high_labels[high_labels >= 0]):
            res.append(Highs.loc[Highs.labels == i, 'High'].max())
    if (Lows['labels'] >= 0).any():
        for i in np.unique(low_labels[low_labels >= 0]):
            sup.append(Lows.loc[Lows.labels == i, 'Low'].min())

    # --- Pivot points (original formulas preserved) ---
    # last_day is a 1-row DataFrame.
    last_day = last_day[['Open','High','Low','Close','volume']].copy()
    last_day['center_gravity'] = (last_day['High'] + last_day['Low'] + last_day['Close']) / 3
    last_day['vol_up1'] = 2 * last_day['center_gravity'] - last_day['Low']
    last_day['vol_down1'] = 2 * last_day['center_gravity'] - last_day['High']
    last_day['vol_up2'] = last_day['center_gravity'] + (last_day['High'] - last_day['Low'])
    last_day['vol_down2'] = last_day['center_gravity'] - (last_day['High'] - last_day['Low'])
    last_day['vol_up3'] = last_day['center_gravity'] + 2 * (last_day['High'] - last_day['Low'])
    last_day['vol_down3'] = last_day['center_gravity'] - 2 * (last_day['High'] - last_day['Low'])

    # --- Date column for plotting (guard empty frames) ---
    if not Highs.empty:
        Highs = Highs.copy()
        Highs['Date'] = pd.to_datetime(Highs.index, errors='coerce')
        Highs = Highs.dropna(subset=['Date'])
        if not Highs.empty:
            Highs['Date'] = Highs['Date'].apply(mpl_dates.date2num)

    if not Lows.empty:
        Lows = Lows.copy()
        Lows['Date'] = pd.to_datetime(Lows.index, errors='coerce')
        Lows = Lows.dropna(subset=['Date'])
        if not Lows.empty:
            Lows['Date'] = Lows['Date'].apply(mpl_dates.date2num)

    # --- Flag rows whose High/Low sits in the candidate sets (vectorized, replaces a for loop) ---
    Highs['res'] = 0
    if res:
        Highs['res'] = Highs['High'].isin(res).astype(int)

    Lows['sup'] = 0
    if sup:
        Lows['sup'] = Lows['Low'].isin(sup).astype(int)

    # --- Most recent (last occurring) resistance/support (avoids append) ---
    resistance = Highs.loc[Highs['res'] == 1]
    support = Lows.loc[Lows['sup'] == 1]

    # NaN when no level was flagged.
    rec_res = resistance['High'].iloc[-1] if not resistance.empty else np.nan
    rec_sup = support['Low'].iloc[-1] if not support.empty else np.nan

    last_day['recent_support'] = rec_sup
    last_day['recent_resistance'] = rec_res

    # Re-index the row to the ticker symbol (original behavior).
    last_day.index = [ticker]
    return last_day

# ----------- strategy_signal4 (single concat; tolerant of a failing ticker) -----------
def strategy_signal4(symbols):
    """Run get_data over *symbols* and stack the 1-row results.

    A ticker whose fetch raises produces an all-NaN placeholder row instead
    of aborting the whole batch. An empty *symbols* yields an empty frame
    with the standard column set.
    """
    nan_columns = ['center_gravity', 'vol_up1', 'vol_up2', 'vol_up3',
                   'vol_down1', 'vol_down2', 'vol_down3',
                   'recent_support', 'recent_resistance']
    collected = []
    for symbol in symbols:
        try:
            row = get_data(symbol)
        except Exception:
            # One bad ticker must not break the batch: emit a NaN placeholder.
            row = pd.DataFrame(index=[symbol])
            for column in nan_columns:
                row[column] = np.nan
        collected.append(row)

    if not collected:
        return pd.DataFrame(columns=nan_columns)
    return pd.concat(collected, axis=0)


from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
import pandas as pd
import mplfinance as mpf
from mplfinance.original_flavor import candlestick_ohlc
import finnhub
import matplotlib.dates as mpl_dates
import numpy as np
from datetime import * 
import seaborn as sns
from datetime import datetime
# Compute pivot/support-resistance rows for all tickers, fix column order, persist.
support_resistance_data= strategy_signal4(ticker_sum)
support_resistance_data = support_resistance_data[['center_gravity','vol_up1','vol_up2','vol_up3','vol_down1', "vol_down2" , "vol_down3","recent_support","recent_resistance"]]
# NOTE(review): hard-coded absolute Windows path.
support_resistance_data .to_csv(r'C:\Users\jizha\Desktop\seabridge_datapool1\final_strategy_data_temporaly\support_resistance_data.csv')
 
#support_resistance_data = pd.concat([data1, data2, data3, data4, data5, data6, data7], axis = 0)


# strategy_sum1 = pd.read_csv(r'C:\Users\jizha\Desktop\seabridge_datapool\support_resistop_last_peak_data.csv')   
# strategy_sum1 =strategy_sum1.rename({'Unnamed: 0':'symbol'}, axis =1)
# strategy_sum1= strategy_sum1.rename({'pivot':'center_gravity'}, axis =1)
#####################stop loss####################################
# def get_data2(ticker):
#     print(ticker)
#     from datetime import datetime as dt
#     import datetime
#     import dateutil
#     import requests
#     import pandas as pd
#     import numpy as np

#     today = datetime.datetime.today()
#     delta = dateutil.relativedelta.relativedelta(months=6)
#     end = (today - datetime.timedelta(1)).strftime('%Y-%m-%d')
#     start = (today - delta).strftime('%Y-%m-%d')

#     # 优先用外部 api_key，缺失时兜底（建议改为环境变量）
#     try:
#         _api_key = api_key
#     except NameError:
#         _api_key = '86dd63f6b8ae774b061232685b78eb52'

#     url = f'https://financialmodelingprep.com/api/v3/historical-price-full/{ticker}'

#     # 拉取数据 + 容错
#     try:
#         resp = requests.get(url, params={"from": start, "to": end, "apikey": _api_key}, timeout=15)
#         resp.raise_for_status()
#         js = resp.json()
#         bs = js.get('historical', [])
#     except Exception:
#         bs = []

#     def _empty_out():
#         cols = ['Open','High','Low','Close','volume',
#                 'center_gravity','vol_up1','vol_down1','vol_up2','vol_down2','vol_up3','vol_down3',
#                 'stop_loss','stop_loss_pct']
#         return pd.DataFrame(index=[ticker], columns=cols, dtype=float)

#     if not bs:
#         return _empty_out()

#     stock = pd.DataFrame(bs)

#     # 列检查
#     need = {'date','open','high','low','close','volume'}
#     if not need.issubset(stock.columns):
#         return _empty_out()

#     # 清洗与索引（保持你原来的列名）
#     stock = stock[['date','open','high','low','close','volume']].rename(
#         columns={'date':'Date','open':'Open','high':'High','low':'Low','close':'Close','volume':'volume'}
#     )
#     stock['Date'] = pd.to_datetime(stock['Date'], errors='coerce')
#     stock = stock.dropna(subset=['Date']).set_index('Date').sort_index()

#     # 数值化，避免字符串导致计算异常
#     for c in ['Open','High','Low','Close','volume']:
#         stock[c] = pd.to_numeric(stock[c], errors='coerce')

#     # 可选：过滤极低成交量（如果你需要与其他函数口径一致，可解除下一行注释）
#     # stock = stock[stock['volume'].fillna(0) >= 1000]

#     if stock.empty:
#         return _empty_out()

#     def get_stop(stock):    # pivotpoints（保留你的公式与列名）
#         last_day = stock.iloc[-1].to_frame().T
#         not_pivots = last_day.columns  # 保留你的变量（未使用也不删除）

#         last_day['center_gravity'] = (last_day['High'] + last_day['Low'] + last_day['Close']) / 3
#         last_day['vol_up1'] = 2 * last_day['center_gravity'] - last_day['Low']
#         last_day['vol_down1'] = 2 * last_day['center_gravity'] - last_day['High']
#         last_day['vol_up2'] = last_day['center_gravity'] + (last_day['High'] - last_day['Low'])
#         last_day['vol_down2'] = last_day['center_gravity'] - (last_day['High'] - last_day['Low'])
#         last_day['vol_up3'] = last_day['center_gravity'] + 2 * (last_day['High'] - last_day['Low'])
#         last_day['vol_down3'] = last_day['center_gravity'] - 2 * (last_day['High'] - last_day['Low'])

#         tick = ticker
#         last_day.index = [tick]

#         # 止损与百分比（增加除零保护与非链式赋值）
#         last_day['stop_loss'] = (last_day['vol_down2'] + last_day['vol_down3']) / 2

#         close_val = float(last_day['Close'].iloc[0]) if pd.notna(last_day['Close'].iloc[0]) else np.nan
#         if close_val and close_val != 0:
#             sl_pct = (close_val - float(last_day['stop_loss'].iloc[0])) / close_val * 100.0
#         else:
#             sl_pct = np.nan
#         last_day.loc[tick, 'stop_loss_pct'] = sl_pct

#         # 若止损过宽，按你原逻辑改为更紧的均值
#         slp = last_day['stop_loss_pct'].iloc[0]
#         if pd.notna(slp) and slp > 3.5:
#             new_sl = (float(last_day['vol_down1'].iloc[0]) + float(last_day['vol_down2'].iloc[0])) / 2.0
#             last_day.loc[tick, 'stop_loss'] = new_sl

#         return last_day

#     last_day = get_stop(stock)
#     return last_day
# last_day = get_data2(ticker_sum)



def get_data2(ticker):
    """Fetch ~6 months of daily OHLCV for *ticker* from FMP and compute
    pivot-point stop-loss levels for the most recent trading day.

    Returns
    -------
    pandas.DataFrame
        One row indexed by *ticker* with the last day's OHLCV plus
        center_gravity, vol_up1..3, vol_down1..3, stop_loss and stop_loss_pct.

    Fixes: the original computed the result but never returned it (implicitly
    returning None, which pd.concat silently drops downstream); chained
    assignment and positional ``[-1]`` indexing replaced with .loc/.iloc.
    """
    print(ticker)
    from datetime import datetime as dt
    import datetime
    import dateutil

    # Window: past 6 months up to yesterday.
    today = datetime.datetime.today()
    delta = dateutil.relativedelta.relativedelta(months=6)
    end = (today - datetime.timedelta(1)).strftime('%Y-%m-%d')
    start = (today - delta).strftime('%Y-%m-%d')

    import requests
    from datetime import datetime

    # NOTE: relies on the module-level api_key defined earlier in this file.
    bs = requests.get(f'https://financialmodelingprep.com/api/v3/historical-price-full/{ticker}?from={start}&to={end}&apikey={api_key}').json()['historical']
    stock = pd.DataFrame(bs)
    stock = stock[['date', 'open', 'high', 'low', 'close', 'volume']]
    stock = stock.rename(columns={'date': 'Date', 'open': 'Open', 'high': 'High', 'low': 'Low', 'close': 'Close', 'volume': 'volume'})
    stock = stock.set_index('Date')
    stock = stock.sort_index()

    def get_stop(stock):
        # Classic pivot-point levels from the last day's High/Low/Close.
        last_day = stock.iloc[-1].to_frame().T
        last_day['center_gravity'] = (last_day['High'] + last_day['Low'] + last_day['Close']) / 3
        last_day['vol_up1'] = 2 * last_day['center_gravity'] - last_day['Low']
        last_day['vol_down1'] = 2 * last_day['center_gravity'] - last_day['High']
        last_day['vol_up2'] = last_day['center_gravity'] + (last_day['High'] - last_day['Low'])
        last_day['vol_down2'] = last_day['center_gravity'] - (last_day['High'] - last_day['Low'])
        last_day['vol_up3'] = last_day['center_gravity'] + 2 * (last_day['High'] - last_day['Low'])
        last_day['vol_down3'] = last_day['center_gravity'] - 2 * (last_day['High'] - last_day['Low'])
        tick = ticker
        last_day.index = [tick]

        # Default stop: midpoint of the 2nd and 3rd downside pivots.
        last_day['stop_loss'] = (last_day['vol_down2'] + last_day['vol_down3']) / 2
        last_day['stop_loss_pct'] = (last_day['Close'] - last_day['stop_loss']) / last_day['Close'] * 100

        # If the stop is wider than 3.5%, tighten it to the midpoint of the
        # 1st and 2nd downside pivots (non-chained assignment via .loc).
        if last_day['stop_loss_pct'].iloc[-1] > 3.5:
            last_day.loc[tick, 'stop_loss'] = (last_day['vol_down1'].iloc[-1] + last_day['vol_down2'].iloc[-1]) / 2

        return last_day

    # Bug fix: the original dropped this value on the floor.
    return get_stop(stock)
    
import pandas as pd
import numpy as np
import requests
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
import pandas as pd
import numpy as np
import requests
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta

def get_data3(ticker):
    """Find the most recent 'green line' peak for *ticker*: a running high
    within the 6-month window that then went unbroken for 5 lower sessions
    (and was not set today).

    Returns
    -------
    pandas.DataFrame
        One row indexed by *ticker* with:
          * last_peak    -- the peak price, or an explanatory message string
          * last_peakday -- the peak date, or NaT when unavailable

    Fix: an unreachable tail (a mis-indented strategy_signal6 helper and a
    stranded EMA/ATR fragment referencing undefined names) sat after this
    function's final return; it could never execute and has been removed.
    The time-window computation now also lives inside the try so that any
    early failure yields the structured error row rather than an exception.
    """
    print(ticker)

    api_key = '86dd63f6b8ae774b061232685b78eb52'

    # ---- Time window (past 6 months up to yesterday) + fetch, fault tolerant ----
    try:
        today = datetime.today()
        end = (today - timedelta(days=1)).strftime('%Y-%m-%d')
        start = (today - relativedelta(months=6)).strftime('%Y-%m-%d')

        url = f'https://financialmodelingprep.com/api/v3/historical-price-full/{ticker}'
        params = {"from": start, "to": end, "apikey": api_key}
        r = requests.get(url, params=params, timeout=15)
        r.raise_for_status()
        js = r.json()
        bs = js.get('historical', [])
    except Exception as e:
        out = pd.DataFrame(index=[ticker])
        out['last_peak'] = [f'{ticker} data error: {type(e).__name__}']
        out['last_peakday'] = [pd.NaT]
        return out

    if not bs:
        out = pd.DataFrame(index=[ticker])
        out['last_peak'] = [f'{ticker} has not formed a green line yet']
        out['last_peakday'] = [pd.NaT]
        return out

    stock = pd.DataFrame(bs)

    # ---- Column check & cleaning ----
    need = {'date','open','high','low','close','volume'}
    if not need.issubset(stock.columns):
        out = pd.DataFrame(index=[ticker])
        miss = sorted(list(need - set(stock.columns)))
        out['last_peak'] = [f'{ticker} missing columns: {miss}']
        out['last_peakday'] = [pd.NaT]
        return out

    # 'volumn' spelling kept for compatibility with downstream consumers.
    stock = stock[['date','open','high','low','close','volume']].rename(
        columns={'date':'Date','open':'Open','high':'High','low':'Low','close':'Close','volume':'volumn'}
    )

    stock['Date'] = pd.to_datetime(stock['Date'], errors='coerce')
    stock = stock.dropna(subset=['Date']).set_index('Date').sort_index()

    # Volume filter: coerce to numeric first, then drop thin sessions.
    stock['volumn'] = pd.to_numeric(stock['volumn'], errors='coerce').fillna(0)
    stock = stock[stock['volumn'] >= 1000]

    if stock.empty:
        out = pd.DataFrame(index=[ticker])
        out['last_peak'] = [f'{ticker} has not formed a green line yet']
        out['last_peakday'] = [pd.NaT]
        return out

    # Daily High maximum, NaNs dropped.
    dfday = stock.groupby(pd.Grouper(freq='D'))['High'].max().dropna()
    if dfday.empty:
        out = pd.DataFrame(index=[ticker])
        out['last_peak'] = [f'{ticker} has not formed a green line yet']
        out['last_peakday'] = [pd.NaT]
        return out

    # ---- Green-line scan (original logic preserved) ----
    now = datetime.now().date()
    glDate = pd.NaT
    lastGLV = 0.0
    currentDate = pd.NaT
    curentGLV = 0.0
    counter = 0

    for index, value in dfday.items():
        if pd.isna(value):
            continue

        if value > curentGLV:
            # New running high: restart the "unbroken" counter.
            curentGLV = float(value)
            currentDate = index
            counter = 0
        elif value < curentGLV:
            counter += 1
            # High survived 5 lower sessions (and was not set today): confirm.
            if counter == 5 and index.date() != now:
                glDate = currentDate
                lastGLV = curentGLV
                counter = 0

    if lastGLV == 0:
        last_peak = f"{ticker} has not formed a green line yet"
        last_peak_date = pd.NaT
    else:
        last_peak = lastGLV
        last_peak_date = glDate

    out = pd.DataFrame(index=[ticker])
    out['last_peak'] = [last_peak]
    out['last_peakday'] = [last_peak_date]
    return out



# from datetime import datetime as dt
# import datetime
# today = dt.today()
# yesterday = today-datetime.timedelta(1)
# start = (2021,1,1)
# end = (yesterday.year, yesterday.month, yesterday.day)
####################################优化


def strategy_signal5(symbols):
    """Build the stop-loss table for every ticker in *symbols*.

    Calls get_data2() once per ticker and stacks the one-row results into
    a single DataFrame (indexed by ticker). Returns an empty DataFrame
    when *symbols* is empty.
    """
    # Collect the per-ticker frames in a list and concat once at the end:
    # calling pd.concat inside the loop re-copies all accumulated rows on
    # every iteration (quadratic in len(symbols)).
    frames = []
    for ticker in symbols:
        frames.append(get_data2(ticker))
    return pd.concat(frames, axis=0) if frames else pd.DataFrame()

# Build the stop-loss / trailing-stop table for every ticker and persist it.
# NOTE(review): ticker_sum and get_data2 are defined earlier in this file.
stop_loss_data = strategy_signal5(ticker_sum) 
#stop_loss = strategy_signal5(symbols)

# Optional batching (kept for reference): split the symbols into n groups
# and sleep between groups, presumably to stay under the data provider's
# rate limit -- confirm before re-enabling.
# n= 4# number of groups
# groups = np.array_split(symbols, n)
# import time 
# stop_loss_data = pd.DataFrame()
# for symbol in groups:
#     f= strategy_signal5(symbol) 
#     stop_loss_data = pd.concat([f, stop_loss_data], axis =0)
#     time.sleep(60)
    
# NOTE(review): hard-coded absolute Windows path; consider making it configurable.
stop_loss_data .to_csv(r'C:\Users\jizha\Desktop\seabridge_datapool1\final_strategy_data_temporaly\stop_loss_data.csv')    

   #####################last peak price###################
def get_data3(ticker):
    """Fetch ~6 months of daily bars for *ticker* and find its latest
    "green line" peak.

    A candidate peak is the running maximum of the daily highs; it is
    confirmed once 5 subsequent lower days have passed (and the 5th lower
    day is not today, so a still-forming peak does not count).

    Returns a one-row DataFrame indexed by *ticker* with columns:
      last_peak    -- the confirmed peak price, or the string
                      "<ticker> has not formed a green line yet"
      last_peakday -- the date of that peak, or 0 when none was found
    """
    print(ticker)
  
    from datetime import datetime as dt
    import datetime
    import dateutil
    # Date window: from ~6 months ago up to yesterday.
    today = datetime.datetime.today()
    delta = dateutil.relativedelta.relativedelta(months=6)
    end = ( today-datetime.timedelta(1)).strftime('%Y-%m-%d') 
    start = (today - delta).strftime('%Y-%m-%d')
   # ticker = 'ACM'
    import requests
    # SECURITY: hard-coded API key checked into source -- move it to an
    # environment variable or config file.
    api_key = '86dd63f6b8ae774b061232685b78eb52'
    from datetime import datetime
    bs = requests.get(f'https://financialmodelingprep.com/api/v3/historical-price-full/{ticker}?from={start}&to={end}&apikey={api_key}').json()['historical']
    stock = pd.DataFrame(bs)
    stock =stock[['date','open','high','low','close','volume']]
    # NOTE: 'volumn' (sic) is the volume column name used throughout this file.
    stock = stock.rename(columns = {'date':'Date', 'open':'Open', 'high':'High','low':'Low', 'close':'Close', 'volume':'volumn'})
    stock = stock.set_index('Date')
    stock= stock.sort_index()
  
    
    # from datetime import datetime
    # finnhub_client = finnhub.Client(api_key = "bt3efpf48v6tfcs816eg")
    # start = np.array(start).tolist()
    # end =np.array(end).tolist()
    # start = start
    # end = end
  
    # start_time = int(datetime(start[0], start[1], start[2], 0, 0).replace(tzinfo = timezone.utc).timestamp())
    # end_time = int(datetime(end[0], end[1], end[2], 0, 0).replace(tzinfo = timezone.utc).timestamp())
    # res_d = finnhub_client.stock_candles(ticker, 'D', start_time, end_time)

    # stock = pd.DataFrame(res_d)
    # stock = stock.rename(columns = {'t':'Date', 'o':'Open', 'h':'High', 'l':'Low', 'c':'Close', 's':'status', 'v':'volumn'})
    # stock['Date'] = pd.to_datetime(stock['Date'], unit = 's')
    # stock = stock.set_index('Date')
    # stock= stock[['Close', 'High', 'Low','Open','volumn']]   
    
 
    df =stock.copy()
    # df['Date'] = pd.to_datetime(df.index, format='%d-%b-%y %H.%M.%S.%f %p', errors='coerce')
    # df =df.set_index('Date')
    
    
    # Drop illiquid days (volume < 1000) so thin prints cannot form a peak.
    df.drop(df[df["volumn"]<1000].index, inplace=True)
    df.index = pd.to_datetime(df.index)
    # Daily highs; days removed above become NaN here, and NaN fails both
    # comparisons in the loop, so they are effectively skipped.
    dfday=df.groupby(pd.Grouper(freq="D"))["High"].max()
    now = datetime.now() 
    glDate=0          # date of the last CONFIRMED green-line peak
    lastGLV=0         # price of the last confirmed green-line peak
    currentDate=""    # date of the current candidate (running-max) peak
    curentGLV=0       # price of the current candidate peak
    for index, value in dfday.items():
      if value > curentGLV:
        # New running high: restart the candidate and confirmation count.
        curentGLV=value
        currentDate=index
        counter=0
      if value < curentGLV:
        counter=counter+1
        # Candidate confirmed after 5 lower days -- unless that 5th day is
        # today (same day and month as now), i.e. the peak may still form.
        if counter==5 and ((index.day != now.day) or (index.month != now.month)):
            if curentGLV != lastGLV:
               print(curentGLV)
            glDate=currentDate
            lastGLV=curentGLV
            counter=0
    if lastGLV==0:
       last_peak = ticker+" has not formed a green line yet"
       last_peak_date = 0
    else:
        last_peak =  lastGLV
        last_peak_date = glDate
        
    # Package the result as a one-row frame indexed by the ticker symbol so
    # the caller can stack many tickers with pd.concat.
    strategy_data6 =pd.DataFrame()
    strategy_data6['last_peak'] = [last_peak]
    strategy_data6['last_peakday'] = [last_peak_date]
    tick = ticker
    strategy_data6.index= [tick]
    return strategy_data6

# from datetime import datetime as dt
# import datetime
# today = dt.today()
# yesterday = today-datetime.timedelta(1)
# start = (2021,1,15)
# end = (yesterday.year, yesterday.month, yesterday.day)

def strategy_signal6(symbols):
    """Build the last-peak ("green line") table for every ticker in *symbols*.

    Calls get_data3() once per ticker and stacks the one-row results into
    a single DataFrame (indexed by ticker). Returns an empty DataFrame
    when *symbols* is empty.
    """
    # Collect the per-ticker frames in a list and concat once at the end:
    # calling pd.concat inside the loop re-copies all accumulated rows on
    # every iteration (quadratic in len(symbols)).
    frames = []
    for tick in symbols:
        print(tick)  # progress indicator, as in the original
        frames.append(get_data3(ticker=tick))
    return pd.concat(frames, axis=0) if frames else pd.DataFrame()




# Build the last-peak ("green line") table for every ticker and persist it.
last_peak_data = strategy_signal6(ticker_sum) 

# NOTE(review): hard-coded absolute Windows path; consider making it configurable.
last_peak_data.to_csv(r'C:\Users\jizha\Desktop\seabridge_datapool1\final_strategy_data_temporaly\last_peak_data.csv')    
    
    
# last_peak_data.to_csv(r'C:\Users\jizha\Desktop\seabridge_datapool\last_peak_data.csv')
##########################################
#strategy= strategy.sort_index()

# Combine the three per-ticker strategy tables into one report and save it.
# All frames are indexed by ticker symbol, so index-on-index merges line the
# rows up one-to-one.
support_resistance_data = support_resistance_data.sort_index()
stop_loss_data = stop_loss_data.sort_index()
last_peak_data = last_peak_data.sort_index()

strategy_sum = support_resistance_data.merge(stop_loss_data, left_index=True, right_index=True)
strategy_sum1 = strategy_sum.merge(last_peak_data, left_index=True, right_index=True)
strategy_sum1 = strategy_sum1.copy()
strategy_sum1 = strategy_sum1.sort_index()

# Numeric columns to normalize and pretty-print -- declared once instead of
# repeating the 13-column list for every operation.
_NUM_COLS = ['center_gravity', 'vol_up1', 'vol_up2', 'vol_up3', 'vol_down1',
             'vol_down2', 'vol_down3', 'recent_support', 'recent_resistance',
             'recent_stoploss', 'recent_trailingstoploss', 'atr', 'last_peak']

# 'last_peak' may hold the sentinel string "<ticker> has not formed a green
# line yet" (see get_data3); a plain astype(float) would raise on it, so
# coerce those entries to NaN instead of crashing the whole report.
strategy_sum1[_NUM_COLS] = strategy_sum1[_NUM_COLS].apply(pd.to_numeric, errors='coerce')
# Format as thousands-separated 2-decimal strings (NaN renders as 'nan').
strategy_sum1[_NUM_COLS] = strategy_sum1[_NUM_COLS].applymap("{0:,.2f}".format)

stock_info_data2 = stock_info_data1.merge(strategy_sum1, right_index=True, left_index=True)

# NOTE(review): hard-coded absolute Windows path; consider making it configurable.
stock_info_data2.to_csv(r'C:\Users\jizha\Desktop\seabridge_datapool1\final_strategy_data_temporaly\support_resistop_last_peak_data.csv')