将ggplot2安装到jupyter笔记本中

问题描述 投票:0回答:1

我是一名大学生,他并不真正理解我在做什么,但是当我在我的 jupyter 笔记本中输入 install.packages("ggplot2") 时,我收到错误 警告信息: “无法访问存储库的索引https://cran.r-project.org/bin/windows/contrib/3.6: 无法打开 URL 'https://cran.r-project.org/bin/windows/contrib/3.6/PACKAGES'"仅以源代码形式提供的包,并且可能需要 C/C++/Fortran 编译:'cli' 'farver' 'isoband' 'rlang' 'scales' 'tibble' 'vctrs' 这些不会被安装 安装源包“pillar”、“lifecycle”、“ggplot2”

install.packages(“ggplot2”)中的警告消息: “安装包‘lifecycle’的退出状态为非零”install.packages(“ggplot2”)中的警告消息: “安装包‘pillar’的退出状态为非零”install.packages(“ggplot2”)中的警告消息: “包‘ggplot2’的安装具有非零退出状态”

当我试图寻找答案时,我真的无法弄清楚。

我使用的笔记本版本是6.5.4 服务器运行在这个版本的 Python 上: Python 3.11.5 |由 Anaconda, Inc. 包装 | (主要,2023 年 9 月 11 日,13:26:23)[MSC v.1916 64 位 (AMD64)]

如果有人可以帮助(非常!)易于遵循的说明,那就太好了。 我尝试安装 tidyverse, install.packages("ggplot2", source="type") 并得到相同的错误。

提前致谢!

如果有人可以帮助(非常!)易于遵循的说明,那就太好了。 我尝试安装 tidyverse, install.packages("ggplot2", source="type") 并得到相同的错误。

r ggplot2 jupyter-notebook
1个回答
0
投票
ANASTYLE
def cure_cols(df, filename):
    """Drop uninformative columns in place; for 'vector' files keep only column 1.

    Low-cardinality object columns (<= 5 distinct values) and columns with no
    values at all carry no signal for the downstream model, so they are removed.
    Mutates the caller's frame (inplace drop) and returns it.
    """
    to_drop = [
        col for col in df.columns
        if (df[col].dtype.kind == 'O' and df[col].nunique() <= 5)
        or df[col].nunique() == 0
    ]
    df.drop(to_drop, axis=1, inplace=True)
    if 'vector' in filename:
        # Vector files: only the second column is meaningful.
        df = pd.DataFrame(df.iloc[:, 1])
    return df

def cure_rows(df_column):
    """Coerce every element of a column to int64, else float64, else NaN.

    Mutates and returns the column's underlying numpy values array.

    BUG FIXES vs. original:
    - Bare ``except:`` clauses replaced with the specific conversion errors.
    - The original called ``.astype`` on each element, which only exists on
      numpy scalars — plain Python ints/floats/strings raised AttributeError
      and were silently turned into NaN.  Using the ``np.int64``/``np.float64``
      constructors converts those values correctly.
    - ``np.float64('nan').astype(np.int64)`` silently produced a garbage
      integer; ``np.int64(nan)`` raises and now falls through to NaN/float.
    """
    values = df_column.values
    for i in range(len(values)):
        try:
            values[i] = np.int64(values[i])
        except (TypeError, ValueError, OverflowError):
            try:
                values[i] = np.float64(values[i])
            except (TypeError, ValueError):
                values[i] = np.nan
    return values



def sound_handle(data, sr):
    """Collapse raw audio into per-second statistics.

    Splits *data* into windows of *sr* samples (one second each) and returns a
    DataFrame with one row per full second and four columns, in this order:
    mean, min, max, std.  A trailing partial second is discarded.
    """
    n_seconds = len(data) // sr
    windows = (data[t * sr:(t + 1) * sr] for t in range(n_seconds))
    stats = [
        (np.mean(w), np.min(w), np.max(w), np.std(w))
        for w in windows
    ]
    return pd.DataFrame(np.array(stats, dtype=np.float64).reshape(n_seconds, 4))
 `(https://ru.wikipedia.org/wiki/%D0%94%D0%B5%D0%B9%D1%81%D1%82%D0%B2%D1%83%D1%8E%D1%89%D0%B5%D0%B5_%D0%B7%D0%BD%D0%B0%D1%87%D0%B5%D0%BD%D0%B8%D0%B5_%D0%BF%D0%B5%D1%80%D0%B5%D0%BC%D0%B5%D0%BD%D0%BD%D0%BE%D0%B3%D0%BE_%D1%82%D0%BE%D0%BA%D0%B0)
`
def el_handle(df, hz):
    """Reduce an electrical signal to one RMS value per *hz*-sample window.

    For each full window of *hz* samples, computes the root-mean-square
    value sqrt(sum(x^2) / hz).  A trailing partial window is discarded.
    Returns a single-column DataFrame of the per-window RMS values.
    """
    n_windows = len(df) // hz
    rms = np.ndarray((n_windows,))
    for w in range(n_windows):
        window = df.iloc[w * hz:(w + 1) * hz]
        rms[w] = np.sqrt((window ** 2).sum() / hz)
    return pd.DataFrame(rms)


# NOTE(review): the scraped source split the 'File name' column key across
# physical lines (a syntax error); it is reconstructed here as 'File name' —
# confirm against the metadata table `table_exp`.
for experiment in range(1, 10):
    # Build one wide dataframe per experiment from its listed source files.
    temp_df = pd.DataFrame()
    # BUG FIX: `length` leaked between experiments (and could be unbound on
    # the first one); reset per experiment and truncate only when set.
    length = None
    for filename in table_exp['File name'][table_exp["№"] == experiment]:
        # Files flagged 'no' are excluded from the experiment.
        if table_exp['В в э?'][table_exp['File name'] == filename].values[0] == 'no':
            continue
        print('в о: ', filename)
        ext = filename.split('.')[-1]
        if ext == 'csv':
            # 'Т'/'Н' channels are raw electrical signals: pure float32
            # payload with 5 metadata rows to skip.
            if table_exp['Decoding'][table_exp['File name'] == filename].values in ['Т', "Н"]:
                df = pd.read_csv(filedir + '/' + filename, header=None,
                                 skiprows=5, dtype=np.float32)
            else:
                df = pd.read_csv(filedir + '/' + filename, header=None, skiprows=1)
        elif ext == 'xlsx':
            # BUG FIX: the original fell through after printing and
            # re-processed the PREVIOUS file's dataframe; Excel files are
            # not actually handled, so skip them explicitly.
            print('Excel!')
            continue
        elif ext == 'wav':
            sr = table_exp['sampling rate'][table_exp['File name'] == filename].values[0]
            data, sr = librosa.load(filedir + '/' + filename, sr=sr)
            # Keep the raw audio around for later inspection.
            with open('   ' + str(experiment) + '_raw_audio.pkl', 'wb') as file:
                pickle.dump([data, sr], file)
            df = sound_handle(data, sr)
        df = cure_cols(df, filename)
        df = memory_cut(df, filename)
        # Electrical channels are reduced to one RMS value per second.
        if table_exp['Decoding'][table_exp['File name'] == filename].values in ['Т', "Н"]:
            df = el_handle(
                df,
                table_exp['sampling rate'][table_exp['File name'] == filename].values[0])
        if ext != 'wav':
            if filename[:2] == 't_':
                df.columns = ['coolant temperature']
            else:
                df.columns = table_exp['Decoding'][table_exp['File name'] == filename].values
        else:
            # BUG FIX: sound_handle emits (mean, min, max, std) in that
            # order; the original labels swapped 'max' and 'min'.
            df.columns = ['mean', 'min', 'max', 'std']
        if filename[:2] == 't_':
            # BUG FIX: interpolate(inplace=True) on an iloc slice operated on
            # a temporary and could be silently lost; assign the result back.
            df.iloc[:, 0] = df.iloc[:, 0].interpolate()
            length = df.iloc[:, 0].shape[0]
        temp_df = pd.concat([temp_df, df], axis=1)
        temp_df['experiment'] = experiment
    if length is not None:
        # Align every channel to the coolant-temperature channel's length.
        temp_df = temp_df.iloc[:length]
    temp_df.to_pickle('' + str(experiment) + '_df.pkl')
    mem = temp_df.memory_usage(deep=True).sum() / (1024**3)
    del temp_df
    print(f'experiment #{experiment} processed', np.round(mem, 1))

def combine_experiments():
    """Concatenate the nine per-experiment pickles into one DataFrame.

    Reads '<n>_df.pkl' for n in 1..9 from the working directory, stacks the
    frames row-wise, and returns them ordered by the 'experiment' column.
    The original row index is preserved as an 'index' column
    (``reset_index`` without ``drop``).
    """
    # Collect first and concat once — repeated concat in a loop is quadratic.
    frames = [pd.read_pickle(str(experiment) + '_df.pkl')
              for experiment in range(1, 10)]
    combined = pd.concat(frames, axis=0)
    # BUG FIX: sort_values returns a new frame; the original discarded it.
    combined = combined.sort_values(['experiment'])
    return combined.reset_index()
# Build the combined dataframe; a bare `df` displays it in a notebook cell.
df = combine_experiments()
df

def nan_handle(df):
    """Fill NaNs forward, then backward, in place; returns the same frame.

    The backward pass only matters for leading NaNs, which take the first
    valid value of their column.
    """
    # fillna(method=...) is deprecated since pandas 2.1; ffill/bfill are the
    # direct replacements with identical semantics.
    df.ffill(inplace=True)
    df.bfill(inplace=True)
    return df

# Re-pickle every experiment with NaNs filled.
# BUG FIX: the loop variable is `exp`, but the body read the unrelated name
# `experiment` — every iteration touched the same (or an undefined) file.
# NOTE(review): the path joins 'dataset' and the number with no separator
# ('...dataset1_df.pkl') — confirm that matches the files on disk.
for exp in range(1, 10):
    df = pd.read_pickle(r'D:\КЗ\КЗ\dataset' + str(exp) + '_df.pkl')
    df = nan_handle(df)
    df.to_pickle(r'D:\КЗ\КЗ\dataset' + str(exp) + '_df.pkl')

def combine_experiments():
    """Read the nine experiment pickles from disk and stack them in order.

    Loads '...dataset<n>_df.pkl' for n in 1..9, concatenates row-wise,
    sorts by the 'experiment' column and returns the result with a fresh
    0..N-1 index (old index dropped).
    """
    frames = []
    for exp_no in range(1, 10):
        frames.append(pd.read_pickle(r'D:\КЗ\КЗ\dataset' + str(exp_no) + '_df.pkl'))
    stacked = pd.concat(frames, axis=0).sort_values(['experiment'])
    return stacked.reset_index(drop=True)

# Build the cleaned combined dataframe from the on-disk dataset pickles.
df = combine_experiments()

`
def combine_experiments():
    """Concatenate the nine per-experiment pickles into one DataFrame.

    Reads '<n>_df.pkl' for n in 1..9 from the working directory, stacks the
    frames row-wise, and returns them ordered by the 'experiment' column.
    The original row index is preserved as an 'index' column
    (``reset_index`` without ``drop``).
    """
    # Collect first and concat once — repeated concat in a loop is quadratic.
    frames = [pd.read_pickle(str(experiment) + '_df.pkl')
              for experiment in range(1, 10)]
    combined = pd.concat(frames, axis=0)
    # BUG FIX: sort_values returns a new frame; the original discarded it.
    combined = combined.sort_values(['experiment'])
    return combined.reset_index()
# Build the combined dataframe; a bare `df` displays it in a notebook cell.
df = combine_experiments()
df

def nan_handle(df):
    """Fill NaNs forward, then backward, in place; returns the same frame.

    The backward pass only matters for leading NaNs, which take the first
    valid value of their column.
    """
    # fillna(method=...) is deprecated since pandas 2.1; ffill/bfill are the
    # direct replacements with identical semantics.
    df.ffill(inplace=True)
    df.bfill(inplace=True)
    return df

# Re-pickle every experiment with NaNs filled.
# BUG FIXES: the loop variable is `exp`, but the body read the unrelated name
# `experiment`; and the to_pickle argument had mismatched quotes
# (r' + str(experiment) + '_df.pkl') — a syntax error in the original.
for exp in range(1, 10):
    df = pd.read_pickle(str(exp) + '_df.pkl')
    df = nan_handle(df)
    df.to_pickle(str(exp) + '_df.pkl')
© www.soinside.com 2019 - 2024. All rights reserved.