Can I export TensorFlow scalar summaries to CSV?

Question: 9 votes, 3 answers

Is there a way to extract scalar summaries from tfevents files to CSV (preferably from within TensorBoard)?

Example code

The code below produces tfevents files in a summary_dir within the same directory. Suppose you let it run and you find something interesting. You want to get hold of the raw data for further investigation. How would you do that?

#!/usr/bin/env python
"""A very simple MNIST classifier."""
import argparse
import sys

from tensorflow.examples.tutorials.mnist import input_data

import tensorflow as tf
ce_with_logits = tf.nn.softmax_cross_entropy_with_logits

FLAGS = None


def inference(x):
    """
    Build the inference graph.

    Parameters
    ----------
    x : placeholder

    Returns
    -------
    Output tensor with the computed logits.
    """
    W = tf.Variable(tf.zeros([784, 10]))
    b = tf.Variable(tf.zeros([10]))
    y = tf.matmul(x, W) + b
    return y


def loss(logits, labels):
    """
    Calculate the loss from the logits and the labels.

    Parameters
    ----------
    logits : Logits tensor, float - [batch_size, NUM_CLASSES].
    labels : Labels tensor, int32 - [batch_size]
    """
    cross_entropy = tf.reduce_mean(ce_with_logits(labels=labels,
                                                  logits=logits))
    return cross_entropy


def training(loss, learning_rate=0.5):
    """
    Set up the training Ops.

    Parameters
    ----------
    loss : Loss tensor, from loss().
    learning_rate : The learning rate to use for gradient descent.

    Returns
    -------
    train_op: The Op for training.
    """
    optimizer = tf.train.GradientDescentOptimizer(learning_rate)
    train_step = optimizer.minimize(loss)
    return train_step


def main(_):
    # Import data
    mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True)

    # Create the model
    x = tf.placeholder(tf.float32, [None, 784])
    y = inference(x)

    # Define loss and optimizer
    y_ = tf.placeholder(tf.float32, [None, 10])
    loss_ = loss(logits=y, labels=y_)
    train_step = training(loss_)

    # Test trained model
    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    with tf.name_scope('accuracy'):
        tf.summary.scalar('accuracy', accuracy)
    merged = tf.summary.merge_all()

    sess = tf.InteractiveSession()
    train_writer = tf.summary.FileWriter('summary_dir/train', sess.graph)
    test_writer = tf.summary.FileWriter('summary_dir/test', sess.graph)
    tf.global_variables_initializer().run()

    for train_step_i in range(100000):
        if train_step_i % 100 == 0:
            summary, acc = sess.run([merged, accuracy],
                                    feed_dict={x: mnist.test.images,
                                               y_: mnist.test.labels})
            test_writer.add_summary(summary, train_step_i)
            summary, acc = sess.run([merged, accuracy],
                                    feed_dict={x: mnist.train.images,
                                               y_: mnist.train.labels})
            train_writer.add_summary(summary, train_step_i)
        batch_xs, batch_ys = mnist.train.next_batch(100)
        sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})

    print(sess.run(accuracy, feed_dict={x: mnist.test.images,
                                        y_: mnist.test.labels}))

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--data_dir',
                        type=str,
                        default='/tmp/tensorflow/mnist/input_data',
                        help='Directory for storing input data')
    FLAGS, unparsed = parser.parse_known_args()
    tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
Tags: csv, tensorflow
3 Answers
15 votes

Just check the "Show data download links" option in the upper left of TensorBoard, and then click the "CSV" button that appears below your scalar summary.
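
If you want to script that same download, TensorBoard also serves the CSV over HTTP. A minimal sketch, assuming TensorBoard is running on localhost:6006 and that your TensorBoard version exposes the scalars plugin endpoint that the CSV button links to (the run and tag names below are hypothetical and depend on your summaries):

import requests

run = 'train'              # hypothetical run name (a subfolder of summary_dir)
tag = 'accuracy/accuracy'  # hypothetical scalar tag
url = 'http://localhost:6006/data/plugin/scalars/scalars'

# The endpoint returns CSV with the columns: Wall time, Step, Value.
resp = requests.get(url, params={'run': run, 'tag': tag, 'format': 'csv'})
resp.raise_for_status()

with open(run + '_' + tag.replace('/', '_') + '.csv', 'w') as f:
    f.write(resp.text)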



12 votes

While the answer above does what was asked for within TensorBoard, it only lets you download a CSV for a single tag of a single run. If you have, say, 10 tags and 20 runs (which is not many at all), you would need to repeat the step above 200 times (that alone would probably take you more than an hour). If you then for some reason wanted to actually do something with the data of one tag across all runs, you would need to write some awkward CSV-accumulation script or copy everything by hand (which would probably cost you more than a day).

Therefore, I would like to add a solution that extracts one CSV file per tag, containing all runs. The column headers are the run directory names and the row index holds the run step numbers.

import os
import numpy as np
import pandas as pd

from collections import defaultdict
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator


def tabulate_events(dpath):
    # One EventAccumulator per run directory under dpath.
    summary_iterators = [EventAccumulator(os.path.join(dpath, dname)).Reload() for dname in os.listdir(dpath)]

    tags = summary_iterators[0].Tags()['scalars']

    # All runs are expected to log the same scalar tags.
    for it in summary_iterators:
        assert it.Tags()['scalars'] == tags

    out = defaultdict(list)
    steps = []

    for tag in tags:
        # Take the step numbers from the first run; the assert below
        # checks that all runs logged this tag at the same steps.
        steps = [e.step for e in summary_iterators[0].Scalars(tag)]

        for events in zip(*[acc.Scalars(tag) for acc in summary_iterators]):
            assert len(set(e.step for e in events)) == 1

            out[tag].append([e.value for e in events])

    return out, steps


def to_csv(dpath):
    dirs = os.listdir(dpath)

    d, steps = tabulate_events(dpath)
    tags, values = zip(*d.items())
    np_values = np.array(values)

    # Write one CSV per tag: columns are run names, rows are step numbers.
    for index, tag in enumerate(tags):
        df = pd.DataFrame(np_values[index], index=steps, columns=dirs)
        df.to_csv(get_file_path(dpath, tag))


def get_file_path(dpath, tag):
    file_name = tag.replace("/", "_") + '.csv'
    folder_path = os.path.join(dpath, 'csv')
    if not os.path.exists(folder_path):
        os.makedirs(folder_path)
    return os.path.join(folder_path, file_name)


if __name__ == '__main__':
    path = "path_to_your_summaries"
    to_csv(path)
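
To read one of the generated per-tag files back in, something like the following should work (the file name here is hypothetical; it is the tag name with '/' replaced by '_'):

import pandas as pd

# Row index is the step number, one column per run directory.
df = pd.read_csv('path_to_your_summaries/csv/accuracy_accuracy.csv', index_col=0)
print(df.head())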

My solution builds upon: https://stackoverflow.com/a/48774926/2230045


EDIT:

I created a more sophisticated version and released it on GitHub: https://github.com/Spenhouet/tensorboard-aggregator

This version aggregates multiple TensorBoard runs and can save the aggregates to a new TensorBoard summary or to a .csv file.


2 votes

Here is my solution, which builds on the previous one but can scale up.

import os
import numpy as np
import pandas as pd

from tensorboard.backend.event_processing.event_accumulator import EventAccumulator


def tabulate_events(dpath):

    final_out = {}
    for dname in os.listdir(dpath):
        print(f"Converting run {dname}", end="")
        ea = EventAccumulator(os.path.join(dpath, dname)).Reload()
        tags = ea.Tags()['scalars']

        out = {}

        for tag in tags:
            tag_values = []
            wall_time = []
            steps = []

            for event in ea.Scalars(tag):
                tag_values.append(event.value)
                wall_time.append(event.wall_time)
                steps.append(event.step)

            # Two rows ('value' and 'wall_time') with one column per step.
            out[tag] = pd.DataFrame(
                data=dict(zip(steps, np.array([tag_values, wall_time]).transpose())),
                columns=steps,
                index=['value', 'wall_time'])

        if len(tags) > 0:
            df = pd.concat(out.values(), keys=out.keys())
            df.to_csv(f'{dname}.csv')
            final_out[dname] = df  # only keep runs that actually logged scalars
            print(" - Done")
        else:
            print(" - No scalars to write")

    return final_out


if __name__ == '__main__':
    path = "your/path/here"
    steps = tabulate_events(path)
    pd.concat(steps.values(), keys=steps.keys()).to_csv('all_result.csv')
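
To load the combined file back into pandas, here is a sketch based on the layout this script writes (a three-level row index of run, tag, and 'value'/'wall_time', with one column per step):

import pandas as pd

# The first three CSV columns form the row index.
df = pd.read_csv('all_result.csv', index_col=[0, 1, 2])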

1 vote

Just adding to @Spen's answer:

In case you want to export the data when you have a varying number of steps: this produces one large CSV file. You might need to change the keys around for it to work for you.

import glob

import numpy as np
import pandas as pd

from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

# Every subdirectory of the current directory is treated as one run.
listOutput = glob.glob("*/")

listDF = []

for tb_output_folder in listOutput:
    print(tb_output_folder)
    x = EventAccumulator(path=tb_output_folder)
    x.Reload()
    x.FirstEventTimestamp()
    keys = ['loss', 'mean_absolute_error', 'val_loss', 'val_mean_absolute_error']

    steps = [e.step for e in x.Scalars(keys[0])]
    n_steps = len(steps)
    listRun = [tb_output_folder] * n_steps

    # One column of values per key, one row per step.
    data = np.zeros((n_steps, len(keys)))
    for i in range(len(keys)):
        data[:, i] = [e.value for e in x.Scalars(keys[i])]

    printOutDict = {keys[0]: data[:, 0], keys[1]: data[:, 1],
                    keys[2]: data[:, 2], keys[3]: data[:, 3]}

    printOutDict['Name'] = listRun

    DF = pd.DataFrame(data=printOutDict)

    listDF.append(DF)

df = pd.concat(listDF)
df.to_csv('Output.csv')
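
If the hard-coded keys list does not match what your runs logged, one possible tweak (a sketch, not part of the original answer) is to discover the scalar tags from the accumulator itself:

# Derive the tags from the event file instead of hardcoding them.
# x.Tags() returns a dict of tag lists keyed by category ('scalars', ...).
# This still assumes every tag was logged for the same number of steps.
keys = x.Tags()['scalars']
data = np.zeros((n_steps, len(keys)))
for i, key in enumerate(keys):
    data[:, i] = [e.value for e in x.Scalars(key)]
printOutDict = {key: data[:, i] for i, key in enumerate(keys)}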
