AWS CloudSearch: export/download data

Question (6 votes, 3 answers)

I have about 1.5 million documents in an AWS CloudSearch index. It is costing me too much and I want to migrate off the service, but I have not been able to find a way to download or export the documents from the index. Is it possible?

amazon-web-services amazon-cloudsearch
3 Answers

3 votes

For a similar need, I had to walk through my whole CloudSearch domain (beyond the 10,000-results limit) to generate a file.

I handled it with a Node.js script, shown below:

var AWS = require('aws-sdk');
var fs = require('fs');

AWS.config.update({
    accessKeyId: '<yourAccessKey>', secretAccessKey: '<yourSecretAccessKey>',
    region: '<yourRegion>',endpoint: '<YourSearchDomainEndPoint>'
});

var batchSize = 5000; // Number of items returned per search request... Max: 10000
var compteur = 0;     // Running count of retrieved documents
var result = [];      // Accumulates every hit across all pages
var resultMessage;    // Holds the latest search response

var params = {query:""};
var cloudsearchdomain = new AWS.CloudSearchDomain(params);

function launchSearch(theContext) {
    process.stdout.write('Launch AWS.CloudSearch ');

    if (theContext==null) {
        process.stdout.write('initial request ... ');
    } else {        
        var current  = (theContext.start/batchSize) +2 ;
        var totalRun = (Math.ceil(theContext.found/batchSize  * 10) / 10) + 1;
        process.stdout.write('( ' + current + ' / ' + totalRun + ' )       ... ');
    }

    // A negative query that excludes a term no document contains,
    // so every document in the domain matches.
    params = {
        query: "-aQueryStringImpossibleToFind",
        cursor: (theContext==null) ? "initial" : theContext.cursor,
        size: batchSize
    };

    var forCursor = new AWS.CloudSearchDomain(params);

    forCursor.search(params, function(err, data) {
        if (err) {
            console.log("Failed with params :");
            console.log(err);
            return;
        }

        resultMessage = data;
        compteur = compteur + data.hits.hit.length;
        for (var i = 0; i < data.hits.hit.length; i++) {
            result.push(data.hits.hit[i]);
        }

        process.stdout.write(resultMessage.hits.hit.length + ' hits found.');

        if (resultMessage.hits.hit.length == 0) {
            // No more hits: the cursor has reached the end of the domain.
            process.stdout.write(' Done.\n\nLet\'s create the file...\n');
            writeTheFile(result);
        } else {
            process.stdout.write('\n');
            var myContext = {};
            myContext.cursor = resultMessage.hits.cursor;  // Continue from the returned cursor
            myContext.start = resultMessage.hits.start;
            myContext.found = resultMessage.hits.found;
            myContext.retrieved = resultMessage.hits.hit.length;
            launchSearch(myContext);
        }
    });
}

function writeTheFile(myResult) {
    fs.writeFile(process.argv[2], JSON.stringify(myResult), function(err) {
        if (err) {
            return console.log(err);
        }
        // Only report success once the file has actually been written.
        process.stdout.write("DONE : File '" + process.argv[2] + "' generated  ( " + compteur + " elements ).\n");
    });
}



/* Check parameters */
if (!process.argv[2]) {
    process.stdout.write('ERROR : the output filename is expected as argument.\n');
    process.exit();
} else {
    launchSearch();
}

The script has to be called from the command line: node script.js fileToCreate.json

Note: I don't know whether this works properly on a search domain with 1.5 million documents. The risk I see is the size of the JSON variable held in memory, so the script would have to be adapted (perhaps by writing a file every 100,000 documents?).
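As an illustration of that adaptation, here is a minimal sketch that flushes the accumulated hits to disk in chunks instead of holding everything in one JSON variable. It reuses the fs, result and process.argv[2] names from the script above; flushThreshold, outputStream and flushResults are hypothetical names introduced here, and each flush appends one JSON array per line (JSON Lines):

var flushThreshold = 100000; // Hypothetical: flush to disk every 100,000 documents
var outputStream = fs.createWriteStream(process.argv[2], {flags: 'a'}); // append across flushes

function flushResults() {
    if (result.length === 0) return;
    // Write the current batch as one JSON Lines record, then reset the in-memory buffer.
    outputStream.write(JSON.stringify(result) + '\n');
    result = [];
}

// Inside the search callback, right after the result.push(...) loop:
//     if (result.length >= flushThreshold) { flushResults(); }
// And at the end, instead of writeTheFile(result):
//     flushResults();
//     outputStream.end();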

Nekloth


2 votes

Amazon (still) does not provide a way to export all of the data from a CloudSearch domain, but it is not hard to write a utility that does the job yourself.
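For reference, a minimal sketch of such a utility, using the same cursor-based pagination as the other answers but with the aws-sdk v2 promise interface; the endpoint and region values are placeholders, and credentials are assumed to come from the environment or AWS.config:

var AWS = require('aws-sdk');

// Placeholders: point this at your own search domain.
var domain = new AWS.CloudSearchDomain({
    endpoint: '<yourSearchDomainEndPoint>',
    region: '<yourRegion>'
});

async function exportAll() {
    var cursor = 'initial';
    var docs = [];
    while (true) {
        var data = await domain.search({
            query: 'matchall',           // structured syntax that matches every document
            queryParser: 'structured',
            cursor: cursor,
            size: 10000,
            return: '_all_fields'
        }).promise();
        if (data.hits.hit.length === 0) break; // cursor exhausted, nothing left to fetch
        docs = docs.concat(data.hits.hit);
        cursor = data.hits.cursor;             // continue from the cursor returned by the service
    }
    return docs;
}

exportAll().then(function(docs) {
    require('fs').writeFileSync('all-data.json', JSON.stringify(docs));
}).catch(console.log);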


0 votes

Just fixed a few issues; full credit goes to @Nek's answer https://stackoverflow.com/a/32119407/1894553


Prerequisites: Node.js + the aws-sdk package

$ npm install aws-sdk

export-all.js

Note that in order to get a full dump with the return: "_all_fields" parameter, the fields must have the return flag enabled in the indexing options of the schema.
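If some fields are missing from the dump, the return flag can be switched on per field, for example through the aws-sdk configuration client as sketched below (domain and field names are placeholders; option changes only take effect after the domain is re-indexed):

var AWS = require('aws-sdk');
var cloudsearch = new AWS.CloudSearch({region: 'xx'}); // placeholder region

cloudsearch.defineIndexField({
    DomainName: 'your-domain',                // placeholder domain name
    IndexField: {
        IndexFieldName: 'title',              // placeholder field name
        IndexFieldType: 'text',
        TextOptions: { ReturnEnabled: true }  // allow the field to be returned in search results
    }
}, function(err) {
    if (err) return console.log(err);
    // Re-index so the new option is applied.
    cloudsearch.indexDocuments({DomainName: 'your-domain'}, function(err2) {
        if (err2) console.log(err2);
    });
});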

var AWS = require('aws-sdk');
var fs = require('fs');

AWS.config.update({
        accessKeyId: 'xx',
        secretAccessKey: 'xx',
        region: 'xx',
        endpoint: 'xxx'
});

var batchSize = 10000; // Number of items returned per search request (the maximum allowed)
var compteur = 0;      // Running count of retrieved documents
var result = [];       // Accumulates every hit across all pages
var resultMessage = [];

var params = {query:""};
var cloudsearchdomain = new AWS.CloudSearchDomain(params);

function launchSearch(theContext) {
    process.stdout.write('Launch AWS.CloudSearch ');

    if (theContext==null) {
        process.stdout.write('initial request ... ');
    } else {
        var current  = (theContext.start/batchSize) +2 ;
        var totalRun = (Math.ceil(theContext.found/batchSize  * 10) / 10) + 1;
        process.stdout.write('( ' + current + ' / ' + totalRun + ' )       ... ');
    }

    // https://docs.aws.amazon.com/AWSJavaScriptSDK/latest/AWS/CloudSearchDomain.html#search-property
    params = {
        query: "matchall",                // structured syntax that matches every document
        cursor: (theContext==null) ? "initial" : theContext.cursor,
        size: batchSize,
        queryParser: "structured",
        return: "_all_fields"
    };

    var forCursor = new AWS.CloudSearchDomain(params);

    forCursor.search(params, function(err, data) {
        if (err) {
            console.log("Failed with params :");
            console.log(err);
            return;
        }

        resultMessage = data;
        compteur = compteur + data.hits.hit.length;
        for (var i = 0; i < data.hits.hit.length; i++) {
            result.push(data.hits.hit[i]);
        }

        process.stdout.write(resultMessage.hits.hit.length + ' hits found.');

        if (resultMessage.hits.hit.length == 0) {
            // No more hits: the cursor has reached the end of the domain.
            process.stdout.write(' Done.\n\nLet\'s create the file...\n');
            writeTheFile(result);
        } else {
            process.stdout.write('\n');
            var myContext = {};
            myContext.cursor = resultMessage.hits.cursor;  // Continue from the returned cursor
            myContext.start = resultMessage.hits.start;
            myContext.found = resultMessage.hits.found;
            myContext.retrieved = resultMessage.hits.hit.length;
            launchSearch(myContext);
        }
    });
}

function writeTheFile(myResult) {
    fs.writeFile(process.argv[2], JSON.stringify(myResult), function(err) {
        if (err) {
            return console.log(err);
        }
        // Only report success once the file has actually been written.
        process.stdout.write("DONE : File '" + process.argv[2] + "' generated  ( " + compteur + " elements ).\n");
    });
}



/* Check parameters */
if (!process.argv[2]) {
    process.stdout.write('ERROR : the output filename is expected as argument.\n');
    process.exit();
} else {
    launchSearch();
}

Execution

$ node export-all.js all-data.json
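The resulting all-data.json is a JSON array of search hits; each entry follows the CloudSearchDomain response shape, roughly as below (the id and field names are placeholders):

// Illustrative shape of one exported hit:
// { "id": "doc-123", "fields": { "title": ["..."], "tags": ["...", "..."] } }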