
Typical Usage and Code Examples of the Java FeedForwardLayer Class


This article collects typical usage examples of the Java class org.deeplearning4j.nn.conf.layers.FeedForwardLayer. If you are wondering what FeedForwardLayer is for, or how to use it in practice, the curated code examples below may help.

FeedForwardLayer belongs to the org.deeplearning4j.nn.conf.layers package. Thirteen code examples of the class are shown below, sorted by popularity by default. You can upvote any example you like or find useful; your votes help the system recommend better Java code examples.

Example 1: makeLayer

Upvotes: 3

import org.deeplearning4j.nn.conf.layers.FeedForwardLayer; // import the required package/class
public static FeedForwardLayer makeLayer(Config layerConfig) {
    Type layerType = Type.valueOf(layerConfig.getString("type"));
    switch (layerType) {
        case GravesLSTM:
            return new GravesLSTM.Builder()
                    .activation(layerConfig.getString("activation"))
                    .nIn(layerConfig.getInt("nIn"))
                    .nOut(layerConfig.getInt("nOut")).build();

        case RnnOutputLayer:
            return new RnnOutputLayer.Builder()
                    .activation(layerConfig.getString("activation"))
                    .lossFunction(LossFunctions.LossFunction.valueOf(layerConfig.getString("lossFunction")))
                    .nIn(layerConfig.getInt("nIn"))
                    .nOut(layerConfig.getInt("nOut")).build();

        default:
            throw new RuntimeException("Unsupported layer type: " + layerType);
    }
}
 

Developer: claytantor | Project: blueweave | Lines: 25 | Source: NetworkTypeFactory.java
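
For context, here is a hedged usage sketch. It assumes the Config parameter is Typesafe's com.typesafe.config.Config (which supplies the getString/getInt accessors used above); the keys and values are illustrative, not part of the blueweave project.

import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import org.deeplearning4j.nn.conf.layers.FeedForwardLayer;

// Hypothetical layer description; keys mirror the ones read in makeLayer above
Config lstmConfig = ConfigFactory.parseString(
        "type = GravesLSTM\n" +
        "activation = tanh\n" +
        "nIn = 50\n" +
        "nOut = 100\n");
FeedForwardLayer lstm = NetworkTypeFactory.makeLayer(lstmConfig);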

Example 2: getGradientsFromFlattened

Upvotes: 3

import org.deeplearning4j.nn.conf.layers.FeedForwardLayer; // import the required package/class
/**
 * Return a map of gradients (in their standard non-flattened representation), taken from the flattened (row vector) gradientView array.
 * The idea is that this method operates on the gradientView in exactly the same way as init() does on the paramsView;
 * thus the positions in the view (and the array orders) must match those of the parameters.
 *
 * @param conf         Configuration
 * @param gradientView The flattened gradients array, as a view of the larger array
 * @return A map containing an array by parameter type, that is a view of the full network gradients array
 */
@Override
public Map<String, INDArray> getGradientsFromFlattened(NeuralNetConfiguration conf, INDArray gradientView) {
    org.deeplearning4j.nn.conf.layers.FeedForwardLayer layerConf =
            (org.deeplearning4j.nn.conf.layers.FeedForwardLayer) conf.getLayer();
    int nIn = layerConf.getNIn();
    int nOut = layerConf.getNOut();
    int nWeightParams = nIn; // element-wise layer: one weight per input (nIn == nOut)

    INDArray weightGradientView = gradientView.get(NDArrayIndex.point(0), NDArrayIndex.interval(0, nWeightParams));
    INDArray biasView = gradientView.get(NDArrayIndex.point(0),
            NDArrayIndex.interval(nWeightParams, nWeightParams + nOut)); //Already a row vector

    Map<String, INDArray> out = new LinkedHashMap<>();
    out.put(WEIGHT_KEY, weightGradientView);
    out.put(BIAS_KEY, biasView);

    return out;
}
 

Developer: deeplearning4j | Project: deeplearning4j | Lines: 28 | Source: ElementWiseParamInitializer.java
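
To make that layout concrete, here is a small self-contained sketch (values are illustrative) of how the two views partition a flattened row vector for an element-wise layer, where the weight segment has length nIn and nIn == nOut:

import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.factory.Nd4j;
import org.nd4j.linalg.indexing.NDArrayIndex;

int nIn = 4, nOut = 4; // element-wise layers require nIn == nOut
INDArray gradientView = Nd4j.linspace(1, 8, 8).reshape(1, 8); // row vector [1..8]
INDArray weights = gradientView.get(NDArrayIndex.point(0), NDArrayIndex.interval(0, nIn)); // [1,2,3,4]
INDArray bias = gradientView.get(NDArrayIndex.point(0), NDArrayIndex.interval(nIn, nIn + nOut)); // [5,6,7,8]
// Both results are views: writing into them updates the flattened array in place.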

Example 3: addLayers

Upvotes: 2

import org.deeplearning4j.nn.conf.layers.FeedForwardLayer; // import the required package/class
@SuppressWarnings("rawtypes")
protected ListBuilder addLayers(FeedForwardLayer.Builder hiddenLayerBuilder,
        FeedForwardLayer.Builder outputLayerBuilder)
{
    final int[] hiddenLayerNodes = parameters.getHiddeLayerNodes(); // (sic: method name as defined in the project)
    final int nLayers = hiddenLayerNodes.length + 1;
    for (int i = 0; i < nLayers; i++)
    {
        int nIn;
        if (i == 0)
        {
            nIn = parameters.getInputSize();
        }
        else
        {
            nIn = hiddenLayerNodes[i - 1];
        }
        if (i < nLayers - 1)
        {
            layer(i, hiddenLayerBuilder.nIn(nIn).nOut(hiddenLayerNodes[i]).build());
        }
        else
        {
            layer(i, outputLayerBuilder.nIn(nIn).nOut(parameters.getOutputSize()).build());
        }
    }
    return this;
}
 

Developer: amrabed | Project: DL4J | Lines: 29 | Source: Model.java
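
A hedged sketch of how the two builders might be supplied; DenseLayer, OutputLayer, and the surrounding Model instance are assumed from the project's context:

FeedForwardLayer.Builder hidden = new DenseLayer.Builder().activation("relu");
FeedForwardLayer.Builder output = new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT).activation("softmax");
addLayers(hidden, output); // builds all hidden layers plus the output layer in one pass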

Example 4: init

Upvotes: 2

import org.deeplearning4j.nn.conf.layers.FeedForwardLayer; // import the required package/class
/**
 * Initialize the parameters
 *
 * @param conf             the configuration
 * @param paramsView       a view of the full network (backprop) parameters
 * @param initializeParams if true: initialize the parameters according to the configuration. If false: don't modify the
 *                         values in the paramsView array (but do select out the appropriate subset, reshape etc as required)
 * @return Map of parameters keyed by type (view of the 'paramsView' array)
 */
@Override
public Map<String, INDArray> init(NeuralNetConfiguration conf, INDArray paramsView, boolean initializeParams) {
    if (!(conf.getLayer() instanceof org.deeplearning4j.nn.conf.layers.FeedForwardLayer))
        throw new IllegalArgumentException("unsupported layer type: " + conf.getLayer().getClass().getName());

    Map<String, INDArray> params = Collections.synchronizedMap(new LinkedHashMap<String, INDArray>());

    int length = numParams(conf);
    if (paramsView.length() != length)
        throw new IllegalStateException(
                "Expected params view of length " + length + ", got length " + paramsView.length());

    org.deeplearning4j.nn.conf.layers.FeedForwardLayer layerConf =
            (org.deeplearning4j.nn.conf.layers.FeedForwardLayer) conf.getLayer();
    int nIn = layerConf.getNIn();

    int nWeightParams = nIn; // element-wise layer: one weight per input (nIn == nOut)
    INDArray weightView = paramsView.get(NDArrayIndex.point(0), NDArrayIndex.interval(0, nWeightParams));
    INDArray biasView = paramsView.get(NDArrayIndex.point(0),
            NDArrayIndex.interval(nWeightParams, nWeightParams + nIn));


    params.put(WEIGHT_KEY, createWeightMatrix(conf, weightView, initializeParams));
    params.put(BIAS_KEY, createBias(conf, biasView, initializeParams));
    conf.addVariable(WEIGHT_KEY);
    conf.addVariable(BIAS_KEY);

    return params;
}
 

Developer: deeplearning4j | Project: deeplearning4j | Lines: 39 | Source: ElementWiseParamInitializer.java

Example 5: layerSize

Upvotes: 2

import org.deeplearning4j.nn.conf.layers.FeedForwardLayer; // import the required package/class
/**
 * Return the layer size (number of units) for the specified layer.
 * Note that the meaning of the "layer size" can depend on the type of layer. For example:<br>
 * - DenseLayer, OutputLayer, recurrent layers: number of units (nOut configuration option)<br>
 * - ConvolutionLayer: the depth (number of channels)<br>
 * - Subsampling layers, global pooling layers, etc: size of 0 is always returned<br>
 *
 * @param layer Index of the layer to get the size of. Must be in range 0 to nLayers-1 inclusive
 * @return Size of the layer
 */
public int layerSize(int layer) {
    if (layer < 0 || layer >= layers.length) {
        throw new IllegalArgumentException("Invalid layer index: " + layer + ". Layer index must be between 0 and "
                + (layers.length - 1) + " inclusive");
    }
    org.deeplearning4j.nn.conf.layers.Layer conf = layers[layer].conf().getLayer();
    if (conf == null || !(conf instanceof FeedForwardLayer)) {
        return 0;
    }
    FeedForwardLayer ffl = (FeedForwardLayer) conf;
    return ffl.getNOut();
}
 

Developer: deeplearning4j | Project: deeplearning4j | Lines: 23 | Source: MultiLayerNetwork.java
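
A minimal sketch (a hypothetical two-layer network) showing what layerSize reports:

import org.deeplearning4j.nn.conf.MultiLayerConfiguration;
import org.deeplearning4j.nn.conf.NeuralNetConfiguration;
import org.deeplearning4j.nn.conf.layers.DenseLayer;
import org.deeplearning4j.nn.conf.layers.OutputLayer;
import org.deeplearning4j.nn.multilayer.MultiLayerNetwork;
import org.nd4j.linalg.lossfunctions.LossFunctions;

MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().list()
        .layer(0, new DenseLayer.Builder().nIn(10).nOut(20).build())
        .layer(1, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT).nIn(20).nOut(3).build())
        .build();
MultiLayerNetwork net = new MultiLayerNetwork(conf);
net.init();
int hiddenUnits = net.layerSize(0); // 20: nOut of the DenseLayer
int outputUnits = net.layerSize(1); // 3: nOut of the OutputLayer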

Example 6: layerSize

Upvotes: 2

import org.deeplearning4j.nn.conf.layers.FeedForwardLayer; // import the required package/class
/**
 * Return the layer size (number of units) for the specified layer.
 * Note that the meaning of the "layer size" can depend on the type of layer. For example:<br>
 * - DenseLayer, OutputLayer, recurrent layers: number of units (nOut configuration option)<br>
 * - ConvolutionLayer: the depth (number of channels)<br>
 * - Subsampling layers, global pooling layers, etc: size of 0 is always returned<br>
 *
 * @param layerName Name of the layer to get the size of
 * @return Size of the layer
 */
public int layerSize(String layerName) {
    Layer l = getLayer(layerName);
    if(l == null){
        throw new IllegalArgumentException("No layer with name \"" + layerName + "\" exists");
    }
    org.deeplearning4j.nn.conf.layers.Layer conf = l.conf().getLayer();
    if (conf == null || !(conf instanceof FeedForwardLayer)) {
        return 0;
    }
    FeedForwardLayer ffl = (FeedForwardLayer) conf;
    return ffl.getNOut();
}
 

Developer: deeplearning4j | Project: deeplearning4j | Lines: 23 | Source: ComputationGraph.java
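
The name-based variant behaves the same way on a ComputationGraph; a one-line sketch with a hypothetical vertex name:

int units = graph.layerSize("dense1"); // nOut of the layer named "dense1"; 0 for non-FeedForward layers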

Example 7: testCnnToDense

Upvotes: 2

import org.deeplearning4j.nn.conf.layers.FeedForwardLayer; // import the required package/class
@Test
public void testCnnToDense() {
    MultiLayerConfiguration conf =
            new NeuralNetConfiguration.Builder()
                    .list().layer(0,
                    new org.deeplearning4j.nn.conf.layers.ConvolutionLayer.Builder(
                            4, 4) // 28*28*1 => 15*15*10
                            .nIn(1).nOut(10).padding(2, 2)
                            .stride(2, 2)
                            .weightInit(WeightInit.RELU)
                            .activation(Activation.RELU)
                            .build())
                    .layer(1, new org.deeplearning4j.nn.conf.layers.DenseLayer.Builder()
                            .activation(Activation.RELU).nOut(200).build())
                    .layer(2, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT).nIn(200)
                            .nOut(5).weightInit(WeightInit.RELU)
                            .activation(Activation.SOFTMAX).build())
                    .setInputType(InputType.convolutionalFlat(28, 28, 1)).backprop(true)
                    .pretrain(false).build();

    assertNotNull(conf.getInputPreProcess(0));
    assertNotNull(conf.getInputPreProcess(1));

    assertTrue(conf.getInputPreProcess(0) instanceof FeedForwardToCnnPreProcessor);
    assertTrue(conf.getInputPreProcess(1) instanceof CnnToFeedForwardPreProcessor);

    FeedForwardToCnnPreProcessor ffcnn = (FeedForwardToCnnPreProcessor) conf.getInputPreProcess(0);
    CnnToFeedForwardPreProcessor cnnff = (CnnToFeedForwardPreProcessor) conf.getInputPreProcess(1);

    assertEquals(28, ffcnn.getInputHeight());
    assertEquals(28, ffcnn.getInputWidth());
    assertEquals(1, ffcnn.getNumChannels());

    assertEquals(15, cnnff.getInputHeight());
    assertEquals(15, cnnff.getInputWidth());
    assertEquals(10, cnnff.getNumChannels());

    assertEquals(15 * 15 * 10, ((FeedForwardLayer) conf.getConf(1).getLayer()).getNIn());
}
 

Developer: deeplearning4j | Project: deeplearning4j | Lines: 40 | Source: TestPreProcessors.java
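
The "28*28*1 => 15*15*10" comment in the test follows from the standard convolution output formula; checking the arithmetic:

// outSize = (inSize + 2*padding - kernel) / stride + 1
int outSize = (28 + 2 * 2 - 4) / 2 + 1; // = 15
int flattenedInputs = outSize * outSize * 10; // = 2250, the nIn asserted for layer 1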

Example 8: fit

Upvotes: 2

import org.deeplearning4j.nn.conf.layers.FeedForwardLayer; // import the required package/class
/**
 * Fit the layer on a text file read via the Spark context
 * @param path the path to the text file
 * @param labelIndex the index of the label
 * @param recordReader the record reader
 * @return the fitted layer
 */
public Layer fit(String path, int labelIndex, RecordReader recordReader) {
    FeedForwardLayer ffLayer = (FeedForwardLayer) conf.getLayer();

    JavaRDD<String> lines = sc.textFile(path);
    // gotta map this to a Matrix/INDArray
    JavaRDD<DataSet> points = lines.map(new RecordReaderFunction(recordReader, labelIndex, ffLayer.getNOut()));
    return fitDataSet(points);

}
 

Developer: deeplearning4j | Project: deeplearning4j | Lines: 17 | Source: SparkDl4jLayer.java
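
A hedged usage sketch, assuming a DataVec CSVRecordReader and an existing SparkDl4jLayer instance (sparkLayer); the path and label index are illustrative:

RecordReader recordReader = new CSVRecordReader();
Layer trained = sparkLayer.fit("hdfs:///data/train.csv", 0, recordReader);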

Example 9: summary

Upvotes: 2

import org.deeplearning4j.nn.conf.layers.FeedForwardLayer; // import the required package/class
/**
 * method copied/adapted from
 * https://github.com/deeplearning4j/deeplearning4j/blob/master/deeplearning4j-nn/src/main/java/org/deeplearning4j/nn/graph/ComputationGraph.java
 */
public String summary() {
	String ret = "\n";
	ret += StringUtils.repeat("=", 70);
	ret += "\n";
	ret += String.format("%-40s%-15s%-15s\n", "VertexName (VertexType)", "nIn -> nOut", "Layer Params");
	ret += StringUtils.repeat("=", 70);
	ret += "\n";
	int totalParams = 0;

	for (int currentLayerIdx = 0; currentLayerIdx < net.getnLayers(); currentLayerIdx++) {
		Layer current = net.getLayer(currentLayerIdx).conf().getLayer();
		int layerParams = net.getLayer(currentLayerIdx).numParams();

		String name = String.valueOf(currentLayerIdx);
		String[] classNameArr = current.getClass().toString().split("\\.");
		String className = classNameArr[classNameArr.length - 1];

		String paramCount = "-";
		String in = "-";
		String out = "-";
		String paramShape = "-";
		if (current instanceof FeedForwardLayer) {
			FeedForwardLayer currentLayer = (FeedForwardLayer)current;
			classNameArr = currentLayer.getClass().getName().split("\\.");
			className = classNameArr[classNameArr.length - 1];
			paramCount = String.valueOf(layerParams);
			totalParams += layerParams;

			if (layerParams > 0) {
				paramShape = "";
				in = String.valueOf(currentLayer.getNIn());
				out = String.valueOf(currentLayer.getNOut());
				if(paramShape.lastIndexOf(",") >= 0) {
					paramShape = paramShape.subSequence(0, paramShape.lastIndexOf(",")).toString();
				}
			}
		}

		ret += String.format("%-40s%-15s%-15s", name + " (" + className + ")", in + " -> " + out, paramCount);
		ret += "\n";
	}

	ret += StringUtils.repeat("-", 70);
	ret += String.format("\n%30s %d", "Total Parameters: ", totalParams);
	ret += "\n";
	ret += StringUtils.repeat("=", 70);
	ret += "\n";

	return ret;
}
 

Developer: matthiaszimmermann | Project: ml_demo | Lines: 55 | Source: WDBCAutoencoder.java

Example 10: numParams

Upvotes: 2

import org.deeplearning4j.nn.conf.layers.FeedForwardLayer; // import the required package/class
@Override
public int numParams(Layer layer) {
    FeedForwardLayer layerConf = (FeedForwardLayer) layer;
    int nIn = layerConf.getNIn();
    return nIn * 2; // nIn weights + nIn biases (element-wise: nOut == nIn)
}
 

Developer: deeplearning4j | Project: deeplearning4j | Lines: 7 | Source: ElementWiseParamInitializer.java

Example 11: fitLabeledPoint

Upvotes: 1

import org.deeplearning4j.nn.conf.layers.FeedForwardLayer; // import the required package/class
/**
 * Fit a MultiLayerNetwork using Spark MLLib LabeledPoint instances.
 * This will convert the labeled points to the internal DL4J data format and train the model on that
 *
 * @param rdd the RDD to fit
 * @return the fitted MultiLayerNetwork
 */
public MultiLayerNetwork fitLabeledPoint(JavaRDD<LabeledPoint> rdd) {
    int nLayers = network.getLayerWiseConfigurations().getConfs().size();
    FeedForwardLayer ffl = (FeedForwardLayer) network.getLayerWiseConfigurations().getConf(nLayers - 1).getLayer();
    JavaRDD<DataSet> ds = MLLibUtil.fromLabeledPoint(sc, rdd, ffl.getNOut());
    return fit(ds);
}
 

Developer: deeplearning4j | Project: deeplearning4j | Lines: 14 | Source: SparkDl4jMultiLayer.java
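
A hedged sketch of feeding Spark MLLib data into fitLabeledPoint, assuming an existing SparkDl4jMultiLayer instance (sparkNet) and a JavaSparkContext (sc); the libsvm path is illustrative:

JavaRDD<LabeledPoint> points = MLUtils.loadLibSVMFile(sc.sc(), "hdfs:///data/train.libsvm").toJavaRDD();
MultiLayerNetwork fitted = sparkNet.fitLabeledPoint(points);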

Example 12: evaluateRegression

Upvotes: 1

import org.deeplearning4j.nn.conf.layers.FeedForwardLayer; // import the required package/class
/**
 * Evaluate the network (regression performance) in a distributed manner on the provided data
 *
 * @param data Data to evaluate
 * @param minibatchSize Minibatch size to use when performing evaluation
 * @return     {@link RegressionEvaluation} instance with regression performance
 */
public RegressionEvaluation evaluateRegression(JavaRDD<DataSet> data, int minibatchSize) {
    int nOut = ((FeedForwardLayer) network.getOutputLayer().conf().getLayer()).getNOut();
    return doEvaluation(data, new RegressionEvaluation(nOut), minibatchSize);
}
 

Developer: deeplearning4j | Project: deeplearning4j | Lines: 12 | Source: SparkDl4jMultiLayer.java
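
Usage is a one-liner; the returned RegressionEvaluation aggregates MSE, MAE, RMSE and related metrics, which stats() formats for printing. Here sparkNet and testData are assumed from the surrounding context:

RegressionEvaluation eval = sparkNet.evaluateRegression(testData, 32); // minibatch size of 32
System.out.println(eval.stats());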

Example 13: evaluateRegression

Upvotes: 1

import org.deeplearning4j.nn.conf.layers.FeedForwardLayer; // import the required package/class
/**
 * Evaluate the network (regression performance) in a distributed manner on the provided data
 *
 * @param data Data to evaluate
 * @param minibatchSize Minibatch size to use when performing evaluation
 * @return     {@link RegressionEvaluation} instance with regression performance
 */
public RegressionEvaluation evaluateRegression(JavaRDD<DataSet> data, int minibatchSize) {
    int nOut = ((FeedForwardLayer) network.getOutputLayer(0).conf().getLayer()).getNOut();
    return doEvaluation(data, new RegressionEvaluation(nOut), minibatchSize);
}
 

Developer: deeplearning4j | Project: deeplearning4j | Lines: 12 | Source: SparkComputationGraph.java

