Updated test resources
vruusmann committed Dec 26, 2023
1 parent 62b7159 commit 074ace9
Showing 9 changed files with 22,316 additions and 0 deletions.
@@ -47,11 +47,21 @@ public void evaluateLightGBMAudit() throws Exception {
evaluate("LightGBM", AUDIT);
}

@Test
public void evaluateLightGBMAuditNA() throws Exception {
evaluate("LightGBM", AUDIT_NA);
}

@Test
public void evaluateLightGBMAuto() throws Exception {
evaluate("LightGBM", AUTO);
}

@Test
public void evaluateLightGBMAutoNA() throws Exception {
evaluate("LightGBM", AUTO_NA);
}

@Test
public void evaluateLightGBMIris() throws Exception {
evaluate("LightGBM", IRIS);
44 changes: 44 additions & 0 deletions pmml-sparkml-lightgbm/src/test/resources/LightGBMAuditNA.scala
@@ -0,0 +1,44 @@
import java.io.File

import com.microsoft.azure.synapse.ml.lightgbm.LightGBMClassifier
import org.apache.spark.ml.Pipeline
import org.apache.spark.ml.feature._
import org.apache.spark.ml.linalg.Vector
import org.apache.spark.sql.functions.{lit, udf}
import org.apache.spark.sql.types.StringType
import org.jpmml.sparkml.{DatasetUtil, PipelineModelUtil, PMMLBuilder}
import org.jpmml.sparkml.feature.InvalidCategoryTransformer

var df = DatasetUtil.loadCsv(spark, new File("csv/AuditNA.csv"))
df = DatasetUtil.castColumn(df, "Adjusted", StringType)

//DatasetUtil.storeSchema(df, new File("schema/AuditNA.json"))

val cat_cols = Array("Education", "Employment", "Gender", "Marital", "Occupation")
val cont_cols = Array("Age", "Hours", "Income")

val labelIndexer = new StringIndexer().setInputCol("Adjusted").setOutputCol("idx_Adjusted")

val indexer = new StringIndexer().setInputCols(cat_cols).setOutputCols(cat_cols.map(cat_col => "idx_" + cat_col)).setHandleInvalid("keep")
val indexTransformer = new InvalidCategoryTransformer().setInputCols(indexer.getOutputCols).setOutputCols(cat_cols.map(cat_col => "idxTransformed_" + cat_col))

val assembler = new VectorAssembler().setInputCols(indexTransformer.getOutputCols ++ cont_cols).setOutputCol("featureVector").setHandleInvalid("keep")

val classifier = new LightGBMClassifier().setObjective("binary").setNumIterations(101).setLabelCol(labelIndexer.getOutputCol).setFeaturesCol(assembler.getOutputCol)

val pipeline = new Pipeline().setStages(Array(labelIndexer, indexer, indexTransformer, assembler, classifier))
val pipelineModel = pipeline.fit(df)

//PipelineModelUtil.storeZip(pipelineModel, new File("pipeline/LightGBMAuditNA.zip"))

new PMMLBuilder(df.schema, pipelineModel).buildFile(new File("pmml/LightGBMAuditNA.pmml"))

val predLabel = udf{ (value: Float) => value.toInt.toString }
val vectorToColumn = udf{ (vec: Vector, index: Int) => vec(index) }

var lgbDf = pipelineModel.transform(df)
lgbDf = lgbDf.selectExpr("prediction", "probability")
lgbDf = lgbDf.withColumn("Adjusted", predLabel(lgbDf("prediction"))).drop("prediction")
lgbDf = lgbDf.withColumn("probability(0)", vectorToColumn(lgbDf("probability"), lit(0))).withColumn("probability(1)", vectorToColumn(lgbDf("probability"), lit(1))).drop("probability").drop("probability")

DatasetUtil.storeCsv(lgbDf, new File("csv/LightGBMAuditNA.csv"))
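
For context, a minimal spark-shell sketch (not part of this commit; toyDf and toyIndexer are hypothetical names, and a live spark session is assumed) of how StringIndexer with setHandleInvalid("keep") treats the missing category values that this script exercises: nulls and unseen labels go into one extra index bucket, which the pipeline above then routes through InvalidCategoryTransformer before vector assembly.

import org.apache.spark.ml.feature.StringIndexer

// Toy frame (hypothetical, not part of the commit) with one missing "Employment" value
val toyDf = spark.createDataFrame(Seq(
  (Some("Private"), 1.0),
  (Some("Consultant"), 0.0),
  (None: Option[String], 1.0)
)).toDF("Employment", "label")

val toyIndexer = new StringIndexer().setInputCol("Employment").setOutputCol("idx_Employment").setHandleInvalid("keep")

// With handleInvalid = "keep", null and unseen categories are assigned the extra
// index numLabels (2 here) instead of failing the transform
toyIndexer.fit(toyDf).transform(toyDf).show()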
33 changes: 33 additions & 0 deletions pmml-sparkml-lightgbm/src/test/resources/LightGBMAutoNA.scala
@@ -0,0 +1,33 @@
import java.io.File

import com.microsoft.azure.synapse.ml.lightgbm.LightGBMRegressor
import org.apache.spark.ml.Pipeline
import org.apache.spark.ml.feature._
import org.jpmml.sparkml.{DatasetUtil, PipelineModelUtil, PMMLBuilder}
import org.jpmml.sparkml.feature.InvalidCategoryTransformer

var df = DatasetUtil.loadCsv(spark, new File("csv/AutoNA.csv"))

//DatasetUtil.storeSchema(df, new File("schema/AutoNA.json"))

val cat_cols = Array("cylinders", "model_year", "origin")
val cont_cols = Array("acceleration", "displacement", "horsepower", "weight")

val indexer = new StringIndexer().setInputCols(cat_cols).setOutputCols(cat_cols.map(cat_col => "idx_" + cat_col)).setHandleInvalid("keep")
val indexTransformer = new InvalidCategoryTransformer().setInputCols(indexer.getOutputCols).setOutputCols(cat_cols.map(cat_col => "idxTransformed_" + cat_col))

val assembler = new VectorAssembler().setInputCols(indexTransformer.getOutputCols ++ cont_cols).setOutputCol("featureVector").setHandleInvalid("keep")

val regressor = new LightGBMRegressor().setNumIterations(101).setLabelCol("mpg").setFeaturesCol(assembler.getOutputCol)

val pipeline = new Pipeline().setStages(Array(indexer, indexTransformer, assembler, regressor))
val pipelineModel = pipeline.fit(df)

//PipelineModelUtil.storeZip(pipelineModel, new File("pipeline/LightGBMAutoNA.zip"))

new PMMLBuilder(df.schema, pipelineModel).buildFile(new File("pmml/LightGBMAutoNA.pmml"))

var lgbDf = pipelineModel.transform(df)
lgbDf = lgbDf.selectExpr("prediction as mpg")

DatasetUtil.storeCsv(lgbDf, new File("csv/LightGBMAutoNA.csv"))
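
As an optional sanity check that is not part of this script, the PMML file written above could be loaded and self-verified with the JPMML-Evaluator library; the sketch below assumes that JPMML-Evaluator is on the classpath and uses its LoadingModelEvaluatorBuilder API (class and method names come from JPMML-Evaluator, not from this commit).

import java.io.File

import org.jpmml.evaluator.LoadingModelEvaluatorBuilder

// Load the generated PMML document and replay its embedded model verification data, if any
val evaluator = new LoadingModelEvaluatorBuilder().load(new File("pmml/LightGBMAutoNA.pmml")).build()
evaluator.verify()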