// Log in as administrator so the privileged operations below
// (dropping databases, stream tables, engines) are permitted.
login(`admin,`123456)
// Release all variables and user-defined objects in the current session.
undef(all);
// Clear all cached data on this node before rebuilding the environment.
clearAllCache();
// Force the statements above to execute before the rest of the script is parsed.
go;

/***
 * ==========================================
 * This script initializes the test environment. It performs:
 * 	1. Load the MiniSeed plugin
 * 	2. Create the databases and tables: a partitioned table for sample data,
 * 	   a dimension table for basic metadata, and a partitioned table for latency results
 * 	3. Create the stream tables
 * ==========================================
 */

def loadPlugins(pluginPath="./plugins/mseed/PluginMseed.txt"){
	/*
	 * Description:
	 * 	Load the MiniSeed plugin used to parse mseed files.
	 * 	Loading a plugin that is already loaded throws an exception, so
	 * 	the error is caught and printed to keep the script re-runnable.
	 * Input:
	 * 	pluginPath: STRING, path to the plugin description file.
	 * 	            Defaults to the previously hard-coded location, so
	 * 	            existing no-argument callers are unaffected.
	 */
	try{
		loadPlugin(pluginPath)
	}catch(ex){
		print(ex)
	}
}

def createRealDB(realDbName,realTbName){
	/*
	 * Description:
	 * 	Create the distributed database and partitioned table that store the
	 * 	real-time sample data. The database uses the TSDB engine with a
	 * 	two-level composite partition: level 1 is a VALUE partition on date,
	 * 	level 2 is a VALUE partition on device id, so each level-2 partition
	 * 	holds one day of data for one device.
	 * 	Partitioning columns are ts and id; sort columns are id, ts. The ts
	 * 	and value columns use delta compression, and all rows with duplicate
	 * 	sort keys inside a partition are kept (keepDuplicates=ALL).
	 * Input:
	 * 	realDbName: STRING, database path   realTbName: STRING, table name
	 */
	if(existsDatabase(realDbName)){
		dropDatabase(realDbName)
	}
	dateScheme = database(,VALUE,2023.03.01..2023.03.10)
	idScheme = database(,VALUE,1..3900)
	dbHandle = database(directory=realDbName, partitionType=COMPO, partitionScheme=[dateScheme,idScheme], engine="TSDB")
	schemaTb = table(1:0,`id`ts`value,[INT,TIMESTAMP,INT])
	dbHandle.createPartitionedTable(table=schemaTb, tableName=realTbName, partitionColumns=["ts","id"], sortColumns=["id","ts"], compressMethods={ts:"delta",value:"delta"}, keepDuplicates=ALL)
}

def createDelayDB(delayDbName,delayTbName){
	/*
	 * Description:
	 * 	Create the distributed database and partitioned table that store the
	 * 	latency-computation results. The database uses the TSDB engine with
	 * 	a two-level composite partition: level 1 is a VALUE partition on
	 * 	date, level 2 is a HASH partition (10 buckets on an INT key).
	 * 	Partitioning columns are startTime and id; sort columns are id,
	 * 	startTime. The startTime column uses delta compression, and all rows
	 * 	with duplicate sort keys inside a partition are kept.
	 * Input:
	 * 	delayDbName: STRING, database path   delayTbName: STRING, table name
	 */
	if(existsDatabase(delayDbName)){
		dropDatabase(delayDbName)
	}
	dateScheme = database(,VALUE,2023.03.01..2023.03.10)
	hashScheme = database(,HASH,[INT,10])
	dbHandle = database(directory=delayDbName, partitionType=COMPO, partitionScheme=[dateScheme,hashScheme], engine="TSDB")
	schemaTb = table(1:0,`id`tagid`startTime`receivedTime`delay,[INT,SYMBOL,TIMESTAMP,TIMESTAMP,INT])
	dbHandle.createPartitionedTable(table=schemaTb, tableName=delayTbName, partitionColumns=["startTime","id"], sortColumns=["id","startTime"], compressMethods={startTime:"delta"}, keepDuplicates=ALL)
}

def createDt(realDbName,dtName){  
	/*
	 * Description: 
	 * 	Create the dimension table that stores basic metadata (network,
	 * 	station, location, channel) and populate it with 150 simulated rows.
	 * Input:
	 * 	realDbName: STRING, database path   dtName: STRING, dimension table name
	 */
	if(existsTable(realDbName,dtName)){ dropTable(database(realDbName),dtName) }
	// Simulated tagInfo metadata: 5 networks x 10 stations x 3 channels = 150 tags
	net = ["ZJ","YN","XZ","XJ","TJ"]
	sta = ["A0001","A0002","A0003","A0004","A0005","A0006","B0001","B0002","B0003","C0001"]
	tmp = `EIE`EIN`EIZ
	// stretch repeats each network enough times to reach length 150 (30 rows per network)
	netList = stretch(net,150)
	// each station repeated 3 times (length 30), then cycled with take up to 150
	staList = take(stretch(sta,30),150)
	// constant location code "40" for every row
	locList = take(`40,150)
	// channel codes cycled EIE,EIN,EIZ,...
	chn = take(tmp,150)
	colt =   array(STRING)
	for(i in 0..(chn.size()-1)){
		// NOTE(review): relies on split() with no delimiter breaking the 3-char
		// channel code into single characters ("EIE" -> "E_I_E") — confirm the
		// default-delimiter semantics of split() on this server version.
		colt.append!( chn[i].split()[0] + "_" + chn[i].split()[1] + "_" +chn[i].split()[2] )
	}
	// tag id format: XFDSN:<net>_<sta>_<loc>_<channel-with-underscores>
	tagid = "XFDSN:"+netList+"_"+staList+"_"+locList+"_"+colt
	t = table(1..150 as id,netList as net,staList as sta,locList as loc,chn,tagid)
	// Create the dimension table keyed/sorted on id and load the simulated rows
	database(realDbName).createTable(t,dtName,,`id).append!(t)
}

def undefStreamTable(meta,data,delay,abnormal,engine){
	/*
	 * Description:
	 * 	Tear down the streaming environment: cancel unfinished background
	 * 	jobs, remove the subscriptions, then drop the stream tables and the
	 * 	anomaly-detection engine.
	 * 	Fix: the unsubscribeTable calls previously ignored the parameters
	 * 	and used hard-coded table names; they now use meta/data/delay. Each
	 * 	call is also wrapped in try/catch — consistent with the drops below —
	 * 	so a missing subscription no longer aborts the whole cleanup.
	 * Input:
	 * 	meta, data, delay, abnormal: STRING, stream table names
	 * 	engine: STRING, anomaly-detection engine name
	 */
	jobId = exec jobId from getRecentJobs() where endTime == NULL
	cancelJob(jobId)
	try{ unsubscribeTable(tableName = data, actionName = `append_data_into_dfs) }catch(ex){ print(ex) }
	try{ unsubscribeTable(tableName = data, actionName = `abnormalDetect) }catch(ex){ print(ex) }
	try{ unsubscribeTable(tableName = meta, actionName = `calculate_delay) }catch(ex){ print(ex) }
	try{ unsubscribeTable(tableName = delay, actionName = `append_delay_into_dfs) }catch(ex){ print(ex) }
	try{ dropStreamTable(meta) }catch(ex){ print(ex) }
	try{ dropStreamTable(data) }catch(ex){ print(ex) }
	try{ dropStreamTable(delay) }catch(ex){ print(ex) }
	try{ dropStreamEngine(engine) }catch(ex){ print(ex) }
	try{ dropStreamTable(abnormal) }catch(ex){ print(ex) }	
}

def createStreamTable(meta,data,delay,abnormal){
	/*
	 * Description:
	 * 	Create the shared, persisted stream tables that receive the
	 * 	real-time feeds.
	 * Input:
	 * 	meta: STRING, receives parsed mseed metadata used for latency
	 * 	      calculation; results are written into the delay table
	 * 	data: STRING, receives seismic samples; consumed both by the dfs
	 * 	      writer and by external subscribers for live display
	 * 	delay: STRING, receives latency results to be stored in dfs
	 * 	abnormal: STRING, receives anomaly-alert records
	 */
	metaSchema = streamTable(1000000:0,`id`tagid`startTime`receivedTime`actualCount`expectedCount`sampleRate,[INT,SYMBOL,TIMESTAMP,TIMESTAMP,INT,INT,DOUBLE])
	enableTableShareAndPersistence(table=metaSchema, tableName=meta, asynWrite=true, compress=true, cacheSize=2000000, retentionMinutes=120, preCache = 100000)
	dataSchema = streamTable(1000000:0,`id`ts`data,[INT,TIMESTAMP,INT])
	enableTableShareAndPersistence(table=dataSchema, tableName=data, asynWrite=true, compress=true, cacheSize=2000000000, retentionMinutes=360, preCache = 100000)
	// allow subscribers to filter the data stream by device id
	setStreamTableFilterColumn(objByName(data),`id)
	delaySchema = streamTable(1000000:0,`id`tagid`startTime`receivedTime`delay,[INT,SYMBOL,TIMESTAMP,TIMESTAMP,INT])
	enableTableShareAndPersistence(table=delaySchema, tableName=delay, asynWrite=true, compress=true, cacheSize=2000000, retentionMinutes=120, preCache = 100000)
	abnormalSchema = streamTable(10000:0,`Time`id`anomalyType`anomalyString,[TIMESTAMP,STRING,INT,STRING])
	enableTableShareAndPersistence(table=abnormalSchema, tableName=abnormal, asynWrite=true, compress=true, cacheSize=2000000, retentionMinutes=120, preCache = 100000)
}

def mainPrepare(){
	/*
	 * Description:
	 * 	Entry point: wires together the environment preparation — loads the
	 * 	plugin, tears down any previous streaming state, then (re)creates
	 * 	the databases, the dimension table, and the stream tables.
	 */
	realDbName = "dfs://real"
	realTbName = "realData"
	dtName = "tagInfo"
	delayDbName = "dfs://delay"
	delayTbName = "delayData"
	meta = "metaStream"
	data = "dataStream"
	delay = "delayStream"
	abnormal = "abnormalStream"
	engine = "engine"
	loadPlugins()
	undefStreamTable(meta,data,delay,abnormal, engine)
	createRealDB(realDbName,realTbName)
	createDelayDB(delayDbName,delayTbName)
	createDt(realDbName,dtName)
	createStreamTable(meta,data,delay,abnormal)	
}

