 #load "packages.fsx"
-open System
-open System.IO
-open MathNet.Numerics.LinearAlgebra
+#load "../Utils.fs"
+open TorchSharp.Fun

-let dataFolder = @"C:\Users\fwaris\Downloads\pygcn-master\data\cora"
-let contentFile = $"{dataFolder}/cora.content"
-let citesFile = $"{dataFolder}/cora.cites"
-let yourself x = x
+let datafolder = @"C:\s\Repos\gcn\data\cora"
+let adj, features, labels, idx_train, idx_val, idx_test = Utils.loadData datafolder None

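`Utils.loadData` now owns the Cora parsing, normalization, and sparse-tensor construction that the deleted MathNet pipeline below performed inline, and returns everything as TorchSharp tensors. A quick shape check (a sketch; 2708 nodes, 1433 bag-of-words features, and 7 classes are the standard Cora statistics, assumed here rather than asserted by this commit):

```fsharp
// print the tensor shapes returned by the loader
printfn "adj %A, features %A, labels %A" adj.shape features.shape labels.shape
// expected on Cora: adj 2708x2708 (sparse), features 2708x1433, labels 2708
```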
-let dataCntnt =
-    contentFile
-    |> File.ReadLines
-    |> Seq.map(fun x -> x.Split('\t'))
-    |> Seq.map(fun xs ->
-        {|
-            Id = xs.[0]
-            Features = xs.[1 .. xs.Length-2] |> Array.map float32
-            Label = xs.[xs.Length-1]
-        |})
+let v1 = adj.[0L,50L] |> float

-let dataCites =
-    citesFile
-    |> File.ReadLines
-    |> Seq.map (fun x->x.Split('\t'))
-    |> Seq.map (fun xs -> xs.[0],xs.[1])
-    |> Seq.toArray
+let idx = adj.SparseIndices |> Tensor.getData<int64>
+let rc = idx |> Array.chunkBySize (idx.Length/2)
+let vals = adj.SparseValues |> Tensor.getData<float32>

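`SparseIndices` on a COO tensor is a 2 × nnz matrix, row indices stacked above column indices, so reading it back as a flat array and chunking it in half recovers the two index vectors. A minimal round trip built from the same calls this file already uses (`Int64Tensor.from`, `view`, `Float32Tensor.sparse`, `Tensor.getData`):

```fsharp
open TorchSharp.Tensor
open TorchSharp.Fun

// two nonzeros at (0,1) and (1,0); indices are laid out rows-then-cols
let spIdx  = Int64Tensor.from([|0L; 1L; 1L; 0L|]).view(2L, -1L)
let spVals = Float32Tensor.from([|1.0f; 2.0f|])
let sp     = Float32Tensor.sparse(spIdx, spVals, [|2L; 2L|])

let flat   = sp.SparseIndices |> Tensor.getData<int64>
let halves = flat |> Array.chunkBySize (flat.Length / 2)
// halves.[0] = [|0L; 1L|] (rows); halves.[1] = [|1L; 0L|] (cols)
```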
-let citationIdx = dataCites |> Seq.collect (fun (a,b)->[a;b]) |> Seq.distinct |> Seq.mapi (fun i x->x,i) |> dict
+let i = 500
+let r,c = rc.[0].[i],rc.[1].[i]
+let vx = adj.[r,c] |> float

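If the layout assumption holds, the dense-style lookup must agree with the i-th stored nonzero (assuming `adj` carries no duplicate coordinates, which the loader's output is taken to guarantee):

```fsharp
// spot-check: stored value vs. direct indexing at the same coordinate
let agrees = abs (vx - float vals.[i]) < 1e-6
```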
-let ftrs = Matrix.Build.DenseOfRows(dataCntnt |> Seq.map (fun x->Array.toSeq x.Features))
+let df = features |> Tensor.getData<float32> |> Array.chunkBySize (int features.shape.[1])

-let graph = Matrix.Build.SparseFromCoordinateFormat
-            (
-                dataCites.Length, dataCites.Length, dataCites.Length,
-                dataCites |> Array.map (fun x -> citationIdx.[fst x]),
-                dataCites |> Array.map (fun x -> citationIdx.[snd x]),
-                dataCites |> Array.map (fun _ -> 1.0f)
-            )
+let f1 = features.[1L,12L] |> float

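Likewise for the dense feature matrix: `Tensor.getData` returns the elements row-major, so after chunking by row width, `df.[r].[c]` should mirror `features.[r, c]`:

```fsharp
// row-major read-back check against the direct lookup above
let featureAgrees = abs (float df.[1].[12] - f1) < 1e-6
```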
-let normalize (m:Matrix<float32>) =
-    let rowsum = m.RowSums()
-    let r_inv = rowsum.PointwisePower(-1.0f)
-    let r_inv = r_inv.Map(fun x-> if Single.IsInfinity x then 0.0f else x)
-    let r_mat_inv = Matrix.Build.SparseOfDiagonalVector(r_inv)
-    let mx = r_mat_inv.Multiply(m)
-    mx
-
-let graph_n = Matrix.Build.SparseIdentity(graph.RowCount) + graph |> normalize
-let ftrs_n = normalize ftrs
-
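For reference, the deleted `normalize` is plain row normalization, mx = D⁻¹m, with zero row sums mapped to zero instead of infinity. A dependency-free sketch of the same computation over raw arrays (a hypothetical helper, not part of this commit):

```fsharp
// scale each row by 1/rowsum; all-zero rows stay zero
let normalizeRows (rows: float32[][]) =
    rows
    |> Array.map (fun row ->
        let s = Array.sum row
        let rinv = if s = 0.0f then 0.0f else 1.0f / s
        row |> Array.map (fun v -> v * rinv))
```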
-open TorchSharp.Tensor
-let sparse_mx_to_torch_sparse_tensor (m:Matrix<float32>) =
-    let coo = m.EnumerateIndexed(Zeros.AllowSkip)
-    let rows = coo |> Seq.map (fun (r,c,v) -> int64 r)
-    let cols = coo |> Seq.map (fun (r,c,v) -> int64 c)
-    let idxs = Seq.append rows cols |> Seq.toArray
-    let idx1 = idxs |> Int64Tensor.from |> fun x -> x.view(2L,-1L)
-    let vals = coo |> Seq.map(fun (r,c,v) -> v) |> Seq.toArray |> Float32Tensor.from
-    Float32Tensor.sparse(idx1,vals,[|int64 m.RowCount; int64 m.ColumnCount|])
-
-let adj = sparse_mx_to_torch_sparse_tensor(graph_n)
-
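Note the symmetry with the read-side code added above: this deleted helper wrote the indices rows-then-cols into a 2 × nnz tensor, which is exactly the layout that `Array.chunkBySize (idx.Length/2)` unpacks.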
-module GCNModel =
-    open TorchSharp.Tensor
-    open TorchSharp.NN
-    open type TorchSharp.NN.Modules
-    open TorchSharp.Fun
-    let inline (!>) (x:^a) : ^b = ((^a or ^b) : (static member op_Implicit : ^a -> ^b) x)
-
-    let gcnLayer in_features out_features hasBias (adj:TorchTensor) =
-        let weight = Parameter(randName(),Float32Tensor.empty([|in_features; out_features|]))
-        let bias = if hasBias then Parameter(randName(),Float32Tensor.empty([|out_features|])) |> Some else None
-        let parms = [| yield weight; if hasBias then yield bias.Value |]
-        Init.kaiming_uniform(weight.Tensor) |> ignore
-
-        Model.create(parms, fun wts t ->
-            let support = t.mm(wts.[0])
-            let output = adj.mm(support)
-            if hasBias then
-                output.add(wts.[1])
-            else
-                output)
-
-    let create nfeat nhid nclass dropout adj =
-        let gc1 = gcnLayer nfeat nhid true adj
-        let gc2 = gcnLayer nhid nclass true adj
-        let relu = ReLU()
-        let logm = LogSoftmax(1L)
-        let drp = if dropout then Dropout() |> M else Model.nop
-        fwd3 gc1 gc2 drp (fun t g1 g2 drp ->
-            use t = gc1.forward(t)
-            use t = relu.forward(t)
-            use t = drp.forward(t)
-            use t = gc2.forward(t)
-            let t = logm.forward(t)
-            t)
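The deleted model is the standard two-layer GCN from pygcn: each `gcnLayer` computes `adj.mm(t.mm(W))` plus an optional bias, i.e. Â·H·W over the normalized adjacency, and `create` chains gc1 → ReLU → dropout → gc2 → log-softmax.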