From 58eaf463e38e28cf5e041050b252144d01e8cf76 Mon Sep 17 00:00:00 2001 From: saracv Date: Wed, 21 Oct 2020 22:27:09 -0400 Subject: [PATCH] Assignment 3- SV --- Assignment 3.Rmd | 96 +++++- Assignment-3.html | 845 ++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 936 insertions(+), 5 deletions(-) create mode 100644 Assignment-3.html diff --git a/Assignment 3.Rmd b/Assignment 3.Rmd index 649407e..297c37b 100644 --- a/Assignment 3.Rmd +++ b/Assignment 3.Rmd @@ -2,11 +2,18 @@ ## Part I Start by installing the "igraph" package. Once you have installed igraph, load the package. +```{r} +library(igraph) +library(tidyverse) +``` + Now upload the data file "comment-data.csv" as a data frame called "D1". Each row represents a comment from one student to another so the first line shows that student "28" commented on the comment of student "21". It also shows the gender of both students and the students' main elective field of study ("major""). ```{r} D1 <- read.csv("comment-data.csv", header = TRUE) +#see the content of D1 and the variable type +D1 ``` Before you proceed, you will need to change the data type of the student id variable. Since it is a number R will automatically think it is an integer and code it as such (look at the list of variables by clicking on the data frame arrow in the Data pane. Here you will see the letters "int"" next to the stid variable, that stands for integer). However, in this case we are treating the variable as a category, there is no numeric meaning in the variable. So we need to change the format to be a category, what R calls a "factor". 
We can do this with the following code: @@ -34,7 +41,7 @@ Since our data represnts every time a student makes a comment there are multiple EDGE <- count(D2, comment.to, comment.from) -names(EDGE) <- c("from", "to", "count") +names(EDGE) <- c("to", "from", "count") ``` @@ -96,7 +103,9 @@ plot(g,layout=layout.fruchterman.reingold, vertex.color=VERTEX$gender) plot(g,layout=layout.fruchterman.reingold, vertex.color=VERTEX$gender, edge.width=EDGE$count) -```` + + +``` ## Part II @@ -104,20 +113,97 @@ In Part II your task is to [look up](http://igraph.org/r/) in the igraph documen * Ensure that sizing allows for an unobstructed view of the network features (For example, the arrow size is smaller) * The vertices are colored according to major -* The vertices are sized according to the number of comments they have recieved +* The vertices are sized according to the number of comments they have received + +```{r} +#Vertex size to be sized according to the number of comments a user has received +vs <- EDGE %>% group_by(to) %>% summarise(n=sum(count)) +#changed the graph to allow for an unobstructed view of network featrues +plot(g,layout=layout.fruchterman.reingold, edge.arrow.size=0,vertex.size=2*(vs$n), vertex.color=VERTEX$major, vertex.width=15, label.cex=.0625, vertex.label.dist=0, edge.curved=0.25, vertex.frame.color="gray",vertex.lable.color="gray", main="Comment Network") +``` + ## Part III + Now practice with data from our class. This data is real class data directly exported from Qualtrics and you will need to wrangle it into shape before you can work with it. Import it into R as a data frame and look at it carefully to identify problems. 
+```{r} +#Installed to clean names and create the table +library(janitor) +#read the file, remove rows, and set the header +CD <- read.csv("hudk4050-classes.csv",skip = 1, stringsAsFactors = FALSE, header = TRUE) %>% slice(-1) +#Clean names and remove empty rows and headers +CD2 <- CD %>% clean_names() %>% remove_empty(c("rows","cols")) %>% mutate_at(3:9, str_replace_all," ","") %>% mutate_at(3:9,list(toupper)) +#Remove first row, interest column, and create name column +CD2 <- CD2 %>%slice(-1) %>%select(-9) %>%unite("name",1:2,remove = TRUE) +#Remove possible conflicting characters from the name column +#CD2$name <- CD2 %>% str_replace(CD2$name,"`","") +#Remove duplicates +CD2 <- CD2 %>% distinct() + +``` + Please create a **person-network** with the data set hudk4050-classes.csv. To create this network you will need to create a person-class matrix using the tidyr functions and then create a person-person matrix using `t()`. You will then need to plot a matrix rather than a to/from data frame using igraph. +```{r} +#switching to long file +CD3 <- as_tibble(CD2) %>%pivot_longer(2:7,names_to="QN",values_to="Class") %>% filter(Class!="") %>% unique +#creating the person to class matrix using tabyl to save me some lines and force "" to "emptystring_" in case I left any NAs or empty +personclass <- CD3 %>% tabyl(name,Class) +#Make names column the row names +rownames(personclass) <- personclass$name +#remove name column +personclass <- personclass %>% select(-name,-HUDK4050) +#make person to class a matrix +personclass <- as.matrix(personclass) + #creating the person to person matrix +persontoperson = personclass %*% t(personclass) + +``` + Once you have done this, also [look up](http://igraph.org/r/) how to generate the following network metrics: * Betweeness centrality and dregree centrality. **Who is the most central person in the network according to these two metrics? 
Write a sentence or two that describes your interpretation of these metrics** -* Color the nodes according to interest. Are there any clusters of interest that correspond to clusters in the network? Write a sentence or two describing your interpetation. + + +#Using iGraph to find the most central person +```{r} +g <- graph.adjacency(persontoperson, mode="undirected",diag = FALSE) + +DC <- sort(degree(g),decreasing = TRUE) +BC <-sort(betweenness(g),decreasing = TRUE) + +DC +BC + +#I believe that Yifei_Zhang is the most central person in the network. While Yifei does not has the highest number of connection,but the second highest, Yifei has the shortest path to most people in the class making Yifei the individual that could connect with most of the class the fastest. + +``` + + +* Color the nodes according to interest. Are there any clusters of interest that correspond to clusters in the network? Write a sentence or two describing your interpretation. + + + +```{r} +#creating a variable for the number of classes so I can use it as the vertex size +nc <- CD3 %>% count(name) +#loading Colorbrewer so we can have a graph with +library(RColorBrewer) +#redoing the table because I was to lazy to rename the original table. I have learned my lesson +DI <- CD %>% clean_names() %>% remove_empty(c("rows","cols")) %>% mutate_at(3:9, str_replace_all," ","") %>% mutate_at(3:9,list(toupper)) %>%unite("name",1:2,remove = TRUE) %>% select(1,8) +#using RColorBrewer to set my color palette +pal <- brewer.pal(DI$which_of_these_topics_is_most_interesting_to_you,"Pastel2") +#creating a plot that shows clusters. 
I removed the vertex label since that is no interest for the questions posed +plot(g,layout=layout.fruchterman.reingold, vertex.size=1.5*(nc$n), vertex.label.cex=.50, vertex.label=NA, vertex.frame.color="grey44",vertex.color=pal, edge.color="gray", main="Class Network by Course") + +#I think common interest has to do with a person's major and it is likely that students in the same major have the same classes. I believe that is what we see in the clusters with the blue nodes. + +``` + ### To Submit Your Assignment -Please submit your assignment by first "knitting" your RMarkdown document into an html file and then comit, push and pull request both the RMarkdown file and the html file. +Please submit your assignment by first "knitting" your RMarkdown document into an html file and then commit, push and pull request both the RMarkdown file and the html file. diff --git a/Assignment-3.html b/Assignment-3.html new file mode 100644 index 0000000..022caf8 --- /dev/null +++ b/Assignment-3.html @@ -0,0 +1,845 @@ + + + + + + + + + + + + + +Assignment-3.utf8 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + +
+

Assignment 3 - Social Network Analysis

+
+

Part I

+

Start by installing the “igraph” package. Once you have installed igraph, load the package.

+
library(igraph)
+
## 
+## Attaching package: 'igraph'
+
## The following objects are masked from 'package:stats':
+## 
+##     decompose, spectrum
+
## The following object is masked from 'package:base':
+## 
+##     union
+
library(tidyverse)
+
## -- Attaching packages --------------------------------------- tidyverse 1.3.0 --
+
## v ggplot2 3.3.2     v purrr   0.3.4
+## v tibble  3.0.4     v dplyr   1.0.2
+## v tidyr   1.1.2     v stringr 1.4.0
+## v readr   1.4.0     v forcats 0.5.0
+
## Warning: package 'tibble' was built under R version 4.0.3
+
## -- Conflicts ------------------------------------------ tidyverse_conflicts() --
+## x dplyr::as_data_frame() masks tibble::as_data_frame(), igraph::as_data_frame()
+## x purrr::compose()       masks igraph::compose()
+## x tidyr::crossing()      masks igraph::crossing()
+## x dplyr::filter()        masks stats::filter()
+## x dplyr::groups()        masks igraph::groups()
+## x dplyr::lag()           masks stats::lag()
+## x purrr::simplify()      masks igraph::simplify()
+

Now upload the data file “comment-data.csv” as a data frame called “D1”. Each row represents a comment from one student to another so the first line shows that student “28” commented on the comment of student “21”. It also shows the gender of both students and the students’ main elective field of study (“major”").

+
D1 <- read.csv("comment-data.csv", header = TRUE)
+#see the content of D1 and the variable type
+D1
+
##    comment.from comment.to from.gender         from.major to.gender
+## 1             3         22           B  cognitive science         A
+## 2             3         22           B  cognitive science         A
+## 3            28         24           B         psychology         B
+## 4             6         12           B learning analytics         A
+## 5             6         12           B learning analytics         A
+## 6            11         21           B learning analytics         B
+## 7            15          7           A         psychology         A
+## 8            17         16           B         psychology         B
+## 9             7         29           A         psychology         A
+## 10            5         20           A learning analytics         A
+## 11           16          7           B  cognitive science         A
+## 12           15         19           A         psychology         A
+## 13            7         18           A         psychology         A
+## 14           17         23           B         psychology         A
+## 15           10         15           B  cognitive science         A
+## 16           10         15           B  cognitive science         A
+## 17           10         15           B  cognitive science         A
+## 18           10         15           B  cognitive science         A
+## 19           10         15           B  cognitive science         A
+## 20           27         29           A         psychology         A
+## 21            4         16           B  cognitive science         B
+## 22           11         15           B learning analytics         A
+## 23            5         21           A learning analytics         B
+## 24            5         21           A learning analytics         B
+## 25            2         18           A         psychology         A
+## 26           16         25           B  cognitive science         B
+## 27            6         22           B learning analytics         A
+## 28           20         27           A  cognitive science         A
+## 29           20         27           A  cognitive science         A
+## 30            6         27           B learning analytics         A
+## 31            4         24           B  cognitive science         B
+## 32            2         28           A         psychology         B
+## 33           17         18           B         psychology         A
+## 34           17         18           B         psychology         A
+## 35           17         18           B         psychology         A
+## 36           20         10           A  cognitive science         B
+## 37            7         20           A         psychology         A
+## 38           26         28           A  cognitive science         B
+## 39           13         24           B         psychology         B
+## 40           19         25           A learning analytics         B
+## 41           26         18           A  cognitive science         A
+## 42           17          8           B         psychology         B
+## 43           17          8           B         psychology         B
+## 44           17          8           B         psychology         B
+## 45           17          8           B         psychology         B
+## 46           17          8           B         psychology         B
+## 47            5         24           A learning analytics         B
+## 48           11          4           B learning analytics         B
+## 49           21          7           B         psychology         A
+## 50           27         21           A         psychology         B
+## 51           27         21           A         psychology         B
+## 52           27         21           A         psychology         B
+## 53           26         14           A  cognitive science         B
+## 54           21         23           B         psychology         A
+## 55            1          6           B applied statistics         B
+## 56            2         25           A         psychology         B
+## 57           24         26           B  cognitive science         A
+## 58           23         16           A applied statistics         B
+## 59           15         28           A         psychology         B
+## 60            3          7           B  cognitive science         A
+## 61            3          7           B  cognitive science         A
+## 62            3          7           B  cognitive science         A
+## 63            3          7           B  cognitive science         A
+## 64            3          7           B  cognitive science         A
+## 65            2         28           A         psychology         B
+## 66           29         25           A learning analytics         B
+## 67           27         15           A         psychology         A
+## 68            7         23           A         psychology         A
+## 69            1         14           B applied statistics         B
+## 70            1         14           B applied statistics         B
+## 71            1         14           B applied statistics         B
+## 72            4         29           B  cognitive science         A
+## 73            4         27           B  cognitive science         A
+## 74           18         23           A learning analytics         A
+## 75           16         24           B  cognitive science         B
+## 76           12         29           A         psychology         A
+## 77            4         27           B  cognitive science         A
+## 78           12         16           A         psychology         B
+## 79           12         16           A         psychology         B
+## 80           12         16           A         psychology         B
+## 81            9          8           B         psychology         B
+## 82           21         19           B         psychology         A
+##              to.major
+## 1   cognitive science
+## 2   cognitive science
+## 3   cognitive science
+## 4          psychology
+## 5          psychology
+## 6          psychology
+## 7          psychology
+## 8   cognitive science
+## 9  learning analytics
+## 10  cognitive science
+## 11         psychology
+## 12 learning analytics
+## 13 learning analytics
+## 14 applied statistics
+## 15         psychology
+## 16         psychology
+## 17         psychology
+## 18         psychology
+## 19         psychology
+## 20 learning analytics
+## 21  cognitive science
+## 22         psychology
+## 23         psychology
+## 24         psychology
+## 25 learning analytics
+## 26  cognitive science
+## 27  cognitive science
+## 28         psychology
+## 29         psychology
+## 30         psychology
+## 31  cognitive science
+## 32         psychology
+## 33 learning analytics
+## 34 learning analytics
+## 35 learning analytics
+## 36  cognitive science
+## 37  cognitive science
+## 38         psychology
+## 39  cognitive science
+## 40  cognitive science
+## 41 learning analytics
+## 42 applied statistics
+## 43 applied statistics
+## 44 applied statistics
+## 45 applied statistics
+## 46 applied statistics
+## 47  cognitive science
+## 48  cognitive science
+## 49         psychology
+## 50         psychology
+## 51         psychology
+## 52         psychology
+## 53 learning analytics
+## 54 applied statistics
+## 55 learning analytics
+## 56  cognitive science
+## 57  cognitive science
+## 58  cognitive science
+## 59         psychology
+## 60         psychology
+## 61         psychology
+## 62         psychology
+## 63         psychology
+## 64         psychology
+## 65         psychology
+## 66  cognitive science
+## 67         psychology
+## 68 applied statistics
+## 69 learning analytics
+## 70 learning analytics
+## 71 learning analytics
+## 72 learning analytics
+## 73         psychology
+## 74 applied statistics
+## 75  cognitive science
+## 76 learning analytics
+## 77         psychology
+## 78  cognitive science
+## 79  cognitive science
+## 80  cognitive science
+## 81 applied statistics
+## 82 learning analytics
+

Before you proceed, you will need to change the data type of the student id variable. Since it is a number R will automatically think it is an integer and code it as such (look at the list of variables by clicking on the data frame arrow in the Data pane. Here you will see the letters “int”" next to the stid variable, that stands for integer). However, in this case we are treating the variable as a category, there is no numeric meaning in the variable. So we need to change the format to be a category, what R calls a “factor”. We can do this with the following code:

+
D1$comment.to <- as.factor(D1$comment.to)
+D1$comment.from <- as.factor(D1$comment.from)
+

igraph requires data to be in a particular structure. There are several structures that it can use but we will be using a combination of an “edge list” and a “vertex list” in this assignment. As you might imagine the edge list contains a list of all the relationships between students and any characteristics of those edges that we might be interested in. There are two essential variables in the edge list a “from” variable and a “to” variable that describe the relationships between vertices. While the vertex list contains all the characteristics of those vertices, in our case gender and major.

+

So let’s convert our data into an edge list!

+

First we will isolate the variables that are of interest: comment.from and comment.to

+
library(dplyr)
+
+D2 <- select(D1, comment.to, comment.from) #select() chooses the columns
+

Since our data represents every time a student makes a comment there are multiple rows when the same student comments more than once on another student’s video. We want to collapse these into a single row, with a variable that shows how many times a student-student pair appears.

+
EDGE <- count(D2, comment.to, comment.from)
+
+names(EDGE) <- c("to", "from", "count")
+

EDGE is your edge list. Now we need to make the vertex list, a list of all the students and their characteristics in our network. Because there are some students who only receive comments and do not give any we will need to combine the comment.from and comment.to variables to produce a complete list.

+
#First we will separate the commenters from our commentees
+V.FROM <- select(D1, comment.from, from.gender, from.major)
+
+#Now we will separate the commentees from our commenters
+V.TO <- select(D1, comment.to, to.gender, to.major)
+
+#Make sure that the from and to data frames have the same variables names
+names(V.FROM) <- c("id", "gender.from", "major.from")
+names(V.TO) <- c("id", "gender.to", "major.to")
+
+#Make sure that the id variable in both dataframes has the same number of levels
+lvls <- sort(union(levels(V.FROM$id), levels(V.TO$id)))
+
+VERTEX <- full_join(mutate(V.FROM, id=factor(id, levels=lvls)),
+    mutate(V.TO, id=factor(id, levels=lvls)), by = "id")
+
+#Fill in missing gender and major values - ifelse() will convert factors to numerical values so convert to character
+VERTEX$gender.from <- ifelse(is.na(VERTEX$gender.from) == TRUE, as.factor(as.character(VERTEX$gender.to)), as.factor(as.character(VERTEX$gender.from)))
+
+VERTEX$major.from <- ifelse(is.na(VERTEX$major.from) == TRUE, as.factor(as.character(VERTEX$major.to)), as.factor(as.character(VERTEX$major.from)))
+
+#Remove redundant gender and major variables
+VERTEX <- select(VERTEX, id, gender.from, major.from)
+
+#rename variables
+names(VERTEX) <- c("id", "gender", "major")
+
+#Remove all the repeats so that we just have a list of each student and their characteristics
+VERTEX <- unique(VERTEX)
+

Now we have both a Vertex and Edge list it is time to plot our graph!

+
#Load the igraph package
+
+library(igraph)
+
+#First we will make an object that contains the graph information using our two dataframes EDGE and VERTEX. Notice that we have made "directed = TRUE" - our graph is directed since comments are being given from one student to another.
+
+g <- graph.data.frame(EDGE, directed=TRUE, vertices=VERTEX)
+
+#Now we can plot our graph using the force directed graphing technique - our old friend Fruchertman-Reingold!
+
+plot(g,layout=layout.fruchterman.reingold)
+

+
#There are many ways to change the attributes of the graph to represent different characteristics of the newtork. For example, we can color the nodes according to gender.
+
+plot(g,layout=layout.fruchterman.reingold, vertex.color=VERTEX$gender)
+

+
#We can change the thickness of the edge according to the number of times a particular student has sent another student a comment.
+
+plot(g,layout=layout.fruchterman.reingold, vertex.color=VERTEX$gender, edge.width=EDGE$count)
+

+
+
+

Part II

+

In Part II your task is to look up in the igraph documentation and modify the graph above so that:

+
    +
  • Ensure that sizing allows for an unobstructed view of the network features (For example, the arrow size is smaller)
  • +
  • The vertices are colored according to major
  • +
  • The vertices are sized according to the number of comments they have received
  • +
+
#Vertex size to be sized according to the number of comments a user has received
+vs <- EDGE %>% group_by(to) %>% summarise(n=sum(count))
+
## `summarise()` ungrouping output (override with `.groups` argument)
+
#changed the graph to allow for an unobstructed view of network features
+plot(g,layout=layout.fruchterman.reingold, edge.arrow.size=0,vertex.size=2*(vs$n), vertex.color=VERTEX$major, vertex.width=15, label.cex=.0625, vertex.label.dist=0, edge.curved=0.25, vertex.frame.color="gray",vertex.lable.color="gray", main="Comment Network")
+
## Warning in layout[, 1] + label.dist * cos(-label.degree) * (vertex.size + :
+## longer object length is not a multiple of shorter object length
+
## Warning in layout[, 2] + label.dist * sin(-label.degree) * (vertex.size + :
+## longer object length is not a multiple of shorter object length
+

+
+
+

Part III

+

Now practice with data from our class. This data is real class data directly exported from Qualtrics and you will need to wrangle it into shape before you can work with it. Import it into R as a data frame and look at it carefully to identify problems.

+
#Installed to clean names and create the table
+library(janitor)
+
## Warning: package 'janitor' was built under R version 4.0.3
+
## 
+## Attaching package: 'janitor'
+
## The following objects are masked from 'package:stats':
+## 
+##     chisq.test, fisher.test
+
#read the file, remove rows, and set the header
+CD <- read.csv("hudk4050-classes.csv",skip = 1, stringsAsFactors = FALSE, header = TRUE) %>% slice(-1)
+#Clean names and remove empty rows and headers
+CD2 <- CD %>% clean_names() %>% remove_empty(c("rows","cols")) %>% mutate_at(3:9, str_replace_all," ","") %>% mutate_at(3:9,list(toupper))
+#Remove first row, interest column, and create name column
+CD2 <- CD2 %>%slice(-1) %>%select(-9) %>%unite("name",1:2,remove = TRUE)
+#Remove possible conflicting characters from the name column
+#CD2$name <- CD2 %>% str_replace(CD2$name,"`","")
+#Remove duplicates
+CD2 <- CD2 %>% distinct()
+

Please create a person-network with the data set hudk4050-classes.csv. To create this network you will need to create a person-class matrix using the tidyr functions and then create a person-person matrix using t(). You will then need to plot a matrix rather than a to/from data frame using igraph.

+
#switching to long file
+CD3 <- as_tibble(CD2) %>%pivot_longer(2:7,names_to="QN",values_to="Class") %>% filter(Class!="") %>% unique
+#creating the person to class matrix using tabyl to save me some lines and force  "" to "emptystring_"  in case I left any NAs or empty 
+personclass <- CD3 %>% tabyl(name,Class)
+#Make names column the row names
+rownames(personclass) <- personclass$name
+#remove name column
+personclass <- personclass %>% select(-name,-HUDK4050)
+#make person to class a matrix
+personclass <- as.matrix(personclass)
+  #creating the person to person matrix
+persontoperson = personclass %*% t(personclass)
+

Once you have done this, also look up how to generate the following network metrics:

+
    +
  • Betweenness centrality and degree centrality. Who is the most central person in the network according to these two metrics? Write a sentence or two that describes your interpretation of these metrics
  • +
+

#Using iGraph to find the most central person

+
g <- graph.adjacency(persontoperson, mode="undirected",diag = FALSE)
+
+DC <- sort(degree(g),decreasing = TRUE)
+BC <-sort(betweenness(g),decreasing = TRUE)
+
+DC
+
##          Guoliang_Xu          Hangshi_Jin           Jiaao `_Qi 
+##                   31                   31                   31 
+##          Jiacong_Zhu          Jiahao_Shen            wenqi_gao 
+##                   31                   31                   31 
+##         Xiyun _Zhang          Yingxin_Xie          Yifei_Zhang 
+##                   31                   31                   24 
+##          Xiaojia_Liu            Yuxuan_Ge        Zhixin _Zheng 
+##                   22                   22                   20 
+## Stanley Si Heng_Zhao          Yuting_Zhou              Dan_Lei 
+##                   18                   16                   14 
+##          Xueshi_Wang         Ruoyi _Zhang         Tianyu_Chang 
+##                   14                   12                   12 
+##           Xijia_Wang           yunzhao_wu        Zach_Friedman 
+##                   12                   12                   11 
+##    Nicole_Schlosberg           Berj_Akian              JIE_YAO 
+##                   10                    9                    9 
+##         Kaijie _Fang           Yixiong_Xu          Yucheng_Pan 
+##                    9                    9                    6 
+##      Amanda_Oliveira          Jiasheng_Yu            Rong_Sang 
+##                    5                    5                    5 
+##             Fei_Wang         Wenning_Xiao           Yingxin_Ye 
+##                    4                    4                    2 
+##           Danny_Shan           Fangqi_Liu          Hyungoo_Lee 
+##                    1                    1                    1 
+##        Shuying_Xiong Abdul Malik _Muftau         Ali _Al Jabri 
+##                    1                    0                    0 
+##            Chris_Kim             He _Chen  Mahshad_Davoodifard 
+##                    0                    0                    0 
+##         Qianhui_Yuan         Sara_Vasquez       Vidya_Madhavan 
+##                    0                    0                    0 
+##           Yurui_Wang 
+##                    0
+
BC
+
##          Yifei_Zhang Stanley Si Heng_Zhao              Dan_Lei 
+##          245.7722551           88.7144326           68.5833333 
+##        Zhixin _Zheng        Zach_Friedman    Nicole_Schlosberg 
+##           64.2352941           42.2277449           34.4499671 
+##           Yingxin_Ye          Xueshi_Wang          Yuting_Zhou 
+##           33.0000000           24.4663830           20.0275177 
+##              JIE_YAO          Guoliang_Xu          Hangshi_Jin 
+##           11.6500000            7.2786166            7.2786166 
+##           Jiaao `_Qi          Jiacong_Zhu          Jiahao_Shen 
+##            7.2786166            7.2786166            7.2786166 
+##            wenqi_gao         Xiyun _Zhang          Yingxin_Xie 
+##            7.2786166            7.2786166            7.2786166 
+##           Yixiong_Xu          Xiaojia_Liu            Yuxuan_Ge 
+##            3.8916667            3.0429031            3.0429031 
+##          Yucheng_Pan Abdul Malik _Muftau         Ali _Al Jabri 
+##            0.6666667            0.0000000            0.0000000 
+##      Amanda_Oliveira           Berj_Akian            Chris_Kim 
+##            0.0000000            0.0000000            0.0000000 
+##           Danny_Shan           Fangqi_Liu             Fei_Wang 
+##            0.0000000            0.0000000            0.0000000 
+##             He _Chen          Hyungoo_Lee          Jiasheng_Yu 
+##            0.0000000            0.0000000            0.0000000 
+##         Kaijie _Fang  Mahshad_Davoodifard         Qianhui_Yuan 
+##            0.0000000            0.0000000            0.0000000 
+##            Rong_Sang         Ruoyi _Zhang         Sara_Vasquez 
+##            0.0000000            0.0000000            0.0000000 
+##        Shuying_Xiong         Tianyu_Chang       Vidya_Madhavan 
+##            0.0000000            0.0000000            0.0000000 
+##         Wenning_Xiao           Xijia_Wang           yunzhao_wu 
+##            0.0000000            0.0000000            0.0000000 
+##           Yurui_Wang 
+##            0.0000000
+
+#I believe that Yifei_Zhang is the most central person in the network. While Yifei does not have the highest number of connections, but the second highest, Yifei has the shortest path to most people in the class, making Yifei the individual that could connect with most of the class the fastest.
+
    +
  • Color the nodes according to interest. Are there any clusters of interest that correspond to clusters in the network? Write a sentence or two describing your interpretation.
  • +
+
#creating a variable for the number of classes so I can use it as the vertex size
+nc <- CD3 %>% count(name)
+#loading RColorBrewer so we can have a graph with a color palette
+library(RColorBrewer)
+#redoing the table because I was too lazy to rename the original table. I have learned my lesson
+DI <- CD %>% clean_names() %>% remove_empty(c("rows","cols")) %>% mutate_at(3:9, str_replace_all," ","") %>% mutate_at(3:9,list(toupper)) %>%unite("name",1:2,remove = TRUE) %>%  select(1,8)
+#using RColorBrewer to set my color palette
+pal <- brewer.pal(DI$which_of_these_topics_is_most_interesting_to_you,"Pastel2")
+
## Warning in if (n < 3) {: the condition has length > 1 and only the first element
+## will be used
+
## Warning in if (n > maxcolors[which(name == namelist)]) {: the condition has
+## length > 1 and only the first element will be used
+
## Warning in brewer.pal(DI$which_of_these_topics_is_most_interesting_to_you, : n too large, allowed maximum for palette Pastel2 is 8
+## Returning the palette you asked for with that many colors
+
#creating a plot that shows clusters. I removed the vertex label since that is of no interest for the questions posed
+plot(g,layout=layout.fruchterman.reingold, vertex.size=1.5*(nc$n), vertex.label.cex=.50, vertex.label=NA, vertex.frame.color="grey44",vertex.color=pal, edge.color="gray", main="Class Network by Course")
+

+
#I think common interest has to do with a person's major and it is likely that students in the same major have the same classes. I believe that is what we see in the clusters with the blue nodes. 
+
+

To Submit Your Assignment

+

Please submit your assignment by first “knitting” your RMarkdown document into an html file and then commit, push and pull request both the RMarkdown file and the html file.

+
+
+
+ + + + +
+ + + + + + + + + + + + + + +