From 55aa787462426a152a1b91a94965c737b57bd38e Mon Sep 17 00:00:00 2001
From: xiaojia-liu <70608311+xiaojia-liu@users.noreply.github.com>
Date: Wed, 25 Nov 2020 16:08:14 -0500
Subject: [PATCH] Submitting Assignment 5 - Xiaojia Liu

---
 assignment5.Rmd  |  36 ++-
 assignment5.html | 646 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 679 insertions(+), 3 deletions(-)
 create mode 100644 assignment5.html

diff --git a/assignment5.Rmd b/assignment5.Rmd
index 288bcb3..13bc3f0 100644
--- a/assignment5.Rmd
+++ b/assignment5.Rmd
@@ -16,7 +16,9 @@ The data you will be using comes from the Assistments online intelligent tutorin
 ## Start by uploading the data
 ```{r}
-D1 <-
+library(readr)
+D1 <- read_csv("Assistments-confidence.csv")
+View(D1)
 ```
@@ -38,7 +40,10 @@ ggcorr(D1[,-1], method = c("everything", "pearson")) #ggcorr() doesn't have an e
 ## Create a new data frame with the mean_correct variable removed, we want to keep that variable intact. The other variables will be included in our PCA.
 ```{r}
-D2 <-
+
+library(dplyr)
+D2 <- select(D1, -mean_correct,-id)
+View(D2)
 ```
@@ -73,10 +78,14 @@ plot(pca, type = "lines")
 ```{r}
 #Now, create a data frame of the transformed data from your pca.
-D3 <-
+D3 <- data.frame(pca$x)
+D4 <- data.frame(D3, D1$mean_correct)
+View(D4)
 #Attach the variable "mean_correct" from your original data frame to D3.
+ggpairs(D4, progress = FALSE)
+ggcorr(D4, method = c("everything", "pearson"))
 #Now re-run your correlation plots between the transformed data and mean_correct. If you had dropped some components would you have lost important information about mean_correct?
@@ -95,6 +104,13 @@ loadings <- abs(pca$rotation) #abs() will make all eigenvectors positive
 #Now examine your components and try to come up with substantive descriptions of what some might represent?
+#1) PC1 is dominated by mean_hint, mean_attempt, and problems_attempted: it captures how much help students requested and how much effort they put into the problems.
+#2) PC2 is driven by prior_percent_correct and prior_prob_count: it reflects students' prior experience in the system and how well they did on those earlier problems.
+#3) PC3 is dominated by mean_confidence, contrasted with problems_attempted: students' self-reported confidence relative to how many problems they attempted.
+#4) PC4 contrasts prior_prob_count with mean_confidence: how much prior practice students had versus how confident they felt.
+#5) PC5 contrasts mean_attempt with problems_attempted (repeated attempts on the same items versus attempting many items); it is not strongly related to mean_correct.
+
+
 #You can generate a biplot to help you, though these can be a bit confusing. They plot the transformed data by the first two components. Therefore, the axes represent the direction of maximum variance accounted for. Then mapped onto this point cloud are the original directions of the variables, depicted as red arrows. It is supposed to provide a visualization of which variables "go together". Variables that possibly represent the same underlying construct point in the same direction.
 biplot(pca)
@@ -105,6 +121,20 @@ biplot(pca)
 Also in this repository is a data set collected from TC students (tc-program-combos.csv) that shows how many students thought that a TC program was related to another TC program. Students were shown three program names at a time and were asked which two of the three were most similar. Use PCA to look for components that represent related programs. Explain why you think there are relationships between these programs.
 ```{r}
+library(readr)
+data <- read.csv("tc-program-combos.csv")
+View(data)
+
+ggcorr(data[,-1], method = c("everything", "pearson"))
+
+data2 <- data[,-1]
+pca2 <- prcomp(data2, scale. = TRUE)
+pca2$sdev
+pca2$sdev^2
+summary(pca2)
+
+plot(pca2, type = "lines")
+
 ```

diff --git a/assignment5.html b/assignment5.html
new file mode 100644
index 0000000..1a23778
--- /dev/null
+++ b/assignment5.html
@@ -0,0 +1,646 @@
+The data you will be using comes from the Assistments online intelligent tutoring system (https://www.assistments.org/). It describes students working through online math problems. Each student has the following data associated with them: id, prior_prob_count, prior_percent_correct, problems_attempted, mean_correct, mean_hint, mean_attempt, and mean_confidence (see the column specification printed below).
+library(readr)
+D1 <- read_csv("Assistments-confidence.csv")
+## Parsed with column specification:
+## cols(
+## id = col_double(),
+## prior_prob_count = col_double(),
+## prior_percent_correct = col_double(),
+## problems_attempted = col_double(),
+## mean_correct = col_double(),
+## mean_hint = col_double(),
+## mean_attempt = col_double(),
+## mean_confidence = col_double()
+## )
+View(D1)
+#You can install the corrplot package to plot some pretty correlation matrices (sometimes called correlograms)
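For readers who want to follow that suggestion, a minimal corrplot sketch is shown below. It is not used elsewhere in this submission, and the "circle" method is just one of corrplot's display options.

```{r}
# install.packages("corrplot")  # if not already installed
library(corrplot)
# corrplot() takes a correlation matrix rather than raw data
corrplot(cor(D1[ , -1], use = "pairwise.complete.obs"), method = "circle")
```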
+
+library(ggplot2)
+library(GGally)
+## Registered S3 method overwritten by 'GGally':
+## method from
+## +.gg ggplot2
+ggpairs(D1, 2:8, progress = FALSE) #ggpairs() draws a correlation plot between all the columns you identify by number (the second argument; you don't need the first column as it is the student ID); progress = FALSE stops a progress bar from appearing as the plot renders
+
+ggcorr(D1[,-1], method = c("everything", "pearson")) #ggcorr() doesn't have an explicit option to choose variables, so we use matrix notation to drop the id variable. We then need to choose a "method", which determines both how to treat missing values (here "everything") and which kind of correlation to compute (here Pearson; the other options are "kendall" and "spearman").
+
+#Study your correlogram images and save them; you will need them later. Take note of what is strongly related to the outcome variable of interest, mean_correct.
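As a numeric complement to the correlograms (an added sketch, not part of the submitted code), the correlations with mean_correct can be printed directly; pairwise-complete observations and two-decimal rounding are arbitrary choices here.

```{r}
# Correlation of every predictor with the outcome variable, mean_correct
round(cor(D1[ , -1], use = "pairwise.complete.obs")[ , "mean_correct"], 2)
```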
+library(dplyr)
+##
+## Attaching package: 'dplyr'
+## The following objects are masked from 'package:stats':
+##
+## filter, lag
+## The following objects are masked from 'package:base':
+##
+## intersect, setdiff, setequal, union
+D2 <- select(D1, -mean_correct,-id)
+View(D2)
+pca <- prcomp(D2, scale. = TRUE)
+pca$sdev
+## [1] 1.2825140 1.0543565 1.0245688 0.9621486 0.8556715 0.7320146
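Since scale. = TRUE standardizes each variable, these component standard deviations are the square roots of the eigenvalues of the correlation matrix of D2. A quick sketch of that equivalence, added here for illustration rather than taken from the submission:

```{r}
# With centering and scaling, prcomp()'s component variances equal the
# eigenvalues of the correlation matrix, so pca$sdev can be reproduced as:
sqrt(eigen(cor(D2))$values)
```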
+#To convert this into variance accounted for we can square it; these squared values are the eigenvalues (the variances of the components)
+
+pca$sdev^2
+## [1] 1.6448423 1.1116675 1.0497412 0.9257299 0.7321737 0.5358454
+#A summary of our pca will give us the proportion of variance accounted for by each component
+
+summary(pca)
+## Importance of components:
+## PC1 PC2 PC3 PC4 PC5 PC6
+## Standard deviation 1.2825 1.0544 1.0246 0.9621 0.8557 0.73201
+## Proportion of Variance 0.2741 0.1853 0.1750 0.1543 0.1220 0.08931
+## Cumulative Proportion 0.2741 0.4594 0.6344 0.7887 0.9107 1.00000
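As a quick check (a small illustrative addition, not part of the submitted code), the Proportion of Variance row above can be reproduced from the squared standard deviations by dividing each eigenvalue by their sum:

```{r}
# Each component's variance (eigenvalue) divided by the total variance gives the
# proportion of variance explained, matching summary(pca)
eigenvalues <- pca$sdev^2
round(eigenvalues / sum(eigenvalues), 4)
```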
+#We can look at this to get an idea of which components we should keep and which we should drop
+
+plot(pca, type = "lines")
+
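One way to turn the scree plot into a concrete decision is to look at cumulative variance. The sketch below is illustrative only; the 0.75 threshold is an arbitrary choice, not a rule from the assignment.

```{r}
# Cumulative proportion of variance explained; keep components until it
# crosses a chosen threshold (0.75 here is arbitrary)
cum_var <- cumsum(pca$sdev^2) / sum(pca$sdev^2)
cum_var
which(cum_var >= 0.75)[1]
```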
+#Now, create a data frame of the transformed data from your pca.
+
+D3 <- data.frame(pca$x)
+D4 <- data.frame(D3, D1$mean_correct)
+View(D4)
+#Attach the variable "mean_correct" from your original data frame to D3.
+
+ggpairs(D4, progress = FALSE)
+
+ggcorr(D4, method = c("everything", "pearson"))
+
+#Now re-run your correlation plots between the transformed data and mean_correct. If you had dropped some components would you have lost important information about mean_correct?
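To answer the question numerically rather than only from the plots, one could correlate each component with mean_correct. This is a sketch, not part of the submission; it assumes the column name D1.mean_correct generated by the data.frame() call above, and pairwise-complete observations are used as a precaution.

```{r}
# Correlation of each principal component with mean_correct; a component with a
# non-trivial correlation here would carry information lost if it were dropped
round(cor(D4, use = "pairwise.complete.obs")["D1.mean_correct", paste0("PC", 1:6)], 2)
```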
+pca$rotation
+## PC1 PC2 PC3 PC4
+## prior_prob_count -0.26034140 0.45818753 -0.40090679 -0.6897642
+## prior_percent_correct 0.16840319 0.81617867 0.09267306 0.2640040
+## problems_attempted -0.45568733 0.31685183 0.36387724 0.3168141
+## mean_hint -0.63337594 -0.12501620 -0.08008842 -0.1122586
+## mean_attempt -0.54200011 -0.08510858 -0.04585364 0.3108682
+## mean_confidence 0.03581325 0.02547483 -0.83051917 0.4948890
+## PC5 PC6
+## prior_prob_count -0.007142834 -0.29280482
+## prior_percent_correct 0.298843852 0.37134715
+## problems_attempted -0.592336569 -0.32911025
+## mean_hint -0.102302115 0.74412634
+## mean_attempt 0.697232132 -0.33781385
+## mean_confidence -0.251357022 -0.01452143
+#Examine the eigenvectors; notice that they are a little difficult to interpret. It is much easier to make sense of them if we make them proportional within each component.
+
+loadings <- abs(pca$rotation) #abs() will make all eigenvectors positive
+
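The comment above talks about making the loadings proportional within each component, but that step is not shown in the output. Below is a minimal sketch of one way to do it; dividing by the column sums is this sketch's reading of "proportional", not something stated in the submission.

```{r}
# Rescale the absolute loadings so that each component's loadings sum to 1,
# making it easier to see which variables dominate each component
loadings_prop <- sweep(loadings, 2, colSums(loadings), "/")
round(loadings_prop, 2)
```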
+#Now examine your components and try to come up with substantive descriptions of what some might represent?
+
+#1) PC1 is dominated by mean_hint, mean_attempt, and problems_attempted: it captures how much help students requested and how much effort they put into the problems.
+#2) PC2 is driven by prior_percent_correct and prior_prob_count: it reflects students' prior experience in the system and how well they did on those earlier problems.
+#3) PC3 is dominated by mean_confidence, contrasted with problems_attempted: students' self-reported confidence relative to how many problems they attempted.
+#4) PC4 contrasts prior_prob_count with mean_confidence: how much prior practice students had versus how confident they felt.
+#5) PC5 contrasts mean_attempt with problems_attempted (repeated attempts on the same items versus attempting many items); it is not strongly related to mean_correct.
+
+
+#You can generate a biplot to help you, though these can be a bit confusing. They plot the transformed data by the first two components. Therefore, the axes represent the direction of maximum variance accounted for. Then mapped onto this point cloud are the original directions of the variables, depicted as red arrows. It is supposed to provide a visualization of which variables "go together". Variables that possibly represent the same underlying construct point in the same direction.
+
+biplot(pca)
+ # Part III
+Also in this repository is a data set collected from TC students (tc-program-combos.csv) that shows how many students thought that a TC program was related to another TC program. Students were shown three program names at a time and were asked which two of the three were most similar. Use PCA to look for components that represent related programs. Explain why you think there are relationships between these programs.
+library(readr)
+data <- read.csv("tc-program-combos.csv")
+View(data)
+ggcorr(data[,-1], method = c("everything", "pearson"))
+
+data2 <- data[,-1]
+pca2 <- prcomp(data2, scale. = TRUE)
+pca2$sdev
+## [1] 2.66699514 2.33303087 2.03824332 1.80893489 1.71451092 1.60411744
+## [7] 1.58798960 1.49222150 1.46424566 1.39138869 1.33520786 1.32516917
+## [13] 1.31212863 1.26312253 1.25366254 1.22338737 1.21896028 1.18649390
+## [19] 1.13127469 1.12814038 1.10432926 1.06319093 1.01168384 0.99665812
+## [25] 0.96528383 0.95048841 0.93256896 0.90507518 0.85160824 0.83479067
+## [31] 0.81879538 0.78538963 0.76079365 0.73350908 0.72278124 0.67319166
+## [37] 0.66343310 0.64839067 0.62448974 0.60331242 0.56846989 0.55769066
+## [43] 0.51031628 0.49442626 0.47128286 0.44551299 0.43288829 0.41344476
+## [49] 0.37259965 0.36653720 0.35015529 0.33278376 0.32799766 0.30414406
+## [55] 0.28040415 0.27066834 0.23729873 0.21156010 0.17616906 0.16541514
+## [61] 0.14778289 0.14204907 0.11092615 0.07054620 0.04430024 0.03588606
+## [67] 0.01241193
+pca2$sdev^2
+## [1] 7.1128630864 5.4430330560 4.1544358466 3.2722454371 2.9395476996
+## [6] 2.5731927459 2.5217109546 2.2267250142 2.1440153558 1.9359624893
+## [11] 1.7827800336 1.7560733413 1.7216815395 1.5954785267 1.5716697682
+## [16] 1.4966766496 1.4858641675 1.4077677636 1.2797824290 1.2727007063
+## [21] 1.2195431178 1.1303749458 1.0235041934 0.9933274149 0.9317728773
+## [26] 0.9034282205 0.8696848576 0.8191610815 0.7252366024 0.6968754570
+## [31] 0.6704258671 0.6168368672 0.5788069731 0.5380355701 0.5224127176
+## [36] 0.4531870174 0.4401434846 0.4204104623 0.3899874305 0.3639858772
+## [41] 0.3231580154 0.3110188729 0.2604227077 0.2444573271 0.2221075356
+## [46] 0.1984818264 0.1873922684 0.1709365720 0.1388304984 0.1343495215
+## [51] 0.1226087294 0.1107450296 0.1075824634 0.0925036076 0.0786264855
+## [56] 0.0732613524 0.0563106886 0.0447576745 0.0310355381 0.0273621679
+## [61] 0.0218397813 0.0201779378 0.0123046098 0.0049767662 0.0019625111
+## [66] 0.0012878092 0.0001540561
+summary(pca2)
+## Importance of components:
+## PC1 PC2 PC3 PC4 PC5 PC6 PC7
+## Standard deviation 2.6670 2.33303 2.03824 1.80893 1.71451 1.60412 1.58799
+## Proportion of Variance 0.1062 0.08124 0.06201 0.04884 0.04387 0.03841 0.03764
+## Cumulative Proportion 0.1062 0.18740 0.24941 0.29825 0.34212 0.38053 0.41816
+## PC8 PC9 PC10 PC11 PC12 PC13 PC14
+## Standard deviation 1.49222 1.4642 1.39139 1.33521 1.32517 1.3121 1.26312
+## Proportion of Variance 0.03323 0.0320 0.02889 0.02661 0.02621 0.0257 0.02381
+## Cumulative Proportion 0.45140 0.4834 0.51229 0.53890 0.56511 0.5908 0.61462
+## PC15 PC16 PC17 PC18 PC19 PC20 PC21
+## Standard deviation 1.25366 1.22339 1.21896 1.18649 1.1313 1.1281 1.1043
+## Proportion of Variance 0.02346 0.02234 0.02218 0.02101 0.0191 0.0190 0.0182
+## Cumulative Proportion 0.63808 0.66042 0.68260 0.70361 0.7227 0.7417 0.7599
+## PC22 PC23 PC24 PC25 PC26 PC27 PC28
+## Standard deviation 1.06319 1.01168 0.99666 0.96528 0.95049 0.93257 0.90508
+## Proportion of Variance 0.01687 0.01528 0.01483 0.01391 0.01348 0.01298 0.01223
+## Cumulative Proportion 0.77678 0.79205 0.80688 0.82079 0.83427 0.84725 0.85948
+## PC29 PC30 PC31 PC32 PC33 PC34 PC35
+## Standard deviation 0.85161 0.8348 0.81880 0.78539 0.76079 0.73351 0.7228
+## Proportion of Variance 0.01082 0.0104 0.01001 0.00921 0.00864 0.00803 0.0078
+## Cumulative Proportion 0.87030 0.8807 0.89071 0.89992 0.90856 0.91659 0.9244
+## PC36 PC37 PC38 PC39 PC40 PC41 PC42
+## Standard deviation 0.67319 0.66343 0.64839 0.62449 0.60331 0.56847 0.55769
+## Proportion of Variance 0.00676 0.00657 0.00627 0.00582 0.00543 0.00482 0.00464
+## Cumulative Proportion 0.93115 0.93772 0.94399 0.94981 0.95524 0.96007 0.96471
+## PC43 PC44 PC45 PC46 PC47 PC48 PC49
+## Standard deviation 0.51032 0.49443 0.47128 0.44551 0.4329 0.41344 0.37260
+## Proportion of Variance 0.00389 0.00365 0.00332 0.00296 0.0028 0.00255 0.00207
+## Cumulative Proportion 0.96860 0.97224 0.97556 0.97852 0.9813 0.98387 0.98594
+## PC50 PC51 PC52 PC53 PC54 PC55 PC56
+## Standard deviation 0.36654 0.35016 0.33278 0.32800 0.30414 0.28040 0.27067
+## Proportion of Variance 0.00201 0.00183 0.00165 0.00161 0.00138 0.00117 0.00109
+## Cumulative Proportion 0.98795 0.98978 0.99143 0.99304 0.99442 0.99559 0.99668
+## PC57 PC58 PC59 PC60 PC61 PC62 PC63
+## Standard deviation 0.23730 0.21156 0.17617 0.16542 0.14778 0.1420 0.11093
+## Proportion of Variance 0.00084 0.00067 0.00046 0.00041 0.00033 0.0003 0.00018
+## Cumulative Proportion 0.99752 0.99819 0.99866 0.99906 0.99939 0.9997 0.99987
+## PC64 PC65 PC66 PC67
+## Standard deviation 0.07055 0.04430 0.03589 0.01241
+## Proportion of Variance 0.00007 0.00003 0.00002 0.00000
+## Cumulative Proportion 0.99995 0.99998 1.00000 1.00000
+plot(pca2, type = "lines")
+
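The prompt also asks which programs appear related, which the scree output alone does not show. The sketch below (not part of the submission) lists the programs with the largest absolute loadings on each of the first few components; the choice of four components and five programs per component is arbitrary.

```{r}
# Programs with the largest absolute loadings on each of the first four components;
# programs that load together on a component suggest a cluster of related programs
loadings2 <- abs(pca2$rotation)
apply(loadings2[, 1:4], 2, function(x) names(sort(x, decreasing = TRUE))[1:5])
```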
+